Update gps
diff --git a/glide.lock b/glide.lock
index 49bed7b..e9e7e6b 100644
--- a/glide.lock
+++ b/glide.lock
@@ -10,7 +10,7 @@
 - name: github.com/Masterminds/vcs
   version: fbe9fb6ad5b5f35b3e82a7c21123cfc526cbf895
 - name: github.com/sdboyer/gps
-  version: a868c10855893c21ed05d0f50d6f9acb12b6366d
+  version: 63a033c13497e9f5ca47f5f6d4e02455f5d4d85d
 - name: github.com/termie/go-shutil
   version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
 - name: gopkg.in/yaml.v2
diff --git a/vendor/github.com/sdboyer/gps/README.md b/vendor/github.com/sdboyer/gps/README.md
index 227bf6b..2cd2d99 100644
--- a/vendor/github.com/sdboyer/gps/README.md
+++ b/vendor/github.com/sdboyer/gps/README.md
@@ -1,91 +1,110 @@
-# gps
-![map-marker-icon copy](https://cloud.githubusercontent.com/assets/21599/16779217/4f5cdc6c-483f-11e6-9de3-661f13d9b215.png)
+<p align="center">
+<img 
+    src="header.png"
+    width="800" height="255" border="0" alt="gps">
+<br>
+<a href="https://circleci.com/gh/sdboyer/gps"><img src="https://circleci.com/gh/sdboyer/gps.svg?style=svg" alt="Build Status"></a>
+<a href="https://ci.appveyor.com/project/sdboyer/gps"><img src="https://ci.appveyor.com/api/projects/status/github/sdboyer/gps?svg=true&branch=master&passingText=Windows%20-%20OK&failingText=Windows%20-%20failed&pendingText=Windows%20-%20pending" alt="Windows Build Status"></a>
+<a href="https://goreportcard.com/report/github.com/sdboyer/gps"><img src="https://goreportcard.com/badge/github.com/sdboyer/gps" alt="Build Status"></a>
+<a href="https://codecov.io/gh/sdboyer/gps"><img src="https://codecov.io/gh/sdboyer/gps/branch/master/graph/badge.svg" alt="Codecov" /></a>
+<a href="https://godoc.org/github.com/sdboyer/gps"><img src="https://godoc.org/github.com/sdboyer/gps?status.svg" alt="GoDoc"></a>
+</p>
+
 --
 
-[![CircleCI](https://circleci.com/gh/sdboyer/gps.svg?style=svg)](https://circleci.com/gh/sdboyer/gps) [![Go Report Card](https://goreportcard.com/badge/github.com/sdboyer/gps)](https://goreportcard.com/report/github.com/sdboyer/gps) [![GoDoc](https://godoc.org/github.com/sdboyer/gps?status.svg)](https://godoc.org/github.com/sdboyer/gps)
-
 `gps` is the Go Packaging Solver. It is an engine for tackling dependency
-management problems in Go. You can replicate the fetching bits of `go get`,
-modulo arguments, [in about 30 lines of
-code](https://github.com/sdboyer/gps/blob/master/example.go) with `gps`.
+management problems in Go. It is trivial - [about 35 lines of
+code](https://github.com/sdboyer/gps/blob/master/example.go) - to replicate the
+fetching bits of `go get` using `gps`.
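+
+As a taste of the API, here is a condensed sketch adapted from that example
+(hedged: exact signatures such as `NewSourceManager` and `WriteDepTree` may
+drift between revisions, and `NaiveAnalyzer` stands in for a tool's own
+`ProjectAnalyzer` implementation):
+
+```go
+// Sketch only - see example.go in this repository for the runnable version.
+root, _ := os.Getwd()
+srcprefix := filepath.Join(build.Default.GOPATH, "src") + string(filepath.Separator)
+importroot := filepath.ToSlash(strings.TrimPrefix(root, srcprefix))
+
+// Tell the solver what project it's solving for.
+params := gps.SolveParameters{
+	RootDir:    root,
+	ImportRoot: gps.ProjectRoot(importroot),
+}
+
+// A SourceManager handles fetching and caching upstream sources.
+sourcemgr, _ := gps.NewSourceManager(NaiveAnalyzer{}, ".repocache", false)
+defer sourcemgr.Release()
+
+// Prepare and run a solve, then write the solution out as a vendor tree.
+solver, _ := gps.Prepare(params, sourcemgr)
+solution, err := solver.Solve()
+if err == nil {
+	os.RemoveAll(filepath.Join(root, "vendor"))
+	gps.WriteDepTree(filepath.Join(root, "vendor"), solution, sourcemgr, true)
+}
+```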
 
 `gps` is _not_ Yet Another Go Package Management Tool. Rather, it's a library
 that package management (and adjacent) tools can use to solve the
 [hard](https://en.wikipedia.org/wiki/Boolean_satisfiability_problem) parts of
 the problem in a consistent,
 [holistic](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
-way. `gps` is [on track](https://github.com/Masterminds/glide/pull/384) to become the engine behind [glide](https://glide.sh).
+way. It is a distillation of the ideas behind language package managers like
+[bundler](http://bundler.io), [npm](https://www.npmjs.com/),
+[elm-package](https://github.com/elm-lang/elm-package),
+[cargo](https://crates.io/) (and others) into a library, artisanally
+handcrafted with ❤️ for Go's specific requirements.
+
+`gps` is [on track](https://github.com/Masterminds/glide/pull/384) to become the engine behind [glide](https://glide.sh).
 
 The wiki has a [general introduction to the `gps`
 approach](https://github.com/sdboyer/gps/wiki/Introduction-to-gps), as well
 as guides for folks [implementing
 tools](https://github.com/sdboyer/gps/wiki/gps-for-Implementors) or [looking
-to contribute](https://github.com/sdboyer/gps/wiki/Introduction-to-gps).
+to contribute](https://github.com/sdboyer/gps/wiki/gps-for-Contributors).
 
-**`gps` is progressing rapidly, but still beta, with a liberal sprinkling of panics.**
+**`gps` is progressing rapidly, but still in beta, with a concomitantly liberal sprinkling of panics.**
 
 ## Wait...a package management _library_?!
 
-Yup. Because it's what the Go ecosystem needs right now.
+Yup. See [the rationale](https://github.com/sdboyer/gps/wiki/Rationale).
 
-There are [scads of
-tools](https://github.com/golang/go/wiki/PackageManagementTools) out there, each
-tackling some slice of the Go package management domain. Some handle more than
-others, some impose more restrictions than others, and most are mutually
-incompatible (or mutually indifferent, which amounts to the same). This
-fragments the Go FLOSS ecosystem, harming the community as a whole.
+## Features
 
-As in all epic software arguments, some of the points of disagreement between
-tools/their authors are a bit silly. Many, though, are based on legitimate
-differences of opinion about what workflows, controls, and interfaces are
-best to give Go developers.
+A feature list for a package management library is a bit different from one for
+a package management tool. Instead of listing the things an end-user can do,
+we list the choices a tool *can* make and offer, in some form, to its users, as
+well as the non-choices/assumptions/constraints that `gps` imposes on a tool.
 
-Now, we're certainly no less opinionated than anyone else. But part of the
-challenge has been that, with a problem as
-[complex](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
-as package management, subtle design decisions made in pursuit of a particular
-workflow or interface can have far-reaching effects on architecture, leading to
-deep incompatibilities between tools and approaches.
+### Non-Choices
 
-We believe that many of [these
-differences](https://docs.google.com/document/d/1xrV9D5u8AKu1ip-A1W9JqhUmmeOhoI6d6zjVwvdn5mc/edit?usp=sharing)
-are incidental - and, given the right general solution, reconcilable. `gps` is
-our attempt at such a solution.
+We'd love for `gps`'s non-choices to be noncontroversial. But that's not always
+the case.
 
-By separating out the underlying problem into a standalone library, we are
-hoping to provide a common foundation for different tools. Such a foundation
-could improve interoperability, reduce harm to the ecosystem, and make the
-communal process of figuring out what's right for Go more about collaboration,
-and less about fiefdoms.
+Nevertheless, these non-choices remain because, taken as a whole, they make
+experiments and discussion around Go package management coherent and
+productive.
 
-### Assumptions
+* Go >=1.6, or 1.5 with `GO15VENDOREXPERIMENT=1` set
+* Everything under `vendor/` is volatile and controlled solely by the tool
+* A central cache of repositories is used (cannot be `GOPATH`)
+* A [**project**](https://godoc.org/github.com/sdboyer/gps#ProjectRoot) concept:
+  a tree of packages, all covered by one `vendor` directory
+* A [**manifest** and
+  **lock**](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#manifests-and-locks)
+  approach to tracking version and constraint information (sketched after this list)
+* Source repositories can be `git`, `bzr`, `hg` or `svn` (most of this work is handled by a [separate lib](https://github.com/Masterminds/vcs))
+* What the available versions are for a given project/repository (all branches, tags, or revs are eligible)
+  * In general, semver tags are preferred over plain tags, which are preferred over branches
+* The actual packages required (determined through import graph static analysis)
+  * How the import graph is statically analyzed (similar to `go/build`, but with a combinatorial view of build tags)
+* Package import cycles are not allowed ([not yet implemented](https://github.com/sdboyer/gps/issues/66))
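+
+To make the manifest-and-lock bullet concrete, here is an illustrative sketch
+of constraint data a manifest might carry, using this library's exported types
+(the projects and versions named here are invented):
+
+```go
+// Illustrative only: dependency constraints as a manifest might declare them.
+deps := []gps.ProjectConstraint{
+	{
+		Ident:      gps.ProjectIdentifier{ProjectRoot: "github.com/example/lib"},
+		Constraint: gps.NewVersion("v1.8.0"), // pin to a plain version tag
+	},
+	{
+		Ident: gps.ProjectIdentifier{
+			ProjectRoot: "gopkg.in/yaml.v2",
+			NetworkName: "github.com/go-yaml/yaml", // optional alternate source location
+		},
+		Constraint: gps.NewBranch("v2"), // track a branch instead
+	},
+}
+```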
 
-Ideally, `gps` could provide this shared foundation with no additional
-assumptions beyond pure Go source files. Sadly, package management is too
-complex to be assumption-less. So, `gps` tries to keep its assumptions to the
-minimum, supporting as many situations as possible while still maintaining a
-predictable, well-formed system.
+There are also some current non-choices that we would like to push into the realm of choice:
 
-* Go 1.6, or 1.5 with `GO15VENDOREXPERIMENT = 1` set. `vendor/`
-  directories are a requirement.
-* You don't manually change what's under `vendor/`. That’s tooling’s
-  job.
-* A **project** concept, where projects comprise the set of Go packages in a
-  rooted directory tree.  By happy (not) accident, `vendor/` directories also
-  just happen to cover a rooted tree.
-* A [**manifest**](https://godoc.org/github.com/sdboyer/gps#Manifest) and
-  [**lock**](https://godoc.org/github.com/sdboyer/gps#Lock) approach to
-  tracking version and constraint information. The solver takes manifest (and,
-  optionally, lock)-type data as inputs, and produces lock-type data as its
-  output. Tools decide how to actually store this data, but these should
-  generally be at the root of the project tree.
+* Different versions of packages from the same repository cannot be used
+* Importable projects that are not bound to the repository root
+* Source inference around different import path patterns (e.g., how `github.com/*` or `my_company/*` are handled)
 
-Manifests? Locks? Eeew. Yes, we also think it'd be swell if we didn't need
-metadata files. We love the idea of Go packages as standalone, self-describing
-code. Unfortunately, the wheels come off that idea as soon as versioning and
-cross-project/repository dependencies happen. But universe alignment is hard;
-trying to intermix version information directly with the code would only make
-matters worse.
+### Choices
+
+These choices represent many of the ways that `gps`-based tools could
+substantively differ from each other.
+
+Some of these are choices designed to encompass all options for topics on which
+reasonable people have disagreed. Others are simply important controls that no
+general library could know _a priori_.
+
+* How to store manifest and lock information (file(s)? a db?)
+* Which of the other package managers to interoperate with
+* Which types of version constraints to allow the user to specify (e.g., allowing [semver ranges](https://docs.npmjs.com/misc/semver) or not; see the sketch below)
+* Whether or not to strip nested `vendor` directories
+* Which packages in the import graph to [ignore](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#ignoring-packages) (if any)
+* What constraint [overrides](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#overrides) to apply (if any)
+* What [informational output](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#trace-and-tracelogger) to show the end user
+* What dependency version constraints are declared by the [root project](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#manifest-data)
+* What dependency version constraints are declared by [all dependencies](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#the-projectanalyzer)
+* Given a [previous solution](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#lock-data), [which versions to let change, and how](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#tochange-changeall-and-downgrade)
+  * In the absence of a previous solution, whether or not to use [preferred versions](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#preferred-versions)
+* Allowing, or not, the user to [swap in different network names](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#projectidentifier) for import paths (e.g. forks)
+* Specifying additional input/source packages not reachable from the root import graph ([not complete](https://github.com/sdboyer/gps/issues/42))
+
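+For instance, a tool that opts into semver ranges might construct a constraint
+like this (a sketch, assuming this package's `NewSemverConstraint` helper; the
+range string is just an example):
+
+```go
+// "^1.2.0" admits any version >=1.2.0 and <2.0.0.
+c, err := gps.NewSemverConstraint("^1.2.0")
+if err != nil {
+	// the input was not a parseable semver range
+}
+```
+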
+This list may not be exhaustive - see the
+[implementor's guide](https://github.com/sdboyer/gps/wiki/gps-for-Implementors)
+for a proper treatment.
 
 ## Contributing
 
diff --git a/vendor/github.com/sdboyer/gps/appveyor.yml b/vendor/github.com/sdboyer/gps/appveyor.yml
index 9bf23a3..8c6b1fd 100644
--- a/vendor/github.com/sdboyer/gps/appveyor.yml
+++ b/vendor/github.com/sdboyer/gps/appveyor.yml
@@ -12,7 +12,7 @@
 install:
   - go version
   - go env
-  - choco install bzr hg
+  - choco install bzr
   - set PATH=C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\;%PATH%
 build_script:
   - go get github.com/Masterminds/glide
@@ -20,5 +20,6 @@
 
 test_script:
   - go test
+  - go build example.go
 
 deploy: off
diff --git a/vendor/github.com/sdboyer/gps/bridge.go b/vendor/github.com/sdboyer/gps/bridge.go
index 8b26e6b..2aae74b 100644
--- a/vendor/github.com/sdboyer/gps/bridge.go
+++ b/vendor/github.com/sdboyer/gps/bridge.go
@@ -5,37 +5,27 @@
 	"os"
 	"path/filepath"
 	"sort"
+
+	"github.com/Masterminds/semver"
 )
 
 // sourceBridges provide an adapter to SourceManagers that tailor operations
 // for a single solve run.
 type sourceBridge interface {
-	getProjectInfo(pa atom) (Manifest, Lock, error)
-	listVersions(id ProjectIdentifier) ([]Version, error)
-	listPackages(id ProjectIdentifier, v Version) (PackageTree, error)
+	SourceManager // composes SourceManager
+	verifyRootDir(path string) error
 	computeRootReach() ([]string, error)
-	revisionPresentIn(id ProjectIdentifier, r Revision) (bool, error)
 	pairRevision(id ProjectIdentifier, r Revision) []Version
 	pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion
-	repoExists(id ProjectIdentifier) (bool, error)
 	vendorCodeExists(id ProjectIdentifier) (bool, error)
 	matches(id ProjectIdentifier, c Constraint, v Version) bool
 	matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool
 	intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint
-	verifyRootDir(path string) error
-	deduceRemoteRepo(path string) (*remoteRepo, error)
 }
 
 // bridge is an adapter around a proper SourceManager. It provides localized
 // caching that's tailored to the requirements of a particular solve run.
 //
-// It also performs transformations between ProjectIdentifiers, which is what
-// the solver primarily deals in, and ProjectRoot, which is what the
-// SourceManager primarily deals in. This separation is helpful because it keeps
-// the complexities of deciding what a particular name "means" entirely within
-// the solver, while the SourceManager can traffic exclusively in
-// globally-unique network names.
-//
 // Finally, it provides authoritative version/constraint operations, ensuring
 // that any possible approach to a match - even those not literally encoded in
 // the inputs - is achieved.
@@ -60,7 +50,7 @@
 	// layered on top of the proper SourceManager's cache; the only difference
 	// is that this keeps the versions sorted in the direction required by the
 	// current solve run
-	vlists map[ProjectRoot][]Version
+	vlists map[ProjectIdentifier][]Version
 }
 
 // Global factory func to create a bridge. This exists solely to allow tests to
@@ -69,34 +59,27 @@
 	return &bridge{
 		sm:     sm,
 		s:      s,
-		vlists: make(map[ProjectRoot][]Version),
+		vlists: make(map[ProjectIdentifier][]Version),
 	}
 }
 
-func (b *bridge) getProjectInfo(pa atom) (Manifest, Lock, error) {
-	if pa.id.ProjectRoot == b.s.params.ImportRoot {
+func (b *bridge) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
+	if id.ProjectRoot == b.s.params.ImportRoot {
 		return b.s.rm, b.s.rl, nil
 	}
-	return b.sm.GetProjectInfo(ProjectRoot(pa.id.netName()), pa.v)
+	return b.sm.GetManifestAndLock(id, v)
 }
 
-func (b *bridge) key(id ProjectIdentifier) ProjectRoot {
-	k := ProjectRoot(id.NetworkName)
-	if k == "" {
-		k = id.ProjectRoot
-	}
-
-	return k
+func (b *bridge) AnalyzerInfo() (string, *semver.Version) {
+	return b.sm.AnalyzerInfo()
 }
 
-func (b *bridge) listVersions(id ProjectIdentifier) ([]Version, error) {
-	k := b.key(id)
-
-	if vl, exists := b.vlists[k]; exists {
+func (b *bridge) ListVersions(id ProjectIdentifier) ([]Version, error) {
+	if vl, exists := b.vlists[id]; exists {
 		return vl, nil
 	}
 
-	vl, err := b.sm.ListVersions(k)
+	vl, err := b.sm.ListVersions(id)
 	// TODO(sdboyer) cache errors, too?
 	if err != nil {
 		return nil, err
@@ -108,18 +91,16 @@
 		sort.Sort(upgradeVersionSorter(vl))
 	}
 
-	b.vlists[k] = vl
+	b.vlists[id] = vl
 	return vl, nil
 }
 
-func (b *bridge) revisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
-	k := b.key(id)
-	return b.sm.RevisionPresentIn(k, r)
+func (b *bridge) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
+	return b.sm.RevisionPresentIn(id, r)
 }
 
-func (b *bridge) repoExists(id ProjectIdentifier) (bool, error) {
-	k := b.key(id)
-	return b.sm.RepoExists(k)
+func (b *bridge) SourceExists(id ProjectIdentifier) (bool, error) {
+	return b.sm.SourceExists(id)
 }
 
 func (b *bridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
@@ -134,7 +115,7 @@
 }
 
 func (b *bridge) pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion {
-	vl, err := b.listVersions(id)
+	vl, err := b.ListVersions(id)
 	if err != nil {
 		return nil
 	}
@@ -152,7 +133,7 @@
 }
 
 func (b *bridge) pairRevision(id ProjectIdentifier, r Revision) []Version {
-	vl, err := b.listVersions(id)
+	vl, err := b.ListVersions(id)
 	if err != nil {
 		return nil
 	}
@@ -402,14 +383,17 @@
 //
 // The root project is handled separately, as the source manager isn't
 // responsible for that code.
-func (b *bridge) listPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+func (b *bridge) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
 	if id.ProjectRoot == b.s.params.ImportRoot {
 		return b.listRootPackages()
 	}
 
-	// FIXME if we're aliasing here, the returned PackageTree will have
-	// unaliased import paths, which is super not correct
-	return b.sm.ListPackages(b.key(id), v)
+	return b.sm.ListPackages(id, v)
+}
+
+func (b *bridge) ExportProject(id ProjectIdentifier, v Version, path string) error {
+	//return b.sm.ExportProject(id, v, path)
+	panic("bridge should never be used to ExportProject")
 }
 
 // verifyRoot ensures that the provided path to the project root is in good
@@ -425,10 +409,8 @@
 	return nil
 }
 
-// deduceRemoteRepo deduces certain network-oriented properties about an import
-// path.
-func (b *bridge) deduceRemoteRepo(path string) (*remoteRepo, error) {
-	return deduceRemoteRepo(path)
+func (b *bridge) DeduceProjectRoot(ip string) (ProjectRoot, error) {
+	return b.sm.DeduceProjectRoot(ip)
 }
 
 // versionTypeUnion represents a set of versions that are, within the scope of
diff --git a/vendor/github.com/sdboyer/gps/circle.yml b/vendor/github.com/sdboyer/gps/circle.yml
index 5723c35..8be1609 100644
--- a/vendor/github.com/sdboyer/gps/circle.yml
+++ b/vendor/github.com/sdboyer/gps/circle.yml
@@ -1,19 +1,23 @@
 machine:
-    environment:
-        GO15VENDOREXPERIMENT: 1
-checkout:
-    post:
+  environment:
+    GO15VENDOREXPERIMENT: 1
+    PROJECT_ROOT: "github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME"
+    RD: "$HOME/.go_workspace/src/$PROJECT_ROOT"
 dependencies:
-    override:
-        - mkdir -pv $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME
-        - ln -Tsf $HOME/$CIRCLE_PROJECT_REPONAME $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
-        # Glide 0.10.1
-        - wget https://github.com/Masterminds/glide/releases/download/0.10.1/glide-0.10.1-linux-amd64.tar.gz
-        - tar -vxz -C $HOME/bin --strip=1 -f glide-0.10.1-linux-amd64.tar.gz
-        # Fetch deps with glide
-        - glide --home $HOME/.glide -y glide.yaml install --cache
-    cache_directories:
-        - "~/.glide"
+  pre:
+    - wget https://github.com/Masterminds/glide/releases/download/0.10.1/glide-0.10.1-linux-amd64.tar.gz
+    - tar -vxz -C $HOME/bin --strip=1 -f glide-0.10.1-linux-amd64.tar.gz
+  override:
+    - glide --home $HOME/.glide -y glide.yaml install --cache
+    - mkdir -p $RD
+    - rsync -azC --delete ./ $RD
+    #- ln -Tsf "$HOME/$CIRCLE_PROJECT_REPONAME" "$HOME/.go_workspace/src/$PROJECT_ROOT"
+  cache_directories:
+    - "~/.glide"
 test:
-    override:
-        - cd $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME && go test
+  pre:
+    - go vet
+  override:
+    - cd $RD && go test -v -coverprofile=coverage.txt -covermode=atomic
+    - cd $RD && go build example.go
+    - cd $RD && bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/sdboyer/gps/codecov.yml b/vendor/github.com/sdboyer/gps/codecov.yml
new file mode 100644
index 0000000..263381f
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/codecov.yml
@@ -0,0 +1,5 @@
+coverage:
+  ignore:
+  - remove_go16.go
+  - remove_go17.go
+  - errors.go
diff --git a/vendor/github.com/sdboyer/gps/constraints.go b/vendor/github.com/sdboyer/gps/constraints.go
index 43b8b09..affde86 100644
--- a/vendor/github.com/sdboyer/gps/constraints.go
+++ b/vendor/github.com/sdboyer/gps/constraints.go
@@ -2,6 +2,7 @@
 
 import (
 	"fmt"
+	"sort"
 
 	"github.com/Masterminds/semver"
 )
@@ -164,3 +165,126 @@
 func (noneConstraint) Intersect(Constraint) Constraint {
 	return none
 }
+
+// A ProjectConstraint combines a ProjectIdentifier with a Constraint. It
+// indicates that, if packages contained in the ProjectIdentifier enter the
+// depgraph, they must do so at a version that is allowed by the Constraint.
+type ProjectConstraint struct {
+	Ident      ProjectIdentifier
+	Constraint Constraint
+}
+
+type workingConstraint struct {
+	Ident                     ProjectIdentifier
+	Constraint                Constraint
+	overrNet, overrConstraint bool
+}
+
+type ProjectConstraints map[ProjectRoot]ProjectProperties
+
+func pcSliceToMap(l []ProjectConstraint, r ...[]ProjectConstraint) ProjectConstraints {
+	final := make(ProjectConstraints)
+
+	for _, pc := range l {
+		final[pc.Ident.ProjectRoot] = ProjectProperties{
+			NetworkName: pc.Ident.netName(),
+			Constraint:  pc.Constraint,
+		}
+	}
+
+	for _, pcs := range r {
+		for _, pc := range pcs {
+			if pp, exists := final[pc.Ident.ProjectRoot]; exists {
+				// Technically this should be done through a bridge for
+				// cross-version-type matching...but this is a one off for root and
+				// that's just ridiculous for this.
+				pp.Constraint = pp.Constraint.Intersect(pc.Constraint)
+				final[pc.Ident.ProjectRoot] = pp
+			} else {
+				final[pc.Ident.ProjectRoot] = ProjectProperties{
+					NetworkName: pc.Ident.netName(),
+					Constraint:  pc.Constraint,
+				}
+			}
+		}
+	}
+
+	return final
+}
+
+func (m ProjectConstraints) asSortedSlice() []ProjectConstraint {
+	pcs := make([]ProjectConstraint, len(m))
+
+	k := 0
+	for pr, pp := range m {
+		pcs[k] = ProjectConstraint{
+			Ident: ProjectIdentifier{
+				ProjectRoot: pr,
+				NetworkName: pp.NetworkName,
+			},
+			Constraint: pp.Constraint,
+		}
+		k++
+	}
+
+	sort.Stable(sortedConstraints(pcs))
+	return pcs
+}
+
+// overrideAll treats the ProjectConstraints map as an override map, and applies
+// overridden values to the input.
+//
+// A slice of workingConstraint is returned, allowing differentiation between
+// values that were or were not overridden.
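+//
+// For example (values invented for illustration):
+//
+//	ovr := ProjectConstraints{
+//		"github.com/foo/bar": ProjectProperties{
+//			NetworkName: "github.com/mine/bar", // swap in a fork
+//			Constraint:  NewBranch("stable"),
+//		},
+//	}
+//	wcs := ovr.overrideAll(deps) // deps is []ProjectConstraint from the manifest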
+func (m ProjectConstraints) overrideAll(in []ProjectConstraint) (out []workingConstraint) {
+	out = make([]workingConstraint, len(in))
+	k := 0
+	for _, pc := range in {
+		out[k] = m.override(pc)
+		k++
+	}
+
+	return
+}
+
+// override replaces a single ProjectConstraint with a workingConstraint,
+// overriding its values if a corresponding entry exists in the
+// ProjectConstraints map.
+func (m ProjectConstraints) override(pc ProjectConstraint) workingConstraint {
+	wc := workingConstraint{
+		Ident:      pc.Ident.normalize(), // necessary to normalize?
+		Constraint: pc.Constraint,
+	}
+
+	if pp, has := m[pc.Ident.ProjectRoot]; has {
+		// The rule for overrides is that *any* non-zero value for the prop
+		// should be considered an override, even if it's equal to what's
+		// already there.
+		if pp.Constraint != nil {
+			wc.Constraint = pp.Constraint
+			wc.overrConstraint = true
+		}
+
+		if pp.NetworkName != "" {
+			wc.Ident.NetworkName = pp.NetworkName
+			wc.overrNet = true
+		}
+
+	}
+
+	return wc
+}
+
+type sortedConstraints []ProjectConstraint
+
+func (s sortedConstraints) Len() int {
+	return len(s)
+}
+
+func (s sortedConstraints) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s sortedConstraints) Less(i, j int) bool {
+	return s[i].Ident.less(s[j].Ident)
+}
diff --git a/vendor/github.com/sdboyer/gps/deduce.go b/vendor/github.com/sdboyer/gps/deduce.go
new file mode 100644
index 0000000..25dc93d
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/deduce.go
@@ -0,0 +1,777 @@
+package gps
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"regexp"
+	"strings"
+)
+
+var (
+	gitSchemes = []string{"https", "ssh", "git", "http"}
+	bzrSchemes = []string{"https", "bzr+ssh", "bzr", "http"}
+	hgSchemes  = []string{"https", "ssh", "http"}
+	svnSchemes = []string{"https", "http", "svn", "svn+ssh"}
+)
+
+func validateVCSScheme(scheme, typ string) bool {
+	// everything allows plain ssh
+	if scheme == "ssh" {
+		return true
+	}
+
+	var schemes []string
+	switch typ {
+	case "git":
+		schemes = gitSchemes
+	case "bzr":
+		schemes = bzrSchemes
+	case "hg":
+		schemes = hgSchemes
+	case "svn":
+		schemes = svnSchemes
+	default:
+		panic(fmt.Sprintf("unsupported vcs type %s", typ))
+	}
+
+	for _, valid := range schemes {
+		if scheme == valid {
+			return true
+		}
+	}
+	return false
+}
+
+// Regexes for the different known import path flavors
+var (
+	// This regex allowed some usernames that github currently disallows. They
+	// may have allowed them in the past; keeping it in case we need to revert.
+	//ghRegex      = regexp.MustCompile(`^(?P<root>github\.com/([A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`)
+	ghRegex      = regexp.MustCompile(`^(?P<root>github\.com(/[A-Za-z0-9][-A-Za-z0-9]*[A-Za-z0-9]/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	gpinNewRegex = regexp.MustCompile(`^(?P<root>gopkg\.in(?:(/[a-zA-Z0-9][-a-zA-Z0-9]+)?)(/[a-zA-Z][-.a-zA-Z0-9]*)\.((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(?:-unstable)?)(?:\.git)?)((?:/[a-zA-Z0-9][-.a-zA-Z0-9]*)*)$`)
+	//gpinOldRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-z0-9][-a-z0-9]+)/)?((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)/([a-zA-Z][-a-zA-Z0-9]*)(?:\.git)?)((?:/[a-zA-Z][-a-zA-Z0-9]*)*)$`)
+	bbRegex = regexp.MustCompile(`^(?P<root>bitbucket\.org(?P<bitname>/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	//lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+)(/[A-Za-z0-9-._]+)?)(/.+)?`)
+	lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net(/[A-Za-z0-9-._]+))((?:/[A-Za-z0-9_.\-]+)*)?`)
+	//glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+)$`)
+	glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net(/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	//gcRegex      = regexp.MustCompile(`^(?P<root>code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`)
+	jazzRegex         = regexp.MustCompile(`^(?P<root>hub\.jazz\.net(/git/[a-z0-9]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	apacheRegex       = regexp.MustCompile(`^(?P<root>git\.apache\.org(/[a-z0-9_.\-]+\.git))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	vcsExtensionRegex = regexp.MustCompile(`^(?P<root>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/~]*?\.(?P<vcs>bzr|git|hg|svn))((?:/[A-Za-z0-9_.\-]+)*)$`)
+)
+
+// Other helper regexes
+var (
+	scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
+	pathvld     = regexp.MustCompile(`^([A-Za-z0-9-]+)(\.[A-Za-z0-9-]+)+(/[A-Za-z0-9-_.~]+)*$`)
+)
+
+func pathDeducerTrie() deducerTrie {
+	dxt := newDeducerTrie()
+
+	dxt.Insert("github.com/", githubDeducer{regexp: ghRegex})
+	dxt.Insert("gopkg.in/", gopkginDeducer{regexp: gpinNewRegex})
+	dxt.Insert("bitbucket.org/", bitbucketDeducer{regexp: bbRegex})
+	dxt.Insert("launchpad.net/", launchpadDeducer{regexp: lpRegex})
+	dxt.Insert("git.launchpad.net/", launchpadGitDeducer{regexp: glpRegex})
+	dxt.Insert("hub.jazz.net/", jazzDeducer{regexp: jazzRegex})
+	dxt.Insert("git.apache.org/", apacheDeducer{regexp: apacheRegex})
+
+	return dxt
+}
+
+type pathDeducer interface {
+	deduceRoot(string) (string, error)
+	deduceSource(string, *url.URL) (maybeSource, error)
+}
+
+type githubDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m githubDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on github.com", path)
+	}
+
+	return "github.com" + v[2], nil
+}
+
+func (m githubDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on github.com", path)
+	}
+
+	u.Host = "github.com"
+	u.Path = v[2]
+
+	if u.Scheme == "ssh" && u.User != nil && u.User.Username() != "git" {
+		return nil, fmt.Errorf("github ssh must be accessed via the 'git' user; %s was provided", u.User.Username())
+	} else if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "git") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+		}
+		if u.Scheme == "ssh" {
+			u.User = url.User("git")
+		}
+		return maybeGitSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		if scheme == "ssh" {
+			u2.User = url.User("git")
+		}
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+type bitbucketDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m bitbucketDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on bitbucket.org", path)
+	}
+
+	return "bitbucket.org" + v[2], nil
+}
+
+func (m bitbucketDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on bitbucket.org", path)
+	}
+
+	u.Host = "bitbucket.org"
+	u.Path = v[2]
+
+	// This isn't definitive, but it'll probably catch most
+	isgit := strings.HasSuffix(u.Path, ".git") || (u.User != nil && u.User.Username() == "git")
+	ishg := strings.HasSuffix(u.Path, ".hg") || (u.User != nil && u.User.Username() == "hg")
+
+	// TODO(sdboyer) resolve scm ambiguity if needed by querying bitbucket's REST API
+	if u.Scheme != "" {
+		validgit, validhg := validateVCSScheme(u.Scheme, "git"), validateVCSScheme(u.Scheme, "hg")
+		if isgit {
+			if !validgit {
+				// This is unreachable for now, as the git schemes are a
+				// superset of the hg schemes
+				return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+			}
+			return maybeGitSource{url: u}, nil
+		} else if ishg {
+			if !validhg {
+				return nil, fmt.Errorf("%s is not a valid scheme for accessing an hg repository", u.Scheme)
+			}
+			return maybeHgSource{url: u}, nil
+		} else if !validgit && !validhg {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing either a git or hg repository", u.Scheme)
+		}
+
+		// No other choice, make an option for both git and hg
+		return maybeSources{
+			maybeHgSource{url: u},
+			maybeGitSource{url: u},
+		}, nil
+	}
+
+	mb := make(maybeSources, 0)
+	// git is probably more common, even on bitbucket. however, bitbucket
+	// appears to fail _extremely_ slowly on git pings (ls-remote) when the
+	// underlying repository is actually an hg repository, so it's better
+	// to try hg first.
+	if !isgit {
+		for _, scheme := range hgSchemes {
+			u2 := *u
+			if scheme == "ssh" {
+				u2.User = url.User("hg")
+			}
+			u2.Scheme = scheme
+			mb = append(mb, maybeHgSource{url: &u2})
+		}
+	}
+
+	if !ishg {
+		for _, scheme := range gitSchemes {
+			u2 := *u
+			if scheme == "ssh" {
+				u2.User = url.User("git")
+			}
+			u2.Scheme = scheme
+			mb = append(mb, maybeGitSource{url: &u2})
+		}
+	}
+
+	return mb, nil
+}
+
+type gopkginDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m gopkginDeducer) deduceRoot(p string) (string, error) {
+	v, err := m.parseAndValidatePath(p)
+	if err != nil {
+		return "", err
+	}
+
+	return v[1], nil
+}
+
+func (m gopkginDeducer) parseAndValidatePath(p string) ([]string, error) {
+	v := m.regexp.FindStringSubmatch(p)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on gopkg.in", p)
+	}
+
+	// We duplicate some logic from the gopkg.in server in order to validate the
+	// import path string without having to make a network request
+	if strings.Contains(v[4], ".") {
+		return nil, fmt.Errorf("%s is not a valid import path; gopkg.in only allows major versions (%q instead of %q)",
+			p, v[4][:strings.Index(v[4], ".")], v[4])
+	}
+
+	return v, nil
+}
+
+func (m gopkginDeducer) deduceSource(p string, u *url.URL) (maybeSource, error) {
+	// Reuse root detection logic for initial validation
+	v, err := m.parseAndValidatePath(p)
+	if err != nil {
+		return nil, err
+	}
+
+	// Putting a scheme on gopkg.in would be really weird, disallow it
+	if u.Scheme != "" {
+		return nil, fmt.Errorf("Specifying alternate schemes on gopkg.in imports is not permitted")
+	}
+
+	// gopkg.in is always backed by github
+	u.Host = "github.com"
+	if v[2] == "" {
+		elem := v[3][1:]
+		u.Path = path.Join("/go-"+elem, elem)
+	} else {
+		u.Path = path.Join(v[2], v[3])
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		if scheme == "ssh" {
+			u2.User = url.User("git")
+		}
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+type launchpadDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m launchpadDeducer) deduceRoot(path string) (string, error) {
+	// TODO(sdboyer) lp handling is nasty - there's ambiguities which can only really
+	// be resolved with a metadata request. See https://github.com/golang/go/issues/11436
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on launchpad.net", path)
+	}
+
+	return "launchpad.net" + v[2], nil
+}
+
+func (m launchpadDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on launchpad.net", path)
+	}
+
+	u.Host = "launchpad.net"
+	u.Path = v[2]
+
+	if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "bzr") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a bzr repository", u.Scheme)
+		}
+		return maybeBzrSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(bzrSchemes))
+	for k, scheme := range bzrSchemes {
+		u2 := *u
+		u2.Scheme = scheme
+		mb[k] = maybeBzrSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+type launchpadGitDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m launchpadGitDeducer) deduceRoot(path string) (string, error) {
+	// TODO(sdboyer) same ambiguity issues as with normal bzr lp
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on git.launchpad.net", path)
+	}
+
+	return "git.launchpad.net" + v[2], nil
+}
+
+func (m launchpadGitDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on git.launchpad.net", path)
+	}
+
+	u.Host = "git.launchpad.net"
+	u.Path = v[2]
+
+	if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "git") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+		}
+		return maybeGitSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+type jazzDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m jazzDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on hub.jazz.net", path)
+	}
+
+	return "hub.jazz.net" + v[2], nil
+}
+
+func (m jazzDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on hub.jazz.net", path)
+	}
+
+	u.Host = "hub.jazz.net"
+	u.Path = v[2]
+
+	switch u.Scheme {
+	case "":
+		u.Scheme = "https"
+		fallthrough
+	case "https":
+		return maybeGitSource{url: u}, nil
+	default:
+		return nil, fmt.Errorf("IBM's jazz hub only supports https, %s is not allowed", u.String())
+	}
+}
+
+type apacheDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m apacheDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on git.apache.org", path)
+	}
+
+	return "git.apache.org" + v[2], nil
+}
+
+func (m apacheDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on git.apache.org", path)
+	}
+
+	u.Host = "git.apache.org"
+	u.Path = v[2]
+
+	if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "git") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+		}
+		return maybeGitSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+type vcsExtensionDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m vcsExtensionDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s contains no vcs extension hints for matching", path)
+	}
+
+	return v[1], nil
+}
+
+func (m vcsExtensionDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s contains no vcs extension hints for matching", path)
+	}
+
+	switch v[4] {
+	case "git", "hg", "bzr":
+		x := strings.SplitN(v[1], "/", 2)
+		// TODO(sdboyer) is this actually correct for bzr?
+		u.Host = x[0]
+		u.Path = "/" + x[1]
+
+		if u.Scheme != "" {
+			if !validateVCSScheme(u.Scheme, v[4]) {
+				return nil, fmt.Errorf("%s is not a valid scheme for accessing %s repositories (path %s)", u.Scheme, v[4], path)
+			}
+
+			switch v[4] {
+			case "git":
+				return maybeGitSource{url: u}, nil
+			case "bzr":
+				return maybeBzrSource{url: u}, nil
+			case "hg":
+				return maybeHgSource{url: u}, nil
+			}
+		}
+
+		var schemes []string
+		var mb maybeSources
+		var f func(k int, u *url.URL)
+
+		switch v[4] {
+		case "git":
+			schemes = gitSchemes
+			f = func(k int, u *url.URL) {
+				mb[k] = maybeGitSource{url: u}
+			}
+		case "bzr":
+			schemes = bzrSchemes
+			f = func(k int, u *url.URL) {
+				mb[k] = maybeBzrSource{url: u}
+			}
+		case "hg":
+			schemes = hgSchemes
+			f = func(k int, u *url.URL) {
+				mb[k] = maybeHgSource{url: u}
+			}
+		}
+
+		mb = make(maybeSources, len(schemes))
+		for k, scheme := range schemes {
+			u2 := *u
+			u2.Scheme = scheme
+			f(k, &u2)
+		}
+
+		return mb, nil
+	default:
+		return nil, fmt.Errorf("unknown repository type: %q", v[4])
+	}
+}
+
+type stringFuture func() (string, error)
+type sourceFuture func() (source, string, error)
+type partialSourceFuture func(string, ProjectAnalyzer) sourceFuture
+
+type deductionFuture struct {
+	// rslow indicates that the root future may be a slow call (that it has to
+	// hit the network for some reason)
+	rslow bool
+	root  stringFuture
+	psf   partialSourceFuture
+}
+
+// deduceFromPath takes an import path and attempts to deduce various
+// metadata about it - what type of source should handle it, and where its
+// "root" is (for vcs repositories, the repository root).
+//
+// The results are wrapped in futures, as most of these operations require at
+// least some network activity to complete. For the first return value, network
+// activity will be triggered when the future is called. For the second,
+// network activity is triggered only when calling the sourceFuture returned
+// from the partialSourceFuture.
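+//
+// A usage sketch (hypothetical caller; error handling elided):
+//
+//	fut, err := sm.deduceFromPath("github.com/sdboyer/gps")
+//	root, rerr := fut.root()       // cheap for known hosts; hits the network for vanity imports
+//	srcf := fut.psf(cachedir, an)  // kicks off source setup in the background
+//	src, ident, serr := srcf()     // blocks until the source is ready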
+func (sm *SourceMgr) deduceFromPath(path string) (deductionFuture, error) {
+	opath := path
+	u, path, err := normalizeURI(path)
+	if err != nil {
+		return deductionFuture{}, err
+	}
+
+	// Helpers to futurize the results from deducers
+	strfut := func(s string) stringFuture {
+		return func() (string, error) {
+			return s, nil
+		}
+	}
+
+	srcfut := func(mb maybeSource) partialSourceFuture {
+		return func(cachedir string, an ProjectAnalyzer) sourceFuture {
+			var src source
+			var ident string
+			var err error
+
+			c := make(chan struct{}, 1)
+			go func() {
+				defer close(c)
+				src, ident, err = mb.try(cachedir, an)
+			}()
+
+			return func() (source, string, error) {
+				<-c
+				return src, ident, err
+			}
+		}
+	}
+
+	// First, try the root path-based matches
+	if _, mtchi, has := sm.dxt.LongestPrefix(path); has {
+		mtch := mtchi.(pathDeducer)
+		root, err := mtch.deduceRoot(path)
+		if err != nil {
+			return deductionFuture{}, err
+		}
+		mb, err := mtch.deduceSource(path, u)
+		if err != nil {
+			return deductionFuture{}, err
+		}
+
+		return deductionFuture{
+			rslow: false,
+			root:  strfut(root),
+			psf:   srcfut(mb),
+		}, nil
+	}
+
+	// Next, try the vcs extension-based (infix) matcher
+	exm := vcsExtensionDeducer{regexp: vcsExtensionRegex}
+	if root, err := exm.deduceRoot(path); err == nil {
+		mb, err := exm.deduceSource(path, u)
+		if err != nil {
+			return deductionFuture{}, err
+		}
+
+		return deductionFuture{
+			rslow: false,
+			root:  strfut(root),
+			psf:   srcfut(mb),
+		}, nil
+	}
+
+	// No luck so far. maybe it's one of them vanity imports?
+	// We have to get a little fancier for the metadata lookup by chaining the
+	// source future onto the metadata future
+
+	// Declare these out here so they're available for the source future
+	var vcs string
+	var ru *url.URL
+
+	// Kick off the vanity metadata fetch
+	var importroot string
+	var futerr error
+	c := make(chan struct{}, 1)
+	go func() {
+		defer close(c)
+		var reporoot string
+		importroot, vcs, reporoot, futerr = parseMetadata(path)
+		if futerr != nil {
+			futerr = fmt.Errorf("unable to deduce repository and source type for: %q", opath)
+			return
+		}
+
+		// If we got something back at all, then it supersedes the actual input for
+		// the real URL to hit
+		ru, futerr = url.Parse(reporoot)
+		if futerr != nil {
+			futerr = fmt.Errorf("server returned bad URL when searching for vanity import: %q", reporoot)
+			importroot = ""
+			return
+		}
+	}()
+
+	// Set up the root func to catch the result
+	root := func() (string, error) {
+		<-c
+		return importroot, futerr
+	}
+
+	src := func(cachedir string, an ProjectAnalyzer) sourceFuture {
+		var src source
+		var ident string
+		var err error
+
+		c := make(chan struct{}, 1)
+		go func() {
+			defer close(c)
+			// make sure the metadata future is finished (without errors), thus
+			// guaranteeing that ru and vcs will be populated. Assign with =,
+			// not :=, so the error propagates to the returned sourceFuture.
+			_, err = root()
+			if err != nil {
+				return
+			}
+			ident = ru.String()
+
+			var m maybeSource
+			switch vcs {
+			case "git":
+				m = maybeGitSource{url: ru}
+			case "bzr":
+				m = maybeBzrSource{url: ru}
+			case "hg":
+				m = maybeHgSource{url: ru}
+			}
+
+			if m != nil {
+				src, ident, err = m.try(cachedir, an)
+			} else {
+				err = fmt.Errorf("unsupported vcs type %s", vcs)
+			}
+		}()
+
+		return func() (source, string, error) {
+			<-c
+			return src, ident, err
+		}
+	}
+
+	return deductionFuture{
+		rslow: true,
+		root:  root,
+		psf:   src,
+	}, nil
+}
+
+func normalizeURI(p string) (u *url.URL, newpath string, err error) {
+	if m := scpSyntaxRe.FindStringSubmatch(p); m != nil {
+		// Match SCP-like syntax and convert it to a URL.
+		// Eg, "git@github.com:user/repo" becomes
+		// "ssh://git@github.com/user/repo".
+		u = &url.URL{
+			Scheme: "ssh",
+			User:   url.User(m[1]),
+			Host:   m[2],
+			Path:   "/" + m[3],
+			// TODO(sdboyer) This is what stdlib sets; grok why better
+			//RawPath: m[3],
+		}
+	} else {
+		u, err = url.Parse(p)
+		if err != nil {
+			return nil, "", fmt.Errorf("%q is not a valid URI", p)
+		}
+	}
+
+	// If no scheme was passed, then the entire path will have been put into
+	// u.Path. Either way, construct the normalized path correctly.
+	if u.Host == "" {
+		newpath = p
+	} else {
+		newpath = path.Join(u.Host, u.Path)
+	}
+
+	if !pathvld.MatchString(newpath) {
+		return nil, "", fmt.Errorf("%q is not a valid import path", newpath)
+	}
+
+	return
+}
+
+// fetchMetadata fetches the remote metadata for path.
+func fetchMetadata(path string) (rc io.ReadCloser, err error) {
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("unable to determine remote metadata protocol: %s", err)
+		}
+	}()
+
+	// try https first
+	rc, err = doFetchMetadata("https", path)
+	if err == nil {
+		return
+	}
+
+	rc, err = doFetchMetadata("http", path)
+	return
+}
+
+func doFetchMetadata(scheme, path string) (io.ReadCloser, error) {
+	url := fmt.Sprintf("%s://%s?go-get=1", scheme, path)
+	switch scheme {
+	case "https", "http":
+		resp, err := http.Get(url)
+		if err != nil {
+			return nil, fmt.Errorf("failed to access url %q", url)
+		}
+		return resp.Body, nil
+	default:
+		return nil, fmt.Errorf("unknown remote protocol scheme: %q", scheme)
+	}
+}
+
+// parseMetadata fetches and decodes remote metadata for path, returning the
+// matched import prefix, vcs type, and repository root URL.
+func parseMetadata(path string) (string, string, string, error) {
+	rc, err := fetchMetadata(path)
+	if err != nil {
+		return "", "", "", err
+	}
+	defer rc.Close()
+
+	imports, err := parseMetaGoImports(rc)
+	if err != nil {
+		return "", "", "", err
+	}
+	match := -1
+	for i, im := range imports {
+		if !strings.HasPrefix(path, im.Prefix) {
+			continue
+		}
+		if match != -1 {
+			return "", "", "", fmt.Errorf("multiple meta tags match import path %q", path)
+		}
+		match = i
+	}
+	if match == -1 {
+		return "", "", "", fmt.Errorf("go-import metadata not found")
+	}
+	return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil
+}
diff --git a/vendor/github.com/sdboyer/gps/deduce_test.go b/vendor/github.com/sdboyer/gps/deduce_test.go
new file mode 100644
index 0000000..23ffe38
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/deduce_test.go
@@ -0,0 +1,619 @@
+package gps
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/url"
+	"reflect"
+	"sync"
+	"testing"
+)
+
+type pathDeductionFixture struct {
+	in     string
+	root   string
+	rerr   error
+	mb     maybeSource
+	srcerr error
+}
+
+// helper func to generate testing *url.URLs, panicking on err
+func mkurl(s string) (u *url.URL) {
+	var err error
+	u, err = url.Parse(s)
+	if err != nil {
+		panic(fmt.Sprint("string is not a valid URL:", s))
+	}
+	return
+}
+
+var pathDeductionFixtures = map[string][]pathDeductionFixture{
+	"github": []pathDeductionFixture{
+		{
+			in:   "github.com/sdboyer/gps",
+			root: "github.com/sdboyer/gps",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "github.com/sdboyer/gps/foo",
+			root: "github.com/sdboyer/gps",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			// TODO(sdboyer) is this a problem for enforcing uniqueness? do we
+			// need to collapse these extensions?
+			in:   "github.com/sdboyer/gps.git/foo",
+			root: "github.com/sdboyer/gps.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps.git")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps.git")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps.git")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps.git")},
+			},
+		},
+		{
+			in:   "git@github.com:sdboyer/gps",
+			root: "github.com/sdboyer/gps",
+			mb:   maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+		},
+		{
+			in:   "https://github.com/sdboyer/gps",
+			root: "github.com/sdboyer/gps",
+			mb:   maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+		},
+		{
+			in:   "https://github.com/sdboyer/gps/foo/bar",
+			root: "github.com/sdboyer/gps",
+			mb:   maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+		},
+		// some invalid github username patterns
+		{
+			in:   "github.com/-sdboyer/gps/foo",
+			rerr: errors.New("github.com/-sdboyer/gps/foo is not a valid path for a source on github.com"),
+		},
+		{
+			in:   "github.com/sdboyer-/gps/foo",
+			rerr: errors.New("github.com/sdboyer-/gps/foo is not a valid path for a source on github.com"),
+		},
+		{
+			in:   "github.com/sdbo.yer/gps/foo",
+			rerr: errors.New("github.com/sdbo.yer/gps/foo is not a valid path for a source on github.com"),
+		},
+		{
+			in:   "github.com/sdbo_yer/gps/foo",
+			rerr: errors.New("github.com/sdbo_yer/gps/foo is not a valid path for a source on github.com"),
+		},
+		// Regression - gh does allow two-letter usernames
+		{
+			in:   "github.com/kr/pretty",
+			root: "github.com/kr/pretty",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/kr/pretty")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/kr/pretty")},
+				maybeGitSource{url: mkurl("git://github.com/kr/pretty")},
+				maybeGitSource{url: mkurl("http://github.com/kr/pretty")},
+			},
+		},
+	},
+	"gopkg.in": []pathDeductionFixture{
+		{
+			in:   "gopkg.in/sdboyer/gps.v0",
+			root: "gopkg.in/sdboyer/gps.v0",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "gopkg.in/sdboyer/gps.v0/foo",
+			root: "gopkg.in/sdboyer/gps.v0",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "gopkg.in/sdboyer/gps.v1/foo/bar",
+			root: "gopkg.in/sdboyer/gps.v1",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "gopkg.in/yaml.v1",
+			root: "gopkg.in/yaml.v1",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("git://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("http://github.com/go-yaml/yaml")},
+			},
+		},
+		{
+			in:   "gopkg.in/yaml.v1/foo/bar",
+			root: "gopkg.in/yaml.v1",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("git://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("http://github.com/go-yaml/yaml")},
+			},
+		},
+		{
+			in:   "gopkg.in/inf.v0",
+			root: "gopkg.in/inf.v0",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/go-inf/inf")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/go-inf/inf")},
+				maybeGitSource{url: mkurl("git://github.com/go-inf/inf")},
+				maybeGitSource{url: mkurl("http://github.com/go-inf/inf")},
+			},
+		},
+		{
+			// gopkg.in only allows specifying major version in import path
+			in:   "gopkg.in/yaml.v1.2",
+			rerr: errors.New("gopkg.in/yaml.v1.2 is not a valid import path; gopkg.in only allows major versions (\"v1\" instead of \"v1.2\")"),
+		},
+	},
+	"jazz": []pathDeductionFixture{
+		// IBM hub devops services - fixtures borrowed from go get
+		{
+			in:   "hub.jazz.net/git/user1/pkgname",
+			root: "hub.jazz.net/git/user1/pkgname",
+			mb:   maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkgname")},
+		},
+		{
+			in:   "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule",
+			root: "hub.jazz.net/git/user1/pkgname",
+			mb:   maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkgname")},
+		},
+		{
+			in:   "hub.jazz.net/someotherprefix",
+			rerr: errors.New("hub.jazz.net/someotherprefix is not a valid path for a source on hub.jazz.net"),
+		},
+		{
+			in:   "hub.jazz.net/someotherprefix/user1/packagename",
+			rerr: errors.New("hub.jazz.net/someotherprefix/user1/packagename is not a valid path for a source on hub.jazz.net"),
+		},
+		// Spaces are not valid in user names or package names
+		{
+			in:   "hub.jazz.net/git/User 1/pkgname",
+			rerr: errors.New("hub.jazz.net/git/User 1/pkgname is not a valid path for a source on hub.jazz.net"),
+		},
+		{
+			in:   "hub.jazz.net/git/user1/pkg name",
+			rerr: errors.New("hub.jazz.net/git/user1/pkg name is not a valid path for a source on hub.jazz.net"),
+		},
+		// Dots are not valid in user names
+		{
+			in:   "hub.jazz.net/git/user.1/pkgname",
+			rerr: errors.New("hub.jazz.net/git/user.1/pkgname is not a valid path for a source on hub.jazz.net"),
+		},
+		{
+			in:   "hub.jazz.net/git/user1/pkg.name",
+			root: "hub.jazz.net/git/user1/pkg.name",
+			mb:   maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkg.name")},
+		},
+		// User names cannot have uppercase letters
+		{
+			in:   "hub.jazz.net/git/USER/pkgname",
+			rerr: errors.New("hub.jazz.net/git/USER/pkgname is not a valid path for a source on hub.jazz.net"),
+		},
+	},
+	"bitbucket": []pathDeductionFixture{
+		{
+			in:   "bitbucket.org/sdboyer/reporoot",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+			},
+		},
+		{
+			in:   "bitbucket.org/sdboyer/reporoot/foo/bar",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+			},
+		},
+		{
+			in:   "https://bitbucket.org/sdboyer/reporoot/foo/bar",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+			},
+		},
+		// Less standard behaviors possible due to the hg/git ambiguity
+		{
+			in:   "bitbucket.org/sdboyer/reporoot.git",
+			root: "bitbucket.org/sdboyer/reporoot.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot.git")},
+				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot.git")},
+				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot.git")},
+				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot.git")},
+			},
+		},
+		{
+			in:   "git@bitbucket.org:sdboyer/reporoot.git",
+			root: "bitbucket.org/sdboyer/reporoot.git",
+			mb:   maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot.git")},
+		},
+		{
+			in:   "bitbucket.org/sdboyer/reporoot.hg",
+			root: "bitbucket.org/sdboyer/reporoot.hg",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot.hg")},
+				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot.hg")},
+				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot.hg")},
+			},
+		},
+		{
+			in:   "hg@bitbucket.org:sdboyer/reporoot",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb:   maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
+		},
+		{
+			in:     "git://bitbucket.org/sdboyer/reporoot.hg",
+			root:   "bitbucket.org/sdboyer/reporoot.hg",
+			srcerr: errors.New("git is not a valid scheme for accessing an hg repository"),
+		},
+	},
+	"launchpad": []pathDeductionFixture{
+		// tests for launchpad, mostly bazaar
+		// TODO(sdboyer) need more tests to deal w/launchpad's oddities
+		{
+			in:   "launchpad.net/govcstestbzrrepo",
+			root: "launchpad.net/govcstestbzrrepo",
+			mb: maybeSources{
+				maybeBzrSource{url: mkurl("https://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr+ssh://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("http://launchpad.net/govcstestbzrrepo")},
+			},
+		},
+		{
+			in:   "launchpad.net/govcstestbzrrepo/foo/bar",
+			root: "launchpad.net/govcstestbzrrepo",
+			mb: maybeSources{
+				maybeBzrSource{url: mkurl("https://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr+ssh://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("http://launchpad.net/govcstestbzrrepo")},
+			},
+		},
+		{
+			in:   "launchpad.net/repo root",
+			rerr: errors.New("launchpad.net/repo root is not a valid path for a source on launchpad.net"),
+		},
+	},
+	"git.launchpad": []pathDeductionFixture{
+		{
+			in:   "git.launchpad.net/reporoot",
+			root: "git.launchpad.net/reporoot",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("git://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("http://git.launchpad.net/reporoot")},
+			},
+		},
+		{
+			in:   "git.launchpad.net/reporoot/foo/bar",
+			root: "git.launchpad.net/reporoot",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("git://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("http://git.launchpad.net/reporoot")},
+			},
+		},
+		{
+			in:   "git.launchpad.net/repo root",
+			rerr: errors.New("git.launchpad.net/repo root is not a valid path for a source on launchpad.net"),
+		},
+	},
+	"apache": []pathDeductionFixture{
+		{
+			in:   "git.apache.org/package-name.git",
+			root: "git.apache.org/package-name.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("ssh://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("git://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("http://git.apache.org/package-name.git")},
+			},
+		},
+		{
+			in:   "git.apache.org/package-name.git/foo/bar",
+			root: "git.apache.org/package-name.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("ssh://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("git://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("http://git.apache.org/package-name.git")},
+			},
+		},
+	},
+	"vcsext": []pathDeductionFixture{
+		// VCS extension-based syntax
+		{
+			in:   "foobar.com/baz.git",
+			root: "foobar.com/baz.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
+			},
+		},
+		{
+			in:   "foobar.com/baz.git/extra/path",
+			root: "foobar.com/baz.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
+			},
+		},
+		{
+			in:   "foobar.com/baz.bzr",
+			root: "foobar.com/baz.bzr",
+			mb: maybeSources{
+				maybeBzrSource{url: mkurl("https://foobar.com/baz.bzr")},
+				maybeBzrSource{url: mkurl("bzr+ssh://foobar.com/baz.bzr")},
+				maybeBzrSource{url: mkurl("bzr://foobar.com/baz.bzr")},
+				maybeBzrSource{url: mkurl("http://foobar.com/baz.bzr")},
+			},
+		},
+		{
+			in:   "foo-bar.com/baz.hg",
+			root: "foo-bar.com/baz.hg",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://foo-bar.com/baz.hg")},
+				maybeHgSource{url: mkurl("ssh://foo-bar.com/baz.hg")},
+				maybeHgSource{url: mkurl("http://foo-bar.com/baz.hg")},
+			},
+		},
+		{
+			in:   "git@foobar.com:baz.git",
+			root: "foobar.com/baz.git",
+			mb:   maybeGitSource{url: mkurl("ssh://git@foobar.com/baz.git")},
+		},
+		{
+			in:   "bzr+ssh://foobar.com/baz.bzr",
+			root: "foobar.com/baz.bzr",
+			mb:   maybeBzrSource{url: mkurl("bzr+ssh://foobar.com/baz.bzr")},
+		},
+		{
+			in:   "ssh://foobar.com/baz.bzr",
+			root: "foobar.com/baz.bzr",
+			mb:   maybeBzrSource{url: mkurl("ssh://foobar.com/baz.bzr")},
+		},
+		{
+			in:   "https://foobar.com/baz.hg",
+			root: "foobar.com/baz.hg",
+			mb:   maybeHgSource{url: mkurl("https://foobar.com/baz.hg")},
+		},
+		{
+			in:     "git://foobar.com/baz.hg",
+			root:   "foobar.com/baz.hg",
+			srcerr: errors.New("git is not a valid scheme for accessing hg repositories (path foobar.com/baz.hg)"),
+		},
+		// who knows why anyone would do this, but having a second vcs ext
+		// shouldn't throw us off - only the first one counts
+		{
+			in:   "foobar.com/baz.git/quark/quizzle.bzr/quorum",
+			root: "foobar.com/baz.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
+			},
+		},
+	},
+	"vanity": []pathDeductionFixture{
+		// Vanity imports
+		{
+			in:   "golang.org/x/exp",
+			root: "golang.org/x/exp",
+			mb:   maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
+		},
+		{
+			in:   "golang.org/x/exp/inotify",
+			root: "golang.org/x/exp",
+			mb:   maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
+		},
+		{
+			in:   "rsc.io/pdf",
+			root: "rsc.io/pdf",
+			mb:   maybeGitSource{url: mkurl("https://github.com/rsc/pdf")},
+		},
+	},
+}
+
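+// TestDeduceFromPath exercises each static path deducer against its fixture
+// group, covering root deduction, source deduction, and expected errors.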
+func TestDeduceFromPath(t *testing.T) {
+	for typ, fixtures := range pathDeductionFixtures {
+		var deducer pathDeducer
+		switch typ {
+		case "github":
+			deducer = githubDeducer{regexp: ghRegex}
+		case "gopkg.in":
+			deducer = gopkginDeducer{regexp: gpinNewRegex}
+		case "jazz":
+			deducer = jazzDeducer{regexp: jazzRegex}
+		case "bitbucket":
+			deducer = bitbucketDeducer{regexp: bbRegex}
+		case "launchpad":
+			deducer = launchpadDeducer{regexp: lpRegex}
+		case "git.launchpad":
+			deducer = launchpadGitDeducer{regexp: glpRegex}
+		case "apache":
+			deducer = apacheDeducer{regexp: apacheRegex}
+		case "vcsext":
+			deducer = vcsExtensionDeducer{regexp: vcsExtensionRegex}
+		default:
+			// Should just be the vanity imports, which are tested elsewhere
+			continue
+		}
+
+		var printmb func(mb maybeSource) string
+		printmb = func(mb maybeSource) string {
+			switch tmb := mb.(type) {
+			case maybeSources:
+				var buf bytes.Buffer
+				fmt.Fprintf(&buf, "%v maybeSources:", len(tmb))
+				for _, elem := range tmb {
+					fmt.Fprintf(&buf, "\n\t\t%s", printmb(elem))
+				}
+				return buf.String()
+			case maybeGitSource:
+				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
+			case maybeBzrSource:
+				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
+			case maybeHgSource:
+				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
+			default:
+				t.Errorf("Unknown maybeSource type: %T", mb)
+				t.FailNow()
+			}
+			return ""
+		}
+
+		for _, fix := range fixtures {
+			u, in, uerr := normalizeURI(fix.in)
+			if uerr != nil {
+				if fix.rerr == nil {
+					t.Errorf("(in: %s) bad input URI %s", fix.in, uerr)
+				}
+				continue
+			}
+
+			root, rerr := deducer.deduceRoot(in)
+			if fix.rerr != nil {
+				if rerr == nil {
+					t.Errorf("(in: %s, %T) Expected error on deducing root, got none:\n\t(WNT) %s", in, deducer, fix.rerr)
+				} else if fix.rerr.Error() != rerr.Error() {
+					t.Errorf("(in: %s, %T) Got unexpected error on deducing root:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, rerr, fix.rerr)
+				}
+			} else if rerr != nil {
+				t.Errorf("(in: %s, %T) Got unexpected error on deducing root:\n\t(GOT) %s", in, deducer, rerr)
+			} else if root != fix.root {
+				t.Errorf("(in: %s, %T) Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, root, fix.root)
+			}
+
+			mb, mberr := deducer.deduceSource(in, u)
+			if fix.srcerr != nil {
+				if mberr == nil {
+					t.Errorf("(in: %s, %T) Expected error on deducing source, got none:\n\t(WNT) %s", in, deducer, fix.srcerr)
+				} else if fix.srcerr.Error() != mberr.Error() {
+					t.Errorf("(in: %s, %T) Got unexpected error on deducing source:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, mberr, fix.srcerr)
+				}
+			} else if mberr != nil {
+				// don't complain if the fix already expected an rerr
+				if fix.rerr == nil {
+					t.Errorf("(in: %s, %T) Got unexpected error on deducing source:\n\t(GOT) %s", in, deducer, mberr)
+				}
+			} else if !reflect.DeepEqual(mb, fix.mb) {
+				if mb == nil {
+					t.Errorf("(in: %s, %T) Deducer returned source maybes, but none expected:\n\t(GOT) (none)\n\t(WNT) %s", in, deducer, printmb(fix.mb))
+				} else if fix.mb == nil {
+					t.Errorf("(in: %s, %T) Deducer returned source maybes, but none expected:\n\t(GOT) %s\n\t(WNT) (none)", in, deducer, printmb(mb))
+				} else {
+					t.Errorf("(in: %s, %T) Deducer did not return expected source:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, printmb(mb), printmb(fix.mb))
+				}
+			}
+		}
+	}
+}
+
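+// TestVanityDeduction checks root and source deduction for vanity import
+// paths; it needs network access to resolve go-get metadata, so it is
+// skipped in -short mode.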
+func TestVanityDeduction(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping slow test in short mode")
+	}
+
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	vanities := pathDeductionFixtures["vanity"]
+	wg := &sync.WaitGroup{}
+	wg.Add(len(vanities))
+
+	for _, fix := range vanities {
+		go func(fix pathDeductionFixture) {
+			defer wg.Done()
+			pr, err := sm.DeduceProjectRoot(fix.in)
+			if err != nil {
+				t.Errorf("(in: %s) Unexpected err on deducing project root: %s", fix.in, err)
+				return
+			} else if string(pr) != fix.root {
+				t.Errorf("(in: %s) Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", fix.in, pr, fix.root)
+			}
+
+			_, srcf, err := sm.deducePathAndProcess(fix.in)
+			if err != nil {
+				t.Errorf("(in: %s) Unexpected err on deducing source: %s", fix.in, err)
+				return
+			}
+
+			_, ident, err := srcf()
+			if err != nil {
+				t.Errorf("(in: %s) Unexpected err on executing source future: %s", fix.in, err)
+				return
+			}
+
+			ustr := fix.mb.(maybeGitSource).url.String()
+			if ident != ustr {
+				t.Errorf("(in: %s) Deduced repo ident does not match fixture:\n\t(GOT) %s\n\t(WNT) %s", fix.in, ident, ustr)
+			}
+		}(fix)
+	}
+
+	wg.Wait()
+}
+
+// borrowed from stdlib; produces a more useful string for debugging than
+// fmt's struct printer
+func ufmt(u *url.URL) string {
+	var user, pass interface{}
+	if u.User != nil {
+		user = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			pass = p
+		}
+	}
+	return fmt.Sprintf("host=%q, path=%q, opaque=%q, scheme=%q, user=%#v, pass=%#v, rawpath=%q, rawq=%q, frag=%q",
+		u.Host, u.Path, u.Opaque, u.Scheme, user, pass, u.RawPath, u.RawQuery, u.Fragment)
+}
diff --git a/vendor/github.com/sdboyer/gps/errors.go b/vendor/github.com/sdboyer/gps/errors.go
deleted file mode 100644
index 26c8413..0000000
--- a/vendor/github.com/sdboyer/gps/errors.go
+++ /dev/null
@@ -1,405 +0,0 @@
-package gps
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-)
-
-type errorLevel uint8
-
-// TODO(sdboyer) consistent, sensible way of handling 'type' and 'severity' - or figure
-// out that they're not orthogonal and collapse into just 'type'
-
-const (
-	warning errorLevel = 1 << iota
-	mustResolve
-	cannotResolve
-)
-
-type traceError interface {
-	traceString() string
-}
-
-type solveError struct {
-	lvl errorLevel
-	msg string
-}
-
-func newSolveError(msg string, lvl errorLevel) error {
-	return &solveError{msg: msg, lvl: lvl}
-}
-
-func (e *solveError) Error() string {
-	return e.msg
-}
-
-type noVersionError struct {
-	pn    ProjectIdentifier
-	fails []failedVersion
-}
-
-func (e *noVersionError) Error() string {
-	if len(e.fails) == 0 {
-		return fmt.Sprintf("No versions found for project %q.", e.pn.ProjectRoot)
-	}
-
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
-	for _, f := range e.fails {
-		fmt.Fprintf(&buf, "\n\t%s: %s", f.v, f.f.Error())
-	}
-
-	return buf.String()
-}
-
-func (e *noVersionError) traceString() string {
-	if len(e.fails) == 0 {
-		return fmt.Sprintf("No versions found")
-	}
-
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
-	for _, f := range e.fails {
-		if te, ok := f.f.(traceError); ok {
-			fmt.Fprintf(&buf, "\n  %s: %s", f.v, te.traceString())
-		} else {
-			fmt.Fprintf(&buf, "\n  %s: %s", f.v, f.f.Error())
-		}
-	}
-
-	return buf.String()
-}
-
-type disjointConstraintFailure struct {
-	goal      dependency
-	failsib   []dependency
-	nofailsib []dependency
-	c         Constraint
-}
-
-func (e *disjointConstraintFailure) Error() string {
-	if len(e.failsib) == 1 {
-		str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which has no overlap with existing constraint %s from %s at %s"
-		return fmt.Sprintf(str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String(), e.failsib[0].dep.Constraint.String(), e.failsib[0].depender.id.errString(), e.failsib[0].depender.v)
-	}
-
-	var buf bytes.Buffer
-
-	var sibs []dependency
-	if len(e.failsib) > 1 {
-		sibs = e.failsib
-
-		str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which has no overlap with the following existing constraints:\n"
-		fmt.Fprintf(&buf, str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
-	} else {
-		sibs = e.nofailsib
-
-		str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which does not overlap with the intersection of existing constraints from other currently selected packages:\n"
-		fmt.Fprintf(&buf, str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
-	}
-
-	for _, c := range sibs {
-		fmt.Fprintf(&buf, "\t%s from %s at %s\n", c.dep.Constraint.String(), c.depender.id.errString(), c.depender.v)
-	}
-
-	return buf.String()
-}
-
-func (e *disjointConstraintFailure) traceString() string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.dep.Constraint.String(), e.goal.dep.Ident.errString())
-	for _, f := range e.failsib {
-		fmt.Fprintf(&buf, "%s from %s at %s (no overlap)\n", f.dep.Constraint.String(), f.depender.id.ProjectRoot, f.depender.v)
-	}
-	for _, f := range e.nofailsib {
-		fmt.Fprintf(&buf, "%s from %s at %s (some overlap)\n", f.dep.Constraint.String(), f.depender.id.ProjectRoot, f.depender.v)
-	}
-
-	return buf.String()
-}
-
-// Indicates that an atom could not be introduced because one of its dep
-// constraints does not admit the currently-selected version of the target
-// project.
-type constraintNotAllowedFailure struct {
-	goal dependency
-	v    Version
-}
-
-func (e *constraintNotAllowedFailure) Error() string {
-	str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which does not allow the currently selected version of %s"
-	return fmt.Sprintf(str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint, e.v)
-}
-
-func (e *constraintNotAllowedFailure) traceString() string {
-	str := "%s at %s depends on %s with %s, but that's already selected at %s"
-	return fmt.Sprintf(str, e.goal.depender.id.ProjectRoot, e.goal.depender.v, e.goal.dep.Ident.ProjectRoot, e.goal.dep.Constraint, e.v)
-}
-
-type versionNotAllowedFailure struct {
-	goal       atom
-	failparent []dependency
-	c          Constraint
-}
-
-func (e *versionNotAllowedFailure) Error() string {
-	if len(e.failparent) == 1 {
-		str := "Could not introduce %s at %s, as it is not allowed by constraint %s from project %s."
-		return fmt.Sprintf(str, e.goal.id.errString(), e.goal.v, e.failparent[0].dep.Constraint.String(), e.failparent[0].depender.id.errString())
-	}
-
-	var buf bytes.Buffer
-
-	str := "Could not introduce %s at %s, as it is not allowed by constraints from the following projects:\n"
-	fmt.Fprintf(&buf, str, e.goal.id.errString(), e.goal.v)
-
-	for _, f := range e.failparent {
-		fmt.Fprintf(&buf, "\t%s from %s at %s\n", f.dep.Constraint.String(), f.depender.id.errString(), f.depender.v)
-	}
-
-	return buf.String()
-}
-
-func (e *versionNotAllowedFailure) traceString() string {
-	var buf bytes.Buffer
-
-	fmt.Fprintf(&buf, "%s at %s not allowed by constraint %s:\n", e.goal.id.ProjectRoot, e.goal.v, e.c.String())
-	for _, f := range e.failparent {
-		fmt.Fprintf(&buf, "  %s from %s at %s\n", f.dep.Constraint.String(), f.depender.id.ProjectRoot, f.depender.v)
-	}
-
-	return buf.String()
-}
-
-type missingSourceFailure struct {
-	goal ProjectIdentifier
-	prob string
-}
-
-func (e *missingSourceFailure) Error() string {
-	return fmt.Sprintf(e.prob, e.goal)
-}
-
-type badOptsFailure string
-
-func (e badOptsFailure) Error() string {
-	return string(e)
-}
-
-type sourceMismatchFailure struct {
-	shared            ProjectRoot
-	sel               []dependency
-	current, mismatch string
-	prob              atom
-}
-
-func (e *sourceMismatchFailure) Error() string {
-	var cur []string
-	for _, c := range e.sel {
-		cur = append(cur, string(c.depender.id.ProjectRoot))
-	}
-
-	str := "Could not introduce %s at %s, as it depends on %s from %s, but %s is already marked as coming from %s by %s"
-	return fmt.Sprintf(str, e.prob.id.errString(), e.prob.v, e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
-}
-
-func (e *sourceMismatchFailure) traceString() string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "disagreement on network addr for %s:\n", e.shared)
-
-	fmt.Fprintf(&buf, "  %s from %s\n", e.mismatch, e.prob.id.errString())
-	for _, dep := range e.sel {
-		fmt.Fprintf(&buf, "  %s from %s\n", e.current, dep.depender.id.errString())
-	}
-
-	return buf.String()
-}
-
-type errDeppers struct {
-	err     error
-	deppers []atom
-}
-type checkeeHasProblemPackagesFailure struct {
-	goal    atom
-	failpkg map[string]errDeppers
-}
-
-func (e *checkeeHasProblemPackagesFailure) Error() string {
-	var buf bytes.Buffer
-	indent := ""
-
-	if len(e.failpkg) > 1 {
-		indent = "\t"
-		fmt.Fprintf(
-			&buf, "Could not introduce %s at %s due to multiple problematic subpackages:\n",
-			e.goal.id.errString(),
-			e.goal.v,
-		)
-	}
-
-	for pkg, errdep := range e.failpkg {
-		var cause string
-		if errdep.err == nil {
-			cause = "is missing"
-		} else {
-			cause = fmt.Sprintf("does not contain usable Go code (%T).", errdep.err)
-		}
-
-		if len(e.failpkg) == 1 {
-			fmt.Fprintf(
-				&buf, "Could not introduce %s at %s, as its subpackage %s %s.",
-				e.goal.id.errString(),
-				e.goal.v,
-				pkg,
-				cause,
-			)
-		} else {
-			fmt.Fprintf(&buf, "\tSubpackage %s %s.", pkg, cause)
-		}
-
-		if len(errdep.deppers) == 1 {
-			fmt.Fprintf(
-				&buf, " (Package is required by %s at %s.)",
-				errdep.deppers[0].id.errString(),
-				errdep.deppers[0].v,
-			)
-		} else {
-			fmt.Fprintf(&buf, " Package is required by:")
-			for _, pa := range errdep.deppers {
-				fmt.Fprintf(&buf, "\n%s\t%s at %s", indent, pa.id.errString(), pa.v)
-			}
-		}
-	}
-
-	return buf.String()
-}
-
-func (e *checkeeHasProblemPackagesFailure) traceString() string {
-	var buf bytes.Buffer
-
-	fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.id.ProjectRoot, e.goal.v)
-	for pkg, errdep := range e.failpkg {
-		if errdep.err == nil {
-			fmt.Fprintf(&buf, "\t%s is missing; ", pkg)
-		} else {
-			fmt.Fprintf(&buf, "\t%s has err (%T); ", pkg, errdep.err)
-		}
-
-		if len(errdep.deppers) == 1 {
-			fmt.Fprintf(
-				&buf, "required by %s at %s.",
-				errdep.deppers[0].id.errString(),
-				errdep.deppers[0].v,
-			)
-		} else {
-			fmt.Fprintf(&buf, " required by:")
-			for _, pa := range errdep.deppers {
-				fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.id.errString(), pa.v)
-			}
-		}
-	}
-
-	return buf.String()
-}
-
-type depHasProblemPackagesFailure struct {
-	goal dependency
-	v    Version
-	pl   []string
-	prob map[string]error
-}
-
-func (e *depHasProblemPackagesFailure) Error() string {
-	fcause := func(pkg string) string {
-		var cause string
-		if err, has := e.prob[pkg]; has {
-			cause = fmt.Sprintf("does not contain usable Go code (%T).", err)
-		} else {
-			cause = "is missing."
-		}
-		return cause
-	}
-
-	if len(e.pl) == 1 {
-		return fmt.Sprintf(
-			"Could not introduce %s at %s, as it requires package %s from %s, but in version %s that package %s",
-			e.goal.depender.id.errString(),
-			e.goal.depender.v,
-			e.pl[0],
-			e.goal.dep.Ident.errString(),
-			e.v,
-			fcause(e.pl[0]),
-		)
-	}
-
-	var buf bytes.Buffer
-	fmt.Fprintf(
-		&buf, "Could not introduce %s at %s, as it requires problematic packages from %s (current version %s):",
-		e.goal.depender.id.errString(),
-		e.goal.depender.v,
-		e.goal.dep.Ident.errString(),
-		e.v,
-	)
-
-	for _, pkg := range e.pl {
-		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
-	}
-
-	return buf.String()
-}
-
-func (e *depHasProblemPackagesFailure) traceString() string {
-	var buf bytes.Buffer
-	fcause := func(pkg string) string {
-		var cause string
-		if err, has := e.prob[pkg]; has {
-			cause = fmt.Sprintf("has parsing err (%T).", err)
-		} else {
-			cause = "is missing"
-		}
-		return cause
-	}
-
-	fmt.Fprintf(
-		&buf, "%s at %s depping on %s at %s has problem subpkg(s):",
-		e.goal.depender.id.errString(),
-		e.goal.depender.v,
-		e.goal.dep.Ident.errString(),
-		e.v,
-	)
-
-	for _, pkg := range e.pl {
-		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
-	}
-
-	return buf.String()
-}
-
-// nonexistentRevisionFailure indicates that a revision constraint was specified
-// for a given project, but that that revision does not exist in the source
-// repository.
-type nonexistentRevisionFailure struct {
-	goal dependency
-	r    Revision
-}
-
-func (e *nonexistentRevisionFailure) Error() string {
-	return fmt.Sprintf(
-		"Could not introduce %s at %s, as it requires %s at revision %s, but that revision does not exist",
-		e.goal.depender.id.errString(),
-		e.goal.depender.v,
-		e.goal.dep.Ident.errString(),
-		e.r,
-	)
-}
-
-func (e *nonexistentRevisionFailure) traceString() string {
-	return fmt.Sprintf(
-		"%s at %s wants missing rev %s of %s",
-		e.goal.depender.id.errString(),
-		e.goal.depender.v,
-		e.r,
-		e.goal.dep.Ident.errString(),
-	)
-}
diff --git a/vendor/github.com/sdboyer/gps/example.go b/vendor/github.com/sdboyer/gps/example.go
index 1a5a31a..2bbbe2c 100644
--- a/vendor/github.com/sdboyer/gps/example.go
+++ b/vendor/github.com/sdboyer/gps/example.go
@@ -9,7 +9,8 @@
 	"path/filepath"
 	"strings"
 
-	gps "github.com/sdboyer/gps"
+	"github.com/Masterminds/semver"
+	"github.com/sdboyer/gps"
 )
 
 // This is probably the simplest possible implementation of gps. It does the
@@ -18,8 +19,8 @@
 //  2. It prefers semver tags (if available) over branches
 //  3. It removes any vendor directories nested within dependencies
 //
-//  This will compile and work...and then blow away the vendor directory present
-//  in the cwd, if any. Be careful!
+//  This will compile and work...and then blow away any vendor directory present
+//  in the cwd. Be careful!
 func main() {
 	// Operate on the current directory
 	root, _ := os.Getwd()
@@ -47,12 +48,21 @@
 		// If no failure, blow away the vendor dir and write a new one out,
 		// stripping nested vendor directories as we go.
 		os.RemoveAll(filepath.Join(root, "vendor"))
-		gps.CreateVendorTree(filepath.Join(root, "vendor"), solution, sourcemgr, true)
+		gps.WriteDepTree(filepath.Join(root, "vendor"), solution, sourcemgr, true)
 	}
 }
 
 type NaiveAnalyzer struct{}
 
-func (a NaiveAnalyzer) GetInfo(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
+// DeriveManifestAndLock gets called when the solver needs manifest/lock data
+// for a particular project (the gps.ProjectRoot parameter) at a particular
+// version. That version will be checked out in a directory rooted at path.
+func (a NaiveAnalyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
 	return nil, nil, nil
 }
+
+// Info reports the name and version of the analyzer. This is mostly irrelevant.
+func (a NaiveAnalyzer) Info() (name string, version *semver.Version) {
+	v, _ := semver.NewVersion("v0.0.1")
+	return "example-analyzer", v
+}
diff --git a/vendor/github.com/sdboyer/gps/flags.go b/vendor/github.com/sdboyer/gps/flags.go
index a7172c1..d9a3a1d 100644
--- a/vendor/github.com/sdboyer/gps/flags.go
+++ b/vendor/github.com/sdboyer/gps/flags.go
@@ -1,7 +1,7 @@
 package gps
 
-// projectExistence values represent the extent to which a project "exists."
-type projectExistence uint8
+// sourceExistence values represent the extent to which a source "exists."
+type sourceExistence uint8
 
 const (
 	// ExistsInVendorRoot indicates that a project exists in a vendor directory
@@ -19,7 +19,7 @@
 	//
 	// In short, the information encoded in this flag should not be construed as
 	// exhaustive.
-	existsInVendorRoot projectExistence = 1 << iota
+	existsInVendorRoot sourceExistence = 1 << iota
 
 	// ExistsInCache indicates that a project exists on-disk in the local cache.
 	// It does not guarantee that an upstream exists, thus it cannot imply
diff --git a/vendor/github.com/sdboyer/gps/hash.go b/vendor/github.com/sdboyer/gps/hash.go
index 9e27bcd..893c34e 100644
--- a/vendor/github.com/sdboyer/gps/hash.go
+++ b/vendor/github.com/sdboyer/gps/hash.go
@@ -6,8 +6,8 @@
 	"sort"
 )
 
-// HashInputs computes a hash digest of all data in a SolveOpts that are as
-// function inputs to Solve().
+// HashInputs computes a hash digest of all data in SolveParams and the
+// RootManifest that act as function inputs to Solve().
 //
 // The digest returned from this function is the same as the digest that would
 // be included with a Solve() Result. As such, it's appropriate for comparison
@@ -20,17 +20,16 @@
 	// Do these checks up front before any other work is needed, as they're the
 	// only things that can cause errors
 	// Pass in magic root values, and the bridge will analyze the right thing
-	ptree, err := s.b.listPackages(ProjectIdentifier{ProjectRoot: s.params.ImportRoot}, nil)
+	ptree, err := s.b.ListPackages(ProjectIdentifier{ProjectRoot: s.params.ImportRoot}, nil)
 	if err != nil {
 		return nil, badOptsFailure(fmt.Sprintf("Error while parsing packages under %s: %s", s.params.RootDir, err.Error()))
 	}
 
-	d, dd := s.params.Manifest.DependencyConstraints(), s.params.Manifest.TestDependencyConstraints()
-	p := make(sortedDeps, len(d))
-	copy(p, d)
-	p = append(p, dd...)
-
-	sort.Stable(p)
+	c, tc := s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()
+	// Apply overrides to the constraints from the root. Otherwise, the hash
+	// would be computed on the basis of a constraint from root that doesn't
+	// actually affect solving.
+	p := s.ovr.overrideAll(pcSliceToMap(c, tc).asSortedSlice())
 
 	// We have everything we need; now, compute the hash.
 	h := sha256.New()
@@ -84,21 +83,19 @@
 		}
 	}
 
-	// TODO(sdboyer) overrides
-	// TODO(sdboyer) aliases
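+	// Overrides get hashed in their own right, as they affect the solve even
+	// when no corresponding constraint comes in from the root manifest.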
+	for _, pc := range s.ovr.asSortedSlice() {
+		h.Write([]byte(pc.Ident.ProjectRoot))
+		if pc.Ident.NetworkName != "" {
+			h.Write([]byte(pc.Ident.NetworkName))
+		}
+		if pc.Constraint != nil {
+			h.Write([]byte(pc.Constraint.String()))
+		}
+	}
+
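+	// The analyzer's identity is an input, too - a different analyzer may
+	// derive different manifest/lock data from the same code.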
+	an, av := s.b.AnalyzerInfo()
+	h.Write([]byte(an))
+	h.Write([]byte(av.String()))
+
 	return h.Sum(nil), nil
 }
-
-type sortedDeps []ProjectConstraint
-
-func (s sortedDeps) Len() int {
-	return len(s)
-}
-
-func (s sortedDeps) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s sortedDeps) Less(i, j int) bool {
-	return s[i].Ident.less(s[j].Ident)
-}
diff --git a/vendor/github.com/sdboyer/gps/hash_test.go b/vendor/github.com/sdboyer/gps/hash_test.go
index dc27ddf..171f377 100644
--- a/vendor/github.com/sdboyer/gps/hash_test.go
+++ b/vendor/github.com/sdboyer/gps/hash_test.go
@@ -12,8 +12,7 @@
 	params := SolveParameters{
 		RootDir:    string(fix.ds[0].n),
 		ImportRoot: fix.ds[0].n,
-		Manifest:   fix.ds[0],
-		Ignore:     []string{"foo", "bar"},
+		Manifest:   fix.rootmanifest(),
 	}
 
 	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
@@ -24,7 +23,24 @@
 	}
 
 	h := sha256.New()
-	for _, v := range []string{"a", "a", "1.0.0", "b", "b", "1.0.0", stdlibPkgs, appenginePkgs, "root", "", "root", "a", "b", "bar", "foo"} {
+
+	elems := []string{
+		"a",
+		"a",
+		"1.0.0",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"root",
+		"a",
+		"b",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
 		h.Write([]byte(v))
 	}
 	correct := h.Sum(nil)
@@ -33,3 +49,335 @@
 		t.Errorf("Hashes are not equal")
 	}
 }
+
+func TestHashInputsIgnores(t *testing.T) {
+	fix := basicFixtures["shared dependency with overlapping constraints"]
+
+	rm := fix.rootmanifest().(simpleRootManifest)
+	rm.ig = map[string]bool{
+		"foo": true,
+		"bar": true,
+	}
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: fix.ds[0].n,
+		Manifest:   rm,
+	}
+
+	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
+	if err != nil {
+		t.Fatalf("Prepare returned non-nil error: %s", err)
+	}
+
+	dig, err := s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h := sha256.New()
+
+	elems := []string{
+		"a",
+		"a",
+		"1.0.0",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"bar",
+		"foo",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct := h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+}
+
+func TestHashInputsOverrides(t *testing.T) {
+	fix := basicFixtures["shared dependency with overlapping constraints"]
+
+	rm := fix.rootmanifest().(simpleRootManifest)
+	// First case - override something not in the root, just with network name
+	rm.ovr = map[ProjectRoot]ProjectProperties{
+		"c": ProjectProperties{
+			NetworkName: "car",
+		},
+	}
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: fix.ds[0].n,
+		Manifest:   rm,
+	}
+
+	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
+	if err != nil {
+		t.Fatalf("Prepare returned non-nil error: %s", err)
+	}
+
+	dig, err := s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h := sha256.New()
+
+	elems := []string{
+		"a",
+		"a",
+		"1.0.0",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"c",
+		"car",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct := h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override not in root, just with constraint
+	rm.ovr["d"] = ProjectProperties{
+		Constraint: NewBranch("foobranch"),
+	}
+	dig, err = s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"a",
+		"1.0.0",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override not in root, both constraint and network name
+	rm.ovr["e"] = ProjectProperties{
+		NetworkName: "groucho",
+		Constraint:  NewBranch("plexiglass"),
+	}
+	dig, err = s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"a",
+		"1.0.0",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override in root, just constraint
+	rm.ovr["a"] = ProjectProperties{
+		Constraint: NewVersion("fluglehorn"),
+	}
+	dig, err = s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"a",
+		"fluglehorn",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"a",
+		"fluglehorn",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override in root, only network name
+	rm.ovr["a"] = ProjectProperties{
+		NetworkName: "nota",
+	}
+	dig, err = s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"nota",
+		"1.0.0",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"a",
+		"nota",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override in root, network name and constraint
+	rm.ovr["a"] = ProjectProperties{
+		NetworkName: "nota",
+		Constraint:  NewVersion("fluglehorn"),
+	}
+	dig, err = s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"nota",
+		"fluglehorn",
+		"b",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"a",
+		"nota",
+		"fluglehorn",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/header.png b/vendor/github.com/sdboyer/gps/header.png
new file mode 100644
index 0000000..d39bed6
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/header.png
Binary files differ
diff --git a/vendor/github.com/sdboyer/gps/manager_test.go b/vendor/github.com/sdboyer/gps/manager_test.go
index ebc8091..439d8b4 100644
--- a/vendor/github.com/sdboyer/gps/manager_test.go
+++ b/vendor/github.com/sdboyer/gps/manager_test.go
@@ -5,8 +5,10 @@
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"runtime"
 	"sort"
+	"sync"
 	"testing"
 
 	"github.com/Masterminds/semver"
@@ -19,10 +21,14 @@
 // this as open/Any constraints on everything in the import graph.
 type naiveAnalyzer struct{}
 
-func (naiveAnalyzer) GetInfo(string, ProjectRoot) (Manifest, Lock, error) {
+func (naiveAnalyzer) DeriveManifestAndLock(string, ProjectRoot) (Manifest, Lock, error) {
 	return nil, nil, nil
 }
 
+func (a naiveAnalyzer) Info() (name string, version *semver.Version) {
+	return "naive-analyzer", sv("v0.0.1")
+}
+
 func sv(s string) *semver.Version {
 	sv, err := semver.NewVersion(s)
 	if err != nil {
@@ -32,6 +38,28 @@
 	return sv
 }
 
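+// mkNaiveSM is a test helper that creates a SourceMgr backed by a fresh temp
+// cache dir and a naiveAnalyzer, returning it along with a cleanup func that
+// releases the manager and removes the temp dir.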
+func mkNaiveSM(t *testing.T) (*SourceMgr, func()) {
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+		t.FailNow()
+	}
+
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+		t.FailNow()
+	}
+
+	return sm, func() {
+		sm.Release()
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+}
+
 func init() {
 	_, filename, _, _ := runtime.Caller(1)
 	bd = path.Dir(filename)
@@ -79,23 +107,25 @@
 	cpath, err := ioutil.TempDir("", "smcache")
 	if err != nil {
 		t.Errorf("Failed to create temp dir: %s", err)
+		t.FailNow()
 	}
-	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
 
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
 	if err != nil {
 		t.Errorf("Unexpected error on SourceManager creation: %s", err)
 		t.FailNow()
 	}
+
 	defer func() {
+		sm.Release()
 		err := removeAll(cpath)
 		if err != nil {
 			t.Errorf("removeAll failed: %s", err)
 		}
 	}()
-	defer sm.Release()
 
-	pn := ProjectRoot("github.com/Masterminds/VCSTestRepo")
-	v, err := sm.ListVersions(pn)
+	id := mkPI("github.com/Masterminds/VCSTestRepo")
+	v, err := sm.ListVersions(id)
 	if err != nil {
 		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
 	}
@@ -122,15 +152,15 @@
 	}
 
 	// Two birds, one stone - make sure the internal ProjectManager vlist cache
-	// works by asking for the versions again, and do it through smcache to
-	// ensure its sorting works, as well.
+	// works (or at least doesn't not work) by asking for the versions again,
+	// and do it through smcache to ensure its sorting works, as well.
 	smc := &bridge{
 		sm:     sm,
-		vlists: make(map[ProjectRoot][]Version),
+		vlists: make(map[ProjectIdentifier][]Version),
 		s:      &solver{},
 	}
 
-	v, err = smc.listVersions(ProjectIdentifier{ProjectRoot: pn})
+	v, err = smc.ListVersions(id)
 	if err != nil {
 		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
 	}
@@ -152,149 +182,96 @@
 		}
 	}
 
+	// use ListPackages to ensure the repo is actually on disk
+	// TODO(sdboyer) ugh, maybe we do need an explicit prefetch method
+	smc.ListPackages(id, NewVersion("1.0.0"))
+
 	// Ensure that the appropriate cache dirs and files exist
-	_, err = os.Stat(path.Join(cpath, "src", "github.com", "Masterminds", "VCSTestRepo", ".git"))
+	_, err = os.Stat(filepath.Join(cpath, "sources", "https---github.com-Masterminds-VCSTestRepo", ".git"))
 	if err != nil {
 		t.Error("Cache repo does not exist in expected location")
 	}
 
-	_, err = os.Stat(path.Join(cpath, "metadata", "github.com", "Masterminds", "VCSTestRepo", "cache.json"))
+	_, err = os.Stat(filepath.Join(cpath, "metadata", "github.com", "Masterminds", "VCSTestRepo", "cache.json"))
 	if err != nil {
-		// TODO(sdboyer) temporarily disabled until we turn caching back on
+		// TODO(sdboyer) disabled until we get caching working
 		//t.Error("Metadata cache json file does not exist in expected location")
 	}
 
-	// Ensure project existence values are what we expect
+	// Ensure source existence values are what we expect
 	var exists bool
-	exists, err = sm.RepoExists(pn)
+	exists, err = sm.SourceExists(id)
 	if err != nil {
-		t.Errorf("Error on checking RepoExists: %s", err)
+		t.Errorf("Error on checking SourceExists: %s", err)
 	}
 	if !exists {
-		t.Error("Repo should exist after non-erroring call to ListVersions")
-	}
-
-	// Now reach inside the black box
-	pms, err := sm.getProjectManager(pn)
-	if err != nil {
-		t.Errorf("Error on grabbing project manager obj: %s", err)
-	}
-
-	// Check upstream existence flag
-	if !pms.pm.CheckExistence(existsUpstream) {
-		t.Errorf("ExistsUpstream flag not being correctly set the project")
+		t.Error("Source should exist after non-erroring call to ListVersions")
 	}
 }
 
-func TestRepoVersionFetching(t *testing.T) {
-	// This test is quite slow, skip it on -short
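+// TestGetSources concurrently sets up sources for several hosts, checking
+// that equivalent ProjectIdentifiers share a cached source while distinct
+// network names produce distinct sources.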
+func TestGetSources(t *testing.T) {
+	// This test is a tad slow, skip it on -short
 	if testing.Short() {
-		t.Skip("Skipping repo version fetching test in short mode")
+		t.Skip("Skipping source setup test in short mode")
 	}
 
-	cpath, err := ioutil.TempDir("", "smcache")
-	if err != nil {
-		t.Errorf("Failed to create temp dir: %s", err)
+	sm, clean := mkNaiveSM(t)
+
+	pil := []ProjectIdentifier{
+		mkPI("github.com/Masterminds/VCSTestRepo"),
+		mkPI("bitbucket.org/mattfarina/testhgrepo"),
+		mkPI("launchpad.net/govcstestbzrrepo"),
 	}
 
-	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
-	if err != nil {
-		t.Errorf("Unexpected error on SourceManager creation: %s", err)
-		t.FailNow()
+	wg := &sync.WaitGroup{}
+	wg.Add(3)
+	for _, pi := range pil {
+		go func(lpi ProjectIdentifier) {
+			defer wg.Done()
+			nn := lpi.netName()
+			src, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error setting up source: %s", nn, err)
+				return
+			}
+
+			// Re-get the same, make sure they are the same
+			src2, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error re-getting source: %s", nn, err)
+			} else if src != src2 {
+				t.Errorf("(src %q) first and second sources are not eq", nn)
+			}
+
+			// All of them _should_ select https, so this should work
+			lpi.NetworkName = "https://" + lpi.NetworkName
+			src3, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error getting explicit https source: %s", nn, err)
+			} else if src != src3 {
+				t.Errorf("(src %q) explicit https source should reuse autodetected https source", nn)
+			}
+
+			// Now put in http, and they should differ
+			lpi.NetworkName = "http://" + string(lpi.ProjectRoot)
+			src4, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error getting explicit http source: %s", nn, err)
+			} else if src == src4 {
+				t.Errorf("(src %q) explicit http source should create a new src", nn)
+			}
+		}(pi)
 	}
 
-	upstreams := []ProjectRoot{
-		"github.com/Masterminds/VCSTestRepo",
-		"bitbucket.org/mattfarina/testhgrepo",
-		"launchpad.net/govcstestbzrrepo",
-	}
+	wg.Wait()
 
-	pms := make([]*projectManager, len(upstreams))
-	for k, u := range upstreams {
-		pmi, err := sm.getProjectManager(u)
-		if err != nil {
-			sm.Release()
-			removeAll(cpath)
-			t.Errorf("Unexpected error on ProjectManager creation: %s", err)
-			t.FailNow()
-		}
-		pms[k] = pmi.pm
+	// nine entries (of which three are dupes): for each vcs project, one entry
+	// apiece for the raw import path, the https url, and the http url
+	if len(sm.srcs) != 9 {
+		t.Errorf("Should have nine discrete entries in the srcs map, got %v", len(sm.srcs))
 	}
-
-	defer func() {
-		err := removeAll(cpath)
-		if err != nil {
-			t.Errorf("removeAll failed: %s", err)
-		}
-	}()
-	defer sm.Release()
-
-	// test git first
-	vlist, exbits, err := pms[0].crepo.getCurrentVersionPairs()
-	if err != nil {
-		t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
-	}
-	if exbits != existsUpstream {
-		t.Errorf("git pair fetch should only set upstream existence bits, but got %v", exbits)
-	}
-	if len(vlist) != 3 {
-		t.Errorf("git test repo should've produced three versions, got %v", len(vlist))
-	} else {
-		v := NewBranch("master").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
-		if vlist[0] != v {
-			t.Errorf("git pair fetch reported incorrect first version, got %s", vlist[0])
-		}
-
-		v = NewBranch("test").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
-		if vlist[1] != v {
-			t.Errorf("git pair fetch reported incorrect second version, got %s", vlist[1])
-		}
-
-		v = NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
-		if vlist[2] != v {
-			t.Errorf("git pair fetch reported incorrect third version, got %s", vlist[2])
-		}
-	}
-
-	// now hg
-	vlist, exbits, err = pms[1].crepo.getCurrentVersionPairs()
-	if err != nil {
-		t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
-	}
-	if exbits != existsUpstream|existsInCache {
-		t.Errorf("hg pair fetch should set upstream and cache existence bits, but got %v", exbits)
-	}
-	if len(vlist) != 2 {
-		t.Errorf("hg test repo should've produced two versions, got %v", len(vlist))
-	} else {
-		v := NewVersion("1.0.0").Is(Revision("d680e82228d206935ab2eaa88612587abe68db07"))
-		if vlist[0] != v {
-			t.Errorf("hg pair fetch reported incorrect first version, got %s", vlist[0])
-		}
-
-		v = NewBranch("test").Is(Revision("6c44ee3fe5d87763616c19bf7dbcadb24ff5a5ce"))
-		if vlist[1] != v {
-			t.Errorf("hg pair fetch reported incorrect second version, got %s", vlist[1])
-		}
-	}
-
-	// bzr last
-	vlist, exbits, err = pms[2].crepo.getCurrentVersionPairs()
-	if err != nil {
-		t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
-	}
-	if exbits != existsUpstream|existsInCache {
-		t.Errorf("bzr pair fetch should set upstream and cache existence bits, but got %v", exbits)
-	}
-	if len(vlist) != 1 {
-		t.Errorf("bzr test repo should've produced one version, got %v", len(vlist))
-	} else {
-		v := NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
-		if vlist[0] != v {
-			t.Errorf("bzr pair fetch reported incorrect first version, got %s", vlist[0])
-		}
-	}
-	// no svn for now, because...svn
+	clean()
 }
 
 // Regression test for #32
@@ -304,34 +281,19 @@
 		t.Skip("Skipping slow test in short mode")
 	}
 
-	cpath, err := ioutil.TempDir("", "smcache")
-	if err != nil {
-		t.Errorf("Failed to create temp dir: %s", err)
-	}
-	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
-
-	if err != nil {
-		t.Errorf("Unexpected error on SourceManager creation: %s", err)
-		t.FailNow()
-	}
-	defer func() {
-		err := removeAll(cpath)
-		if err != nil {
-			t.Errorf("removeAll failed: %s", err)
-		}
-	}()
-	defer sm.Release()
+	sm, clean := mkNaiveSM(t)
+	defer clean()
 
 	// setup done, now do the test
 
-	pn := ProjectRoot("github.com/Masterminds/VCSTestRepo")
+	id := mkPI("github.com/Masterminds/VCSTestRepo")
 
-	_, _, err = sm.GetProjectInfo(pn, NewVersion("1.0.0"))
+	_, _, err := sm.GetManifestAndLock(id, NewVersion("1.0.0"))
 	if err != nil {
 		t.Errorf("Unexpected error from GetInfoAt %s", err)
 	}
 
-	v, err := sm.ListVersions(pn)
+	v, err := sm.ListVersions(id)
 	if err != nil {
 		t.Errorf("Unexpected error from ListVersions %s", err)
 	}
@@ -340,3 +302,154 @@
 		t.Errorf("Expected three results from ListVersions, got %v", len(v))
 	}
 }
+
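+// TestDeduceProjectRoot verifies root deduction for a variety of path shapes,
+// and that each unique deduction adds exactly one entry to the root path trie.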
+func TestDeduceProjectRoot(t *testing.T) {
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	in := "github.com/sdboyer/gps"
+	pr, err := sm.DeduceProjectRoot(in)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in, err)
+	}
+	if string(pr) != in {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 1 {
+		t.Errorf("Root path trie should have one element after one deduction, has %v", sm.rootxt.Len())
+	}
+
+	pr, err = sm.DeduceProjectRoot(in)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in, err)
+	} else if string(pr) != in {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 1 {
+		t.Errorf("Root path trie should still have one element after performing the same deduction twice; has %v", sm.rootxt.Len())
+	}
+
+	// Now do a subpath
+	sub := path.Join(in, "foo")
+	pr, err = sm.DeduceProjectRoot(sub)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", sub, err)
+	} else if string(pr) != in {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 2 {
+		t.Errorf("Root path trie should have two elements, one for root and one for subpath; has %v", sm.rootxt.Len())
+	}
+
+	// Now do a fully different root, but still on github
+	in2 := "github.com/bagel/lox"
+	sub2 := path.Join(in2, "cheese")
+	pr, err = sm.DeduceProjectRoot(sub2)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", sub2, err)
+	} else if string(pr) != in2 {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 4 {
+		t.Errorf("Root path trie should have four elements, one for each unique root and subpath; has %v", sm.rootxt.Len())
+	}
+
+	// Ensure that our prefixes are bounded by path separators
+	in4 := "github.com/bagel/loxx"
+	pr, err = sm.DeduceProjectRoot(in4)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in4, err)
+	} else if string(pr) != in4 {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 5 {
+		t.Errorf("Root path trie should have five elements, one for each unique root and subpath; has %v", sm.rootxt.Len())
+	}
+
+	// Ensure that vcs extension-based matching comes through
+	in5 := "ffffrrrraaaaaapppppdoesnotresolve.com/baz.git"
+	pr, err = sm.DeduceProjectRoot(in5)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in5, err)
+	} else if string(pr) != in5 {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 6 {
+		t.Errorf("Root path trie should have six elements, one for each unique root and subpath; has %v", sm.rootxt.Len())
+	}
+}
+
+// Test that the future returned from SourceMgr.deducePathAndProcess() is safe
+// to call concurrently.
+//
+// Obviously, this is just a heuristic; passage does not guarantee correctness
+// (though failure does guarantee incorrectness)
+func TestMultiDeduceThreadsafe(t *testing.T) {
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	in := "github.com/sdboyer/gps"
+	rootf, srcf, err := sm.deducePathAndProcess(in)
+	if err != nil {
+		t.Errorf("Known-good path %q had unexpected basic deduction error: %s", in, err)
+		t.FailNow()
+	}
+
+	cnum := 50
+	wg := &sync.WaitGroup{}
+
+	// Set up channel for everything else to block on
+	c := make(chan struct{}, 1)
+	f := func(rnum int) {
+		defer func() {
+			wg.Done()
+			if e := recover(); e != nil {
+				t.Errorf("goroutine number %v panicked with err: %s", rnum, e)
+			}
+		}()
+		<-c
+		_, err := rootf()
+		if err != nil {
+			t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err)
+		}
+	}
+
+	for k := range make([]struct{}, cnum) {
+		wg.Add(1)
+		go f(k)
+		runtime.Gosched()
+	}
+	close(c)
+	wg.Wait()
+	if sm.rootxt.Len() != 1 {
+		t.Errorf("Root path trie should have just one element; has %v", sm.rootxt.Len())
+	}
+
+	// repeat for srcf
+	wg2 := &sync.WaitGroup{}
+	c = make(chan struct{}, 1)
+	f = func(rnum int) {
+		defer func() {
+			wg2.Done()
+			if e := recover(); e != nil {
+				t.Errorf("goroutine number %v panicked with err: %s", rnum, e)
+			}
+		}()
+		<-c
+		_, _, err := srcf()
+		if err != nil {
+			t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err)
+		}
+	}
+
+	for k := range make([]struct{}, cnum) {
+		wg2.Add(1)
+		go f(k)
+		runtime.Gosched()
+	}
+	close(c)
+	wg2.Wait()
+	if len(sm.srcs) != 2 {
+		t.Errorf("Sources map should have just two elements, but has %v", len(sm.srcs))
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/manifest.go b/vendor/github.com/sdboyer/gps/manifest.go
index 83fd9d7..86d06cc 100644
--- a/vendor/github.com/sdboyer/gps/manifest.go
+++ b/vendor/github.com/sdboyer/gps/manifest.go
@@ -16,11 +16,36 @@
 type Manifest interface {
 	// Returns a list of project-level constraints.
 	DependencyConstraints() []ProjectConstraint
-	// Returns a list of constraints applicable to test imports. Note that this
-	// will only be consulted for root manifests.
+
+	// Returns a list of constraints applicable to test imports.
+	//
+	// These are applied only when tests are incorporated. Typically, that
+	// will only be for root manifests.
 	TestDependencyConstraints() []ProjectConstraint
 }
 
+// RootManifest extends Manifest to add special controls over solving that are
+// only afforded to the root project.
+type RootManifest interface {
+	Manifest
+
+	// Overrides returns a list of ProjectConstraints that will unconditionally
+	// supersede any ProjectConstraint declarations made in either the root
+	// manifest or any dependency's manifest.
+	//
+	// Overrides are a special control afforded only to root manifests. Tool
+	// users should be encouraged to use them only as a last resort; they do not
+	// "play well with others" (that is their express goal), and overreliance on
+	// them can harm the ecosystem as a whole.
+	Overrides() ProjectConstraints
+
+	// IgnorePackages returns a set of import paths to ignore. These import
+	// paths can be within the root project, or part of other projects. Ignoring
+	// a package means that both it and its (unique) imports will be disregarded
+	// by all relevant solver operations.
+	IgnorePackages() map[string]bool
+}
+
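+// For illustration, a hypothetical root manifest might override a
+// dependency's source and ignore one of its subpackages like so:
+//
+//	rm := simpleRootManifest{
+//		ovr: ProjectConstraints{
+//			"github.com/foo/bar": ProjectProperties{NetworkName: "github.com/foo/baz"},
+//		},
+//		ig: map[string]bool{"github.com/foo/bar/subpkg": true},
+//	}
+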
 // SimpleManifest is a helper for tools to enumerate manifest data. It's
 // generally intended for ephemeral manifests, such as those Analyzers create on
 // the fly for projects with no manifest metadata, or metadata through a foreign
@@ -42,6 +67,30 @@
 	return m.TestDeps
 }
 
+// simpleRootManifest exists so that we have a safe value to swap into solver
+// params when a nil Manifest is provided.
+//
+// Also, for tests.
+type simpleRootManifest struct {
+	c   []ProjectConstraint
+	tc  []ProjectConstraint
+	ovr ProjectConstraints
+	ig  map[string]bool
+}
+
+func (m simpleRootManifest) DependencyConstraints() []ProjectConstraint {
+	return m.c
+}
+func (m simpleRootManifest) TestDependencyConstraints() []ProjectConstraint {
+	return m.tc
+}
+func (m simpleRootManifest) Overrides() ProjectConstraints {
+	return m.ovr
+}
+func (m simpleRootManifest) IgnorePackages() map[string]bool {
+	return m.ig
+}
+
 // prepManifest ensures a manifest is prepared and safe for use by the solver.
 // This entails two things:
 //
diff --git a/vendor/github.com/sdboyer/gps/marker-header.png b/vendor/github.com/sdboyer/gps/marker-header.png
deleted file mode 100644
index 66965c5..0000000
--- a/vendor/github.com/sdboyer/gps/marker-header.png
+++ /dev/null
Binary files differ
diff --git a/vendor/github.com/sdboyer/gps/maybe_source.go b/vendor/github.com/sdboyer/gps/maybe_source.go
new file mode 100644
index 0000000..34fd5d5
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/maybe_source.go
@@ -0,0 +1,153 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"path/filepath"
+
+	"github.com/Masterminds/vcs"
+)
+
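+// A maybeSource is a candidate source for a project that has not yet been
+// validated. try attempts to set it up under cachedir with the given
+// ProjectAnalyzer, returning the live source and its ident (URL string) on
+// success.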
+type maybeSource interface {
+	try(cachedir string, an ProjectAnalyzer) (source, string, error)
+}
+
+type maybeSources []maybeSource
+
+func (mbs maybeSources) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
+	var e sourceFailures
+	for _, mb := range mbs {
+		src, ident, err := mb.try(cachedir, an)
+		if err == nil {
+			return src, ident, nil
+		}
+		e = append(e, sourceSetupFailure{
+			ident: ident,
+			err:   err,
+		})
+	}
+	return nil, "", e
+}
+
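+// A sourceSetupFailure pairs the ident of a candidate source with the error
+// that prevented it from being set up.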
+type sourceSetupFailure struct {
+	ident string
+	err   error
+}
+
+func (e sourceSetupFailure) Error() string {
+	return fmt.Sprintf("failed to set up %q, error %s", e.ident, e.err.Error())
+}
+
+type sourceFailures []sourceSetupFailure
+
+func (sf sourceFailures) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No valid source could be created:\n")
+	for _, e := range sf {
+		fmt.Fprintf(&buf, "\t%s", e.Error())
+	}
+
+	return buf.String()
+}
+
+type maybeGitSource struct {
+	url *url.URL
+}
+
+func (m maybeGitSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
+	ustr := m.url.String()
+	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
+	r, err := vcs.NewGitRepo(ustr, path)
+	if err != nil {
+		return nil, "", err
+	}
+
+	src := &gitSource{
+		baseVCSSource: baseVCSSource{
+			an: an,
+			dc: newMetaCache(),
+			crepo: &repo{
+				r:     r,
+				rpath: path,
+			},
+		},
+	}
+
+	src.baseVCSSource.lvfunc = src.listVersions
+
+	_, err = src.listVersions()
+	if err != nil {
+		return nil, "", err
+	}
+
+	return src, ustr, nil
+}
+
+type maybeBzrSource struct {
+	url *url.URL
+}
+
+func (m maybeBzrSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
+	ustr := m.url.String()
+	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
+	r, err := vcs.NewBzrRepo(ustr, path)
+	if err != nil {
+		return nil, "", err
+	}
+	if !r.Ping() {
+		return nil, "", fmt.Errorf("Remote repository at %s does not exist, or is inaccessible", ustr)
+	}
+
+	src := &bzrSource{
+		baseVCSSource: baseVCSSource{
+			an: an,
+			dc: newMetaCache(),
+			ex: existence{
+				s: existsUpstream,
+				f: existsUpstream,
+			},
+			crepo: &repo{
+				r:     r,
+				rpath: path,
+			},
+		},
+	}
+	src.baseVCSSource.lvfunc = src.listVersions
+
+	return src, ustr, nil
+}
+
+type maybeHgSource struct {
+	url *url.URL
+}
+
+func (m maybeHgSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
+	ustr := m.url.String()
+	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
+	r, err := vcs.NewHgRepo(ustr, path)
+	if err != nil {
+		return nil, "", err
+	}
+	if !r.Ping() {
+		return nil, "", fmt.Errorf("Remote repository at %s does not exist, or is inaccessible", ustr)
+	}
+
+	src := &hgSource{
+		baseVCSSource: baseVCSSource{
+			an: an,
+			dc: newMetaCache(),
+			ex: existence{
+				s: existsUpstream,
+				f: existsUpstream,
+			},
+			crepo: &repo{
+				r:     r,
+				rpath: path,
+			},
+		},
+	}
+	src.baseVCSSource.lvfunc = src.listVersions
+
+	return src, ustr, nil
+}
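Each maybe*Source.try binds a remote URL to a local cache path via Masterminds/vcs, and (for bzr and hg) pings the remote before committing to it. A standalone sketch of that handshake, using only vcs calls that appear above; the remote and cache path are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/Masterminds/vcs"
)

func main() {
	// Any public git remote works here; the cache path mirrors the
	// filepath.Join(cachedir, "sources", ...) layout used above.
	remote := "https://github.com/Masterminds/vcs"
	local := filepath.Join("/tmp", "gps-cache", "sources", "example")

	repo, err := vcs.NewGitRepo(remote, local)
	if err != nil {
		log.Fatal(err)
	}
	if !repo.Ping() {
		log.Fatalf("remote repository at %s does not exist, or is inaccessible", remote)
	}
	fmt.Println("remote reachable; clone/cache would land in", local)
}
```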
diff --git a/vendor/github.com/sdboyer/gps/project_manager.go b/vendor/github.com/sdboyer/gps/project_manager.go
deleted file mode 100644
index e174fde..0000000
--- a/vendor/github.com/sdboyer/gps/project_manager.go
+++ /dev/null
@@ -1,584 +0,0 @@
-package gps
-
-import (
-	"bytes"
-	"fmt"
-	"go/build"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"strings"
-	"sync"
-
-	"github.com/Masterminds/vcs"
-	"github.com/termie/go-shutil"
-)
-
-type projectManager struct {
-	// The identifier of the project. At this level, corresponds to the
-	// '$GOPATH/src'-relative path, *and* the network name.
-	n ProjectRoot
-
-	// build.Context to use in any analysis, and to pass to the analyzer
-	ctx build.Context
-
-	// Object for the cache repository
-	crepo *repo
-
-	// Indicates the extent to which we have searched for, and verified, the
-	// existence of the project/repo.
-	ex existence
-
-	// Analyzer, injected by way of the SourceManager and originally from the
-	// sm's creator
-	an ProjectAnalyzer
-
-	// Whether the cache has the latest info on versions
-	cvsync bool
-
-	// The project metadata cache. This is persisted to disk, for reuse across
-	// solver runs.
-	// TODO(sdboyer) protect with mutex
-	dc *projectDataCache
-}
-
-type existence struct {
-	// The existence levels for which a search/check has been performed
-	s projectExistence
-
-	// The existence levels verified to be present through searching
-	f projectExistence
-}
-
-// TODO(sdboyer) figure out shape of versions, then implement marshaling/unmarshaling
-type projectDataCache struct {
-	Version  string                   `json:"version"` // TODO(sdboyer) use this
-	Infos    map[Revision]projectInfo `json:"infos"`
-	Packages map[Revision]PackageTree `json:"packages"`
-	VMap     map[Version]Revision     `json:"vmap"`
-	RMap     map[Revision][]Version   `json:"rmap"`
-}
-
-// projectInfo holds manifest and lock
-type projectInfo struct {
-	Manifest
-	Lock
-}
-
-type repo struct {
-	// Path to the root of the default working copy (NOT the repo itself)
-	rpath string
-
-	// Mutex controlling general access to the repo
-	mut sync.RWMutex
-
-	// Object for direct repo interaction
-	r vcs.Repo
-
-	// Whether or not the cache repo is in sync (think dvcs) with upstream
-	synced bool
-}
-
-func (pm *projectManager) GetInfoAt(v Version) (Manifest, Lock, error) {
-	if err := pm.ensureCacheExistence(); err != nil {
-		return nil, nil, err
-	}
-
-	if r, exists := pm.dc.VMap[v]; exists {
-		if pi, exists := pm.dc.Infos[r]; exists {
-			return pi.Manifest, pi.Lock, nil
-		}
-	}
-
-	pm.crepo.mut.Lock()
-	var err error
-	if !pm.crepo.synced {
-		err = pm.crepo.r.Update()
-		if err != nil {
-			return nil, nil, fmt.Errorf("Could not fetch latest updates into repository")
-		}
-		pm.crepo.synced = true
-	}
-
-	// Always prefer a rev, if it's available
-	if pv, ok := v.(PairedVersion); ok {
-		err = pm.crepo.r.UpdateVersion(pv.Underlying().String())
-	} else {
-		err = pm.crepo.r.UpdateVersion(v.String())
-	}
-	pm.crepo.mut.Unlock()
-	if err != nil {
-		// TODO(sdboyer) More-er proper-er error
-		panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", pm.n, v.String(), err))
-	}
-
-	pm.crepo.mut.RLock()
-	m, l, err := pm.an.GetInfo(filepath.Join(pm.ctx.GOPATH, "src", string(pm.n)), pm.n)
-	// TODO(sdboyer) cache results
-	pm.crepo.mut.RUnlock()
-
-	if err == nil {
-		if l != nil {
-			l = prepLock(l)
-		}
-
-		// If m is nil, prepManifest will provide an empty one.
-		pi := projectInfo{
-			Manifest: prepManifest(m),
-			Lock:     l,
-		}
-
-		// TODO(sdboyer) this just clobbers all over and ignores the paired/unpaired
-		// distinction; serious fix is needed
-		if r, exists := pm.dc.VMap[v]; exists {
-			pm.dc.Infos[r] = pi
-		}
-
-		return pi.Manifest, pi.Lock, nil
-	}
-
-	return nil, nil, err
-}
-
-func (pm *projectManager) ListPackages(v Version) (ptree PackageTree, err error) {
-	if err = pm.ensureCacheExistence(); err != nil {
-		return
-	}
-
-	// See if we can find it in the cache
-	var r Revision
-	switch v.(type) {
-	case Revision, PairedVersion:
-		var ok bool
-		if r, ok = v.(Revision); !ok {
-			r = v.(PairedVersion).Underlying()
-		}
-
-		if ptree, cached := pm.dc.Packages[r]; cached {
-			return ptree, nil
-		}
-	default:
-		var has bool
-		if r, has = pm.dc.VMap[v]; has {
-			if ptree, cached := pm.dc.Packages[r]; cached {
-				return ptree, nil
-			}
-		}
-	}
-
-	// TODO(sdboyer) handle the case where we have a version w/out rev, and not in cache
-
-	// Not in the cache; check out the version and do the analysis
-	pm.crepo.mut.Lock()
-	// Check out the desired version for analysis
-	if r != "" {
-		// Always prefer a rev, if it's available
-		err = pm.crepo.r.UpdateVersion(string(r))
-	} else {
-		// If we don't have a rev, ensure the repo is up to date, otherwise we
-		// could have a desync issue
-		if !pm.crepo.synced {
-			err = pm.crepo.r.Update()
-			if err != nil {
-				return PackageTree{}, fmt.Errorf("Could not fetch latest updates into repository: %s", err)
-			}
-			pm.crepo.synced = true
-		}
-		err = pm.crepo.r.UpdateVersion(v.String())
-	}
-
-	ptree, err = listPackages(filepath.Join(pm.ctx.GOPATH, "src", string(pm.n)), string(pm.n))
-	pm.crepo.mut.Unlock()
-
-	// TODO(sdboyer) cache errs?
-	if err != nil {
-		pm.dc.Packages[r] = ptree
-	}
-
-	return
-}
-
-func (pm *projectManager) ensureCacheExistence() error {
-	// Technically, methods could attempt to return straight from the
-	// metadata cache even if the repo cache doesn't exist on disk. But that
-	// would allow weird state inconsistencies (cache exists, but no repo...how
-	// does that even happen?) that it'd be better to just not allow so that we
-	// don't have to think about it elsewhere
-	if !pm.CheckExistence(existsInCache) {
-		if pm.CheckExistence(existsUpstream) {
-			pm.crepo.mut.Lock()
-			err := pm.crepo.r.Get()
-			pm.crepo.mut.Unlock()
-
-			if err != nil {
-				return fmt.Errorf("failed to create repository cache for %s", pm.n)
-			}
-			pm.ex.s |= existsInCache
-			pm.ex.f |= existsInCache
-		} else {
-			return fmt.Errorf("project %s does not exist upstream", pm.n)
-		}
-	}
-
-	return nil
-}
-
-func (pm *projectManager) ListVersions() (vlist []Version, err error) {
-	if !pm.cvsync {
-		// This check only guarantees that the upstream exists, not the cache
-		pm.ex.s |= existsUpstream
-		vpairs, exbits, err := pm.crepo.getCurrentVersionPairs()
-		// But it *may* also check the local existence
-		pm.ex.s |= exbits
-		pm.ex.f |= exbits
-
-		if err != nil {
-			// TODO(sdboyer) More-er proper-er error
-			fmt.Println(err)
-			return nil, err
-		}
-
-		vlist = make([]Version, len(vpairs))
-		// mark our cache as synced if we got ExistsUpstream back
-		if exbits&existsUpstream == existsUpstream {
-			pm.cvsync = true
-		}
-
-		// Process the version data into the cache
-		// TODO(sdboyer) detect out-of-sync data as we do this?
-		for k, v := range vpairs {
-			pm.dc.VMap[v] = v.Underlying()
-			pm.dc.RMap[v.Underlying()] = append(pm.dc.RMap[v.Underlying()], v)
-			vlist[k] = v
-		}
-	} else {
-		vlist = make([]Version, len(pm.dc.VMap))
-		k := 0
-		// TODO(sdboyer) key type of VMap should be string; recombine here
-		//for v, r := range pm.dc.VMap {
-		for v := range pm.dc.VMap {
-			vlist[k] = v
-			k++
-		}
-	}
-
-	return
-}
-
-func (pm *projectManager) RevisionPresentIn(r Revision) (bool, error) {
-	// First and fastest path is to check the data cache to see if the rev is
-	// present. This could give us false positives, but the cases where that can
-	// occur would require a type of cache staleness that seems *exceedingly*
-	// unlikely to occur.
-	if _, has := pm.dc.Infos[r]; has {
-		return true, nil
-	} else if _, has := pm.dc.RMap[r]; has {
-		return true, nil
-	}
-
-	// For now at least, just run GetInfoAt(); it basically accomplishes the
-	// same thing.
-	if _, _, err := pm.GetInfoAt(r); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-// CheckExistence provides a direct method for querying existence levels of the
-// project. It will only perform actual searching (local fs or over the network)
-// if no previous attempt at that search has been made.
-//
-// Note that this may perform read-ish operations on the cache repo, and it
-// takes a lock accordingly. Deadlock may result from calling it during a
-// segment where the cache repo mutex is already write-locked.
-func (pm *projectManager) CheckExistence(ex projectExistence) bool {
-	if pm.ex.s&ex != ex {
-		if ex&existsInVendorRoot != 0 && pm.ex.s&existsInVendorRoot == 0 {
-			panic("should now be implemented in bridge")
-		}
-		if ex&existsInCache != 0 && pm.ex.s&existsInCache == 0 {
-			pm.crepo.mut.RLock()
-			pm.ex.s |= existsInCache
-			if pm.crepo.r.CheckLocal() {
-				pm.ex.f |= existsInCache
-			}
-			pm.crepo.mut.RUnlock()
-		}
-		if ex&existsUpstream != 0 && pm.ex.s&existsUpstream == 0 {
-			pm.crepo.mut.RLock()
-			pm.ex.s |= existsUpstream
-			if pm.crepo.r.Ping() {
-				pm.ex.f |= existsUpstream
-			}
-			pm.crepo.mut.RUnlock()
-		}
-	}
-
-	return ex&pm.ex.f == ex
-}
-
-func (pm *projectManager) ExportVersionTo(v Version, to string) error {
-	return pm.crepo.exportVersionTo(v, to)
-}
-
-func (r *repo) getCurrentVersionPairs() (vlist []PairedVersion, exbits projectExistence, err error) {
-	r.mut.Lock()
-	defer r.mut.Unlock()
-
-	switch r.r.(type) {
-	case *vcs.GitRepo:
-		var out []byte
-		c := exec.Command("git", "ls-remote", r.r.Remote())
-		// Ensure no terminal prompting for PWs
-		c.Env = mergeEnvLists([]string{"GIT_TERMINAL_PROMPT=0"}, os.Environ())
-		out, err = c.CombinedOutput()
-
-		all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
-		if err != nil || len(all) == 0 {
-			// TODO(sdboyer) remove this path? it really just complicates things, for
-			// probably not much benefit
-
-			// ls-remote failed, probably due to bad communication or a faulty
-			// upstream implementation. So fetch updates, then build the list
-			// locally
-			err = r.r.Update()
-			if err != nil {
-				// Definitely have a problem, now - bail out
-				return
-			}
-
-			// Upstream and cache must exist, so add that to exbits
-			exbits |= existsUpstream | existsInCache
-			// Also, local is definitely now synced
-			r.synced = true
-
-			out, err = r.r.RunFromDir("git", "show-ref", "--dereference")
-			if err != nil {
-				return
-			}
-
-			all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
-		}
-		// Local cache may not actually exist here, but upstream definitely does
-		exbits |= existsUpstream
-
-		tmap := make(map[string]PairedVersion)
-		for _, pair := range all {
-			var v PairedVersion
-			if string(pair[46:51]) == "heads" {
-				v = NewBranch(string(pair[52:])).Is(Revision(pair[:40])).(PairedVersion)
-				vlist = append(vlist, v)
-			} else if string(pair[46:50]) == "tags" {
-				vstr := string(pair[51:])
-				if strings.HasSuffix(vstr, "^{}") {
-					// If the suffix is there, then we *know* this is the rev of
-					// the underlying commit object that we actually want
-					vstr = strings.TrimSuffix(vstr, "^{}")
-				} else if _, exists := tmap[vstr]; exists {
-					// Already saw the deref'd version of this tag, if one
-					// exists, so skip this.
-					continue
-					// Can only hit this branch if we somehow got the deref'd
-					// version first. Which should be impossible, but this
-					// covers us in case of weirdness, anyway.
-				}
-				v = NewVersion(vstr).Is(Revision(pair[:40])).(PairedVersion)
-				tmap[vstr] = v
-			}
-		}
-
-		// Append all the deref'd (if applicable) tags into the list
-		for _, v := range tmap {
-			vlist = append(vlist, v)
-		}
-	case *vcs.BzrRepo:
-		var out []byte
-		// Update the local first
-		err = r.r.Update()
-		if err != nil {
-			return
-		}
-		// Upstream and cache must exist, so add that to exbits
-		exbits |= existsUpstream | existsInCache
-		// Also, local is definitely now synced
-		r.synced = true
-
-		// Now, list all the tags
-		out, err = r.r.RunFromDir("bzr", "tags", "--show-ids", "-v")
-		if err != nil {
-			return
-		}
-
-		all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
-		for _, line := range all {
-			idx := bytes.IndexByte(line, 32) // space
-			v := NewVersion(string(line[:idx])).Is(Revision(bytes.TrimSpace(line[idx:]))).(PairedVersion)
-			vlist = append(vlist, v)
-		}
-
-	case *vcs.HgRepo:
-		var out []byte
-		err = r.r.Update()
-		if err != nil {
-			return
-		}
-
-		// Upstream and cache must exist, so add that to exbits
-		exbits |= existsUpstream | existsInCache
-		// Also, local is definitely now synced
-		r.synced = true
-
-		out, err = r.r.RunFromDir("hg", "tags", "--debug", "--verbose")
-		if err != nil {
-			return
-		}
-
-		all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
-		lbyt := []byte("local")
-		nulrev := []byte("0000000000000000000000000000000000000000")
-		for _, line := range all {
-			if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
-				// Skip local tags
-				continue
-			}
-
-			// tip is magic, don't include it
-			if bytes.HasPrefix(line, []byte("tip")) {
-				continue
-			}
-
-			// Split on colon; this gets us the rev and the tag plus local revno
-			pair := bytes.Split(line, []byte(":"))
-			if bytes.Equal(nulrev, pair[1]) {
-				// null rev indicates this tag is marked for deletion
-				continue
-			}
-
-			idx := bytes.IndexByte(pair[0], 32) // space
-			v := NewVersion(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
-			vlist = append(vlist, v)
-		}
-
-		out, err = r.r.RunFromDir("hg", "branches", "--debug", "--verbose")
-		if err != nil {
-			// better nothing than incomplete
-			vlist = nil
-			return
-		}
-
-		all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
-		lbyt = []byte("(inactive)")
-		for _, line := range all {
-			if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
-				// Skip inactive branches
-				continue
-			}
-
-			// Split on colon; this gets us the rev and the branch plus local revno
-			pair := bytes.Split(line, []byte(":"))
-			idx := bytes.IndexByte(pair[0], 32) // space
-			v := NewBranch(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
-			vlist = append(vlist, v)
-		}
-	case *vcs.SvnRepo:
-		// TODO(sdboyer) is it ok to return empty vlist and no error?
-		// TODO(sdboyer) ...gotta do something for svn, right?
-	default:
-		panic("unknown repo type")
-	}
-
-	return
-}
-
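The removed getCurrentVersionPairs sliced `git ls-remote` output at fixed offsets: 40 hex characters of SHA, a tab, then `refs/heads/...` or `refs/tags/...`, with a `^{}` suffix marking the peeled commit of an annotated tag. A standalone sketch of that parse:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// One line of `git ls-remote`: 40 hex chars of SHA, a tab, then the ref.
	// The removed code's magic offsets follow from that layout: bytes 41-45
	// are "refs/", so [46:51] == "heads" and [46:50] == "tags".
	line := "1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b\trefs/tags/v1.0.0^{}"

	sha, ref := line[:40], line[41:]
	switch {
	case strings.HasPrefix(ref, "refs/heads/"):
		fmt.Printf("branch %s @ %s\n", strings.TrimPrefix(ref, "refs/heads/"), sha)
	case strings.HasPrefix(ref, "refs/tags/"):
		tag := strings.TrimPrefix(ref, "refs/tags/")
		// "^{}" marks the peeled (dereferenced) commit of an annotated
		// tag - the revision the solver actually wants to pair with.
		peeled := strings.HasSuffix(tag, "^{}")
		fmt.Printf("tag %s (peeled=%v) @ %s\n", strings.TrimSuffix(tag, "^{}"), peeled, sha)
	}
}
```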
-func (r *repo) exportVersionTo(v Version, to string) error {
-	r.mut.Lock()
-	defer r.mut.Unlock()
-
-	switch r.r.(type) {
-	case *vcs.GitRepo:
-		// Back up original index
-		idx, bak := path.Join(r.rpath, ".git", "index"), path.Join(r.rpath, ".git", "origindex")
-		err := os.Rename(idx, bak)
-		if err != nil {
-			return err
-		}
-
-		// TODO(sdboyer) could have an err here
-		defer os.Rename(bak, idx)
-
-		vstr := v.String()
-		if rv, ok := v.(PairedVersion); ok {
-			vstr = rv.Underlying().String()
-		}
-		_, err = r.r.RunFromDir("git", "read-tree", vstr)
-		if err != nil {
-			return err
-		}
-
-		// Ensure we have exactly one trailing slash
-		to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator)
-		// Checkout from our temporary index to the desired target location on disk;
-		// now it's git's job to make it fast. Sadly, this approach *does* also
-		// write out vendor dirs. There doesn't appear to be a way to make
-		// checkout-index respect sparse checkout rules (-a supersedes it);
-		// the alternative is using plain checkout, though we have a bunch of
-		// housekeeping to do to set up, then tear down, the sparse checkout
-		// controls, as well as restore the original index and HEAD.
-		_, err = r.r.RunFromDir("git", "checkout-index", "-a", "--prefix="+to)
-		return err
-	default:
-		// TODO(sdboyer) This is a dumb, slow approach, but we're punting on making these
-		// fast for now because git is the OVERWHELMING case
-		r.r.UpdateVersion(v.String())
-
-		cfg := &shutil.CopyTreeOptions{
-			Symlinks:     true,
-			CopyFunction: shutil.Copy,
-			Ignore: func(src string, contents []os.FileInfo) (ignore []string) {
-				for _, fi := range contents {
-					if !fi.IsDir() {
-						continue
-					}
-					n := fi.Name()
-					switch n {
-					case "vendor", ".bzr", ".svn", ".hg":
-						ignore = append(ignore, n)
-					}
-				}
-
-				return
-			},
-		}
-
-		return shutil.CopyTree(r.rpath, to, cfg)
-	}
-}
-
-// This func copied from Masterminds/vcs so we can exec our own commands
-func mergeEnvLists(in, out []string) []string {
-NextVar:
-	for _, inkv := range in {
-		k := strings.SplitAfterN(inkv, "=", 2)[0]
-		for i, outkv := range out {
-			if strings.HasPrefix(outkv, k) {
-				out[i] = inkv
-				continue NextVar
-			}
-		}
-		out = append(out, inkv)
-	}
-	return out
-}
-
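mergeEnvLists overlays one environment list onto another so that, for example, GIT_TERMINAL_PROMPT=0 wins over any inherited value. A self-contained restatement of its behavior (the helper name here is ours):

```go
package main

import (
	"fmt"
	"strings"
)

// overlayEnv restates the removed mergeEnvLists: entries in `in` replace
// same-keyed entries in `out`; anything unmatched is appended.
func overlayEnv(in, out []string) []string {
	merged := append([]string(nil), out...)
NextVar:
	for _, inkv := range in {
		k := strings.SplitAfterN(inkv, "=", 2)[0] // key including '='
		for i, outkv := range merged {
			if strings.HasPrefix(outkv, k) {
				merged[i] = inkv
				continue NextVar
			}
		}
		merged = append(merged, inkv)
	}
	return merged
}

func main() {
	env := overlayEnv(
		[]string{"GIT_TERMINAL_PROMPT=0"},
		[]string{"PATH=/usr/bin", "GIT_TERMINAL_PROMPT=1"},
	)
	fmt.Println(env) // [PATH=/usr/bin GIT_TERMINAL_PROMPT=0]
}
```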
-func stripVendor(path string, info os.FileInfo, err error) error {
-	if info.Name() == "vendor" {
-		if _, err := os.Lstat(path); err == nil {
-			if info.IsDir() {
-				return removeAll(path)
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/sdboyer/gps/remote.go b/vendor/github.com/sdboyer/gps/remote.go
deleted file mode 100644
index c808d9a..0000000
--- a/vendor/github.com/sdboyer/gps/remote.go
+++ /dev/null
@@ -1,306 +0,0 @@
-package gps
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"regexp"
-	"strings"
-)
-
-// A remoteRepo represents a potential remote repository resource.
-//
-// RemoteRepos are based purely on lexical analysis; successfully constructing
-// one is not a guarantee that the resource it identifies actually exists or is
-// accessible.
-type remoteRepo struct {
-	Base     string
-	RelPkg   string
-	CloneURL *url.URL
-	Schemes  []string
-	VCS      []string
-}
-
-//type remoteResult struct {
-//r   remoteRepo
-//err error
-//}
-
-// TODO(sdboyer) sync access to this map
-//var remoteCache = make(map[string]remoteResult)
-
-// Regexes for the different known import path flavors
-var (
-	// This regex allowed some usernames that github currently disallows. They
-	// may have allowed them in the past; keeping it in case we need to revert.
-	//ghRegex      = regexp.MustCompile(`^(?P<root>github\.com/([A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`)
-	ghRegex      = regexp.MustCompile(`^(?P<root>github\.com/([A-Za-z0-9][-A-Za-z0-9]*[A-Za-z0-9]/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
-	gpinNewRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-zA-Z0-9][-a-zA-Z0-9]+)/)?([a-zA-Z][-.a-zA-Z0-9]*)\.((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)(?:\.git)?)((?:/[a-zA-Z0-9][-.a-zA-Z0-9]*)*)$`)
-	//gpinOldRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-z0-9][-a-z0-9]+)/)?((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)/([a-zA-Z][-a-zA-Z0-9]*)(?:\.git)?)((?:/[a-zA-Z][-a-zA-Z0-9]*)*)$`)
-	bbRegex = regexp.MustCompile(`^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
-	//lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+)(/[A-Za-z0-9-._]+)?)(/.+)?`)
-	lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+))((?:/[A-Za-z0-9_.\-]+)*)?`)
-	//glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+)$`)
-	glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
-	//gcRegex      = regexp.MustCompile(`^(?P<root>code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`)
-	jazzRegex    = regexp.MustCompile(`^(?P<root>hub\.jazz\.net/(git/[a-z0-9]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
-	apacheRegex  = regexp.MustCompile(`^(?P<root>git\.apache\.org/([a-z0-9_.\-]+\.git))((?:/[A-Za-z0-9_.\-]+)*)$`)
-	genericRegex = regexp.MustCompile(`^(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/~]*?)\.(?P<vcs>bzr|git|hg|svn))((?:/[A-Za-z0-9_.\-]+)*)$`)
-)
-
-// Other helper regexes
-var (
-	scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
-	pathvld     = regexp.MustCompile(`^([A-Za-z0-9-]+)(\.[A-Za-z0-9-]+)+(/[A-Za-z0-9-_.~]+)*$`)
-)
-
-// deduceRemoteRepo takes a potential import path and returns a RemoteRepo
-// representing the remote location of the source of an import path. Remote
-// repositories can be bare import paths, or urls including a checkout scheme.
-func deduceRemoteRepo(path string) (rr *remoteRepo, err error) {
-	rr = &remoteRepo{}
-	if m := scpSyntaxRe.FindStringSubmatch(path); m != nil {
-		// Match SCP-like syntax and convert it to a URL.
-		// Eg, "git@github.com:user/repo" becomes
-		// "ssh://git@github.com/user/repo".
-		rr.CloneURL = &url.URL{
-			Scheme: "ssh",
-			User:   url.User(m[1]),
-			Host:   m[2],
-			Path:   "/" + m[3],
-			// TODO(sdboyer) This is what stdlib sets; grok why better
-			//RawPath: m[3],
-		}
-	} else {
-		rr.CloneURL, err = url.Parse(path)
-		if err != nil {
-			return nil, fmt.Errorf("%q is not a valid import path", path)
-		}
-	}
-
-	if rr.CloneURL.Host != "" {
-		path = rr.CloneURL.Host + "/" + strings.TrimPrefix(rr.CloneURL.Path, "/")
-	} else {
-		path = rr.CloneURL.Path
-	}
-
-	if !pathvld.MatchString(path) {
-		return nil, fmt.Errorf("%q is not a valid import path", path)
-	}
-
-	if rr.CloneURL.Scheme != "" {
-		rr.Schemes = []string{rr.CloneURL.Scheme}
-	}
-
-	// TODO(sdboyer) instead of a switch, encode base domain in radix tree and pick
-	// detector from there; if failure, then fall back on metadata work
-
-	switch {
-	case ghRegex.MatchString(path):
-		v := ghRegex.FindStringSubmatch(path)
-
-		rr.CloneURL.Host = "github.com"
-		rr.CloneURL.Path = v[2]
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[3], "/")
-		rr.VCS = []string{"git"}
-
-		return
-
-	case gpinNewRegex.MatchString(path):
-		v := gpinNewRegex.FindStringSubmatch(path)
-		// Duplicate some logic from the gopkg.in server in order to validate
-		// the import path string without having to hit the server
-		if strings.Contains(v[4], ".") {
-			return nil, fmt.Errorf("%q is not a valid import path; gopkg.in only allows major versions (%q instead of %q)",
-				path, v[4][:strings.Index(v[4], ".")], v[4])
-		}
-
-		// gopkg.in is always backed by github
-		rr.CloneURL.Host = "github.com"
-		// If the third position is empty, it's the shortened form that expands
-		// to the go-pkg github user
-		if v[2] == "" {
-			rr.CloneURL.Path = "go-pkg/" + v[3]
-		} else {
-			rr.CloneURL.Path = v[2] + "/" + v[3]
-		}
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[6], "/")
-		rr.VCS = []string{"git"}
-
-		return
-	//case gpinOldRegex.MatchString(path):
-
-	case bbRegex.MatchString(path):
-		v := bbRegex.FindStringSubmatch(path)
-
-		rr.CloneURL.Host = "bitbucket.org"
-		rr.CloneURL.Path = v[2]
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[3], "/")
-		rr.VCS = []string{"git", "hg"}
-
-		return
-
-	//case gcRegex.MatchString(path):
-	//v := gcRegex.FindStringSubmatch(path)
-
-	//rr.CloneURL.Host = "code.google.com"
-	//rr.CloneURL.Path = "p/" + v[2]
-	//rr.Base = v[1]
-	//rr.RelPkg = strings.TrimPrefix(v[5], "/")
-	//rr.VCS = []string{"hg", "git"}
-
-	//return
-
-	case lpRegex.MatchString(path):
-		// TODO(sdboyer) lp handling is nasty - there's ambiguities which can only really
-		// be resolved with a metadata request. See https://github.com/golang/go/issues/11436
-		v := lpRegex.FindStringSubmatch(path)
-
-		rr.CloneURL.Host = "launchpad.net"
-		rr.CloneURL.Path = v[2]
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[3], "/")
-		rr.VCS = []string{"bzr"}
-
-		return
-
-	case glpRegex.MatchString(path):
-		// TODO(sdboyer) same ambiguity issues as with normal bzr lp
-		v := glpRegex.FindStringSubmatch(path)
-
-		rr.CloneURL.Host = "git.launchpad.net"
-		rr.CloneURL.Path = v[2]
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[3], "/")
-		rr.VCS = []string{"git"}
-
-		return
-
-	case jazzRegex.MatchString(path):
-		v := jazzRegex.FindStringSubmatch(path)
-
-		rr.CloneURL.Host = "hub.jazz.net"
-		rr.CloneURL.Path = v[2]
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[3], "/")
-		rr.VCS = []string{"git"}
-
-		return
-
-	case apacheRegex.MatchString(path):
-		v := apacheRegex.FindStringSubmatch(path)
-
-		rr.CloneURL.Host = "git.apache.org"
-		rr.CloneURL.Path = v[2]
-		rr.Base = v[1]
-		rr.RelPkg = strings.TrimPrefix(v[3], "/")
-		rr.VCS = []string{"git"}
-
-		return
-
-	// try the general syntax
-	case genericRegex.MatchString(path):
-		v := genericRegex.FindStringSubmatch(path)
-		switch v[5] {
-		case "git", "hg", "bzr":
-			x := strings.SplitN(v[1], "/", 2)
-			// TODO(sdboyer) is this actually correct for bzr?
-			rr.CloneURL.Host = x[0]
-			rr.CloneURL.Path = x[1]
-			rr.VCS = []string{v[5]}
-			rr.Base = v[1]
-			rr.RelPkg = strings.TrimPrefix(v[6], "/")
-			return
-		default:
-			return nil, fmt.Errorf("unknown repository type: %q", v[5])
-		}
-	}
-
-	// No luck so far. maybe it's one of them vanity imports?
-	importroot, vcs, reporoot, err := parseMetadata(path)
-	if err != nil {
-		return nil, fmt.Errorf("unable to deduce repository and source type for: %q", path)
-	}
-
-	// If we got something back at all, then it supersedes the actual input for
-	// the real URL to hit
-	rr.CloneURL, err = url.Parse(reporoot)
-	if err != nil {
-		return nil, fmt.Errorf("server returned bad URL when searching for vanity import: %q", reporoot)
-	}
-
-	// We have a real URL. Set the other values and return.
-	rr.Base = importroot
-	rr.RelPkg = strings.TrimPrefix(path[len(importroot):], "/")
-
-	rr.VCS = []string{vcs}
-	if rr.CloneURL.Scheme != "" {
-		rr.Schemes = []string{rr.CloneURL.Scheme}
-	}
-
-	return rr, nil
-}
-
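The SCP-syntax branch at the top of deduceRemoteRepo is worth seeing in isolation: it converts "user@host:path" remotes, which url.Parse alone mishandles, into proper ssh URLs. A runnable sketch using the same regex:

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// Same pattern the removed code used to spot "user@host:path" remotes.
var scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)

func main() {
	in := "git@github.com:sdboyer/gps"
	m := scpSyntaxRe.FindStringSubmatch(in)
	if m == nil {
		fmt.Println("not SCP-like; fall through to url.Parse")
		return
	}
	u := &url.URL{
		Scheme: "ssh",
		User:   url.User(m[1]),
		Host:   m[2],
		Path:   "/" + m[3],
	}
	fmt.Println(u) // ssh://git@github.com/sdboyer/gps
}
```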
-// fetchMetadata fetches the remote metadata for path.
-func fetchMetadata(path string) (rc io.ReadCloser, err error) {
-	defer func() {
-		if err != nil {
-			err = fmt.Errorf("unable to determine remote metadata protocol: %s", err)
-		}
-	}()
-
-	// try https first
-	rc, err = doFetchMetadata("https", path)
-	if err == nil {
-		return
-	}
-
-	rc, err = doFetchMetadata("http", path)
-	return
-}
-
-func doFetchMetadata(scheme, path string) (io.ReadCloser, error) {
-	url := fmt.Sprintf("%s://%s?go-get=1", scheme, path)
-	switch scheme {
-	case "https", "http":
-		resp, err := http.Get(url)
-		if err != nil {
-			return nil, fmt.Errorf("failed to access url %q", url)
-		}
-		return resp.Body, nil
-	default:
-		return nil, fmt.Errorf("unknown remote protocol scheme: %q", scheme)
-	}
-}
-
-// parseMetadata fetches and decodes remote metadata for path.
-func parseMetadata(path string) (string, string, string, error) {
-	rc, err := fetchMetadata(path)
-	if err != nil {
-		return "", "", "", err
-	}
-	defer rc.Close()
-
-	imports, err := parseMetaGoImports(rc)
-	if err != nil {
-		return "", "", "", err
-	}
-	match := -1
-	for i, im := range imports {
-		if !strings.HasPrefix(path, im.Prefix) {
-			continue
-		}
-		if match != -1 {
-			return "", "", "", fmt.Errorf("multiple meta tags match import path %q", path)
-		}
-		match = i
-	}
-	if match == -1 {
-		return "", "", "", fmt.Errorf("go-import metadata not found")
-	}
-	return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil
-}
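For vanity imports, fetchMetadata/parseMetadata hit `https://{path}?go-get=1` and read the go-import meta tag. A rough, self-contained stand-in for that extraction; the real code uses an HTML tokenizer, but a regex is enough to show the "prefix vcs repo-root" shape of the data:

```go
package main

import (
	"fmt"
	"regexp"
)

// A rough stand-in for parseMetaGoImports, matching the content attribute of
// the go-import meta tag.
var goImportRe = regexp.MustCompile(`<meta\s+name="go-import"\s+content="([^"\s]+)\s+(\w+)\s+([^"\s]+)"`)

func main() {
	page := `<html><head>
<meta name="go-import" content="golang.org/x/exp git https://go.googlesource.com/exp">
</head></html>`

	if m := goImportRe.FindStringSubmatch(page); m != nil {
		fmt.Printf("prefix=%s vcs=%s root=%s\n", m[1], m[2], m[3])
		// prefix=golang.org/x/exp vcs=git root=https://go.googlesource.com/exp
	}
}
```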
diff --git a/vendor/github.com/sdboyer/gps/remote_test.go b/vendor/github.com/sdboyer/gps/remote_test.go
deleted file mode 100644
index 17de00f..0000000
--- a/vendor/github.com/sdboyer/gps/remote_test.go
+++ /dev/null
@@ -1,478 +0,0 @@
-package gps
-
-import (
-	"fmt"
-	"net/url"
-	"reflect"
-	"testing"
-)
-
-func TestDeduceRemotes(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping remote deduction test in short mode")
-	}
-
-	fixtures := []struct {
-		path string
-		want *remoteRepo
-	}{
-		{
-			"github.com/sdboyer/gps",
-			&remoteRepo{
-				Base:   "github.com/sdboyer/gps",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "sdboyer/gps",
-				},
-				Schemes: nil,
-				VCS:     []string{"git"},
-			},
-		},
-		{
-			"github.com/sdboyer/gps/foo",
-			&remoteRepo{
-				Base:   "github.com/sdboyer/gps",
-				RelPkg: "foo",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "sdboyer/gps",
-				},
-				Schemes: nil,
-				VCS:     []string{"git"},
-			},
-		},
-		{
-			"git@github.com:sdboyer/gps",
-			&remoteRepo{
-				Base:   "github.com/sdboyer/gps",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Scheme: "ssh",
-					User:   url.User("git"),
-					Host:   "github.com",
-					Path:   "sdboyer/gps",
-				},
-				Schemes: []string{"ssh"},
-				VCS:     []string{"git"},
-			},
-		},
-		{
-			"https://github.com/sdboyer/gps/foo",
-			&remoteRepo{
-				Base:   "github.com/sdboyer/gps",
-				RelPkg: "foo",
-				CloneURL: &url.URL{
-					Scheme: "https",
-					Host:   "github.com",
-					Path:   "sdboyer/gps",
-				},
-				Schemes: []string{"https"},
-				VCS:     []string{"git"},
-			},
-		},
-		{
-			"https://github.com/sdboyer/gps/foo/bar",
-			&remoteRepo{
-				Base:   "github.com/sdboyer/gps",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Scheme: "https",
-					Host:   "github.com",
-					Path:   "sdboyer/gps",
-				},
-				Schemes: []string{"https"},
-				VCS:     []string{"git"},
-			},
-		},
-		// some invalid github username patterns
-		{
-			"github.com/-sdboyer/gps/foo",
-			nil,
-		},
-		{
-			"github.com/sdboyer-/gps/foo",
-			nil,
-		},
-		{
-			"github.com/sdbo.yer/gps/foo",
-			nil,
-		},
-		{
-			"github.com/sdbo_yer/gps/foo",
-			nil,
-		},
-		{
-			"gopkg.in/sdboyer/gps.v0",
-			&remoteRepo{
-				Base:   "gopkg.in/sdboyer/gps.v0",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "sdboyer/gps",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"gopkg.in/sdboyer/gps.v0/foo",
-			&remoteRepo{
-				Base:   "gopkg.in/sdboyer/gps.v0",
-				RelPkg: "foo",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "sdboyer/gps",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"gopkg.in/sdboyer/gps.v0/foo/bar",
-			&remoteRepo{
-				Base:   "gopkg.in/sdboyer/gps.v0",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "sdboyer/gps",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"gopkg.in/yaml.v1",
-			&remoteRepo{
-				Base:   "gopkg.in/yaml.v1",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "go-pkg/yaml",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"gopkg.in/yaml.v1/foo/bar",
-			&remoteRepo{
-				Base:   "gopkg.in/yaml.v1",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "go-pkg/yaml",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			// gopkg.in only allows specifying major version in import path
-			"gopkg.in/yaml.v1.2",
-			nil,
-		},
-		// IBM hub devops services - fixtures borrowed from go get
-		{
-			"hub.jazz.net/git/user1/pkgname",
-			&remoteRepo{
-				Base:   "hub.jazz.net/git/user1/pkgname",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "hub.jazz.net",
-					Path: "git/user1/pkgname",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule",
-			&remoteRepo{
-				Base:   "hub.jazz.net/git/user1/pkgname",
-				RelPkg: "submodule/submodule/submodule",
-				CloneURL: &url.URL{
-					Host: "hub.jazz.net",
-					Path: "git/user1/pkgname",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"hub.jazz.net",
-			nil,
-		},
-		{
-			"hub2.jazz.net",
-			nil,
-		},
-		{
-			"hub.jazz.net/someotherprefix",
-			nil,
-		},
-		{
-			"hub.jazz.net/someotherprefix/user1/pkgname",
-			nil,
-		},
-		// Spaces are not valid in user names or package names
-		{
-			"hub.jazz.net/git/User 1/pkgname",
-			nil,
-		},
-		{
-			"hub.jazz.net/git/user1/pkg name",
-			nil,
-		},
-		// Dots are not valid in user names
-		{
-			"hub.jazz.net/git/user.1/pkgname",
-			nil,
-		},
-		{
-			"hub.jazz.net/git/user/pkg.name",
-			&remoteRepo{
-				Base:   "hub.jazz.net/git/user/pkg.name",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "hub.jazz.net",
-					Path: "git/user/pkg.name",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		// User names cannot have uppercase letters
-		{
-			"hub.jazz.net/git/USER/pkgname",
-			nil,
-		},
-		{
-			"bitbucket.org/sdboyer/reporoot",
-			&remoteRepo{
-				Base:   "bitbucket.org/sdboyer/reporoot",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "bitbucket.org",
-					Path: "sdboyer/reporoot",
-				},
-				VCS: []string{"git", "hg"},
-			},
-		},
-		{
-			"bitbucket.org/sdboyer/reporoot/foo/bar",
-			&remoteRepo{
-				Base:   "bitbucket.org/sdboyer/reporoot",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Host: "bitbucket.org",
-					Path: "sdboyer/reporoot",
-				},
-				VCS: []string{"git", "hg"},
-			},
-		},
-		{
-			"https://bitbucket.org/sdboyer/reporoot/foo/bar",
-			&remoteRepo{
-				Base:   "bitbucket.org/sdboyer/reporoot",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Scheme: "https",
-					Host:   "bitbucket.org",
-					Path:   "sdboyer/reporoot",
-				},
-				Schemes: []string{"https"},
-				VCS:     []string{"git", "hg"},
-			},
-		},
-		{
-			"launchpad.net/govcstestbzrrepo",
-			&remoteRepo{
-				Base:   "launchpad.net/govcstestbzrrepo",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "launchpad.net",
-					Path: "govcstestbzrrepo",
-				},
-				VCS: []string{"bzr"},
-			},
-		},
-		{
-			"launchpad.net/govcstestbzrrepo/foo/bar",
-			&remoteRepo{
-				Base:   "launchpad.net/govcstestbzrrepo",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Host: "launchpad.net",
-					Path: "govcstestbzrrepo",
-				},
-				VCS: []string{"bzr"},
-			},
-		},
-		{
-			"launchpad.net/repo root",
-			nil,
-		},
-		{
-			"git.launchpad.net/reporoot",
-			&remoteRepo{
-				Base:   "git.launchpad.net/reporoot",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "git.launchpad.net",
-					Path: "reporoot",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"git.launchpad.net/reporoot/foo/bar",
-			&remoteRepo{
-				Base:   "git.launchpad.net/reporoot",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Host: "git.launchpad.net",
-					Path: "reporoot",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"git.launchpad.net/reporoot",
-			&remoteRepo{
-				Base:   "git.launchpad.net/reporoot",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "git.launchpad.net",
-					Path: "reporoot",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"git.launchpad.net/repo root",
-			nil,
-		},
-		{
-			"git.apache.org/package-name.git",
-			&remoteRepo{
-				Base:   "git.apache.org/package-name.git",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "git.apache.org",
-					Path: "package-name.git",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		{
-			"git.apache.org/package-name.git/foo/bar",
-			&remoteRepo{
-				Base:   "git.apache.org/package-name.git",
-				RelPkg: "foo/bar",
-				CloneURL: &url.URL{
-					Host: "git.apache.org",
-					Path: "package-name.git",
-				},
-				VCS: []string{"git"},
-			},
-		},
-		// Vanity imports
-		{
-			"golang.org/x/exp",
-			&remoteRepo{
-				Base:   "golang.org/x/exp",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Scheme: "https",
-					Host:   "go.googlesource.com",
-					Path:   "/exp",
-				},
-				Schemes: []string{"https"},
-				VCS:     []string{"git"},
-			},
-		},
-		{
-			"golang.org/x/exp/inotify",
-			&remoteRepo{
-				Base:   "golang.org/x/exp",
-				RelPkg: "inotify",
-				CloneURL: &url.URL{
-					Scheme: "https",
-					Host:   "go.googlesource.com",
-					Path:   "/exp",
-				},
-				Schemes: []string{"https"},
-				VCS:     []string{"git"},
-			},
-		},
-		{
-			"rsc.io/pdf",
-			&remoteRepo{
-				Base:   "rsc.io/pdf",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Scheme: "https",
-					Host:   "github.com",
-					Path:   "/rsc/pdf",
-				},
-				Schemes: []string{"https"},
-				VCS:     []string{"git"},
-			},
-		},
-		// Regression - gh does allow two-letter usernames
-		{
-			"github.com/kr/pretty",
-			&remoteRepo{
-				Base:   "github.com/kr/pretty",
-				RelPkg: "",
-				CloneURL: &url.URL{
-					Host: "github.com",
-					Path: "kr/pretty",
-				},
-				Schemes: nil,
-				VCS:     []string{"git"},
-			},
-		},
-	}
-
-	for _, fix := range fixtures {
-		got, err := deduceRemoteRepo(fix.path)
-		want := fix.want
-
-		if want == nil {
-			if err == nil {
-				t.Errorf("deduceRemoteRepo(%q): Error expected but not received", fix.path)
-			}
-			continue
-		}
-
-		if err != nil {
-			t.Errorf("deduceRemoteRepo(%q): %v", fix.path, err)
-			continue
-		}
-
-		if got.Base != want.Base {
-			t.Errorf("deduceRemoteRepo(%q): Base was %s, wanted %s", fix.path, got.Base, want.Base)
-		}
-		if got.RelPkg != want.RelPkg {
-			t.Errorf("deduceRemoteRepo(%q): RelPkg was %s, wanted %s", fix.path, got.RelPkg, want.RelPkg)
-		}
-		if !reflect.DeepEqual(got.CloneURL, want.CloneURL) {
-			// misspelling things is cool when it makes columns line up
-			t.Errorf("deduceRemoteRepo(%q): CloneURL disagreement:\n(GOT) %s\n(WNT) %s", fix.path, ufmt(got.CloneURL), ufmt(want.CloneURL))
-		}
-		if !reflect.DeepEqual(got.VCS, want.VCS) {
-			t.Errorf("deduceRemoteRepo(%q): VCS was %s, wanted %s", fix.path, got.VCS, want.VCS)
-		}
-		if !reflect.DeepEqual(got.Schemes, want.Schemes) {
-			t.Errorf("deduceRemoteRepo(%q): Schemes was %s, wanted %s", fix.path, got.Schemes, want.Schemes)
-		}
-	}
-}
-
-// borrow from stdlib
-// more useful string for debugging than fmt's struct printer
-func ufmt(u *url.URL) string {
-	var user, pass interface{}
-	if u.User != nil {
-		user = u.User.Username()
-		if p, ok := u.User.Password(); ok {
-			pass = p
-		}
-	}
-	return fmt.Sprintf("host=%q, path=%q, opaque=%q, scheme=%q, user=%#v, pass=%#v, rawpath=%q, rawq=%q, frag=%q",
-		u.Host, u.Path, u.Opaque, u.Scheme, user, pass, u.RawPath, u.RawQuery, u.Fragment)
-}
diff --git a/vendor/github.com/sdboyer/gps/result.go b/vendor/github.com/sdboyer/gps/result.go
index e601de9..00dac45 100644
--- a/vendor/github.com/sdboyer/gps/result.go
+++ b/vendor/github.com/sdboyer/gps/result.go
@@ -25,13 +25,13 @@
 	hd []byte
 }
 
-// CreateVendorTree takes a basedir and a Lock, and exports all the projects
+// WriteDepTree takes a basedir and a Lock, and exports all the projects
 // listed in the lock to the appropriate target location within the basedir.
 //
 // It requires a SourceManager to do the work, and takes a flag indicating
 // whether or not to strip vendor directories contained in the exported
 // dependencies.
-func CreateVendorTree(basedir string, l Lock, sm SourceManager, sv bool) error {
+func WriteDepTree(basedir string, l Lock, sm SourceManager, sv bool) error {
 	err := os.MkdirAll(basedir, 0777)
 	if err != nil {
 		return err
@@ -46,7 +46,7 @@
 			return err
 		}
 
-		err = sm.ExportProject(p.Ident().ProjectRoot, p.Version(), to)
+		err = sm.ExportProject(p.Ident(), p.Version(), to)
 		if err != nil {
 			removeAll(basedir)
 			return fmt.Errorf("Error while exporting %s: %s", p.Ident().ProjectRoot, err)
diff --git a/vendor/github.com/sdboyer/gps/result_test.go b/vendor/github.com/sdboyer/gps/result_test.go
index 1aed83b..2ae07ec 100644
--- a/vendor/github.com/sdboyer/gps/result_test.go
+++ b/vendor/github.com/sdboyer/gps/result_test.go
@@ -48,12 +48,10 @@
 	tmp := path.Join(os.TempDir(), "vsolvtest")
 	os.RemoveAll(tmp)
 
-	sm, err := NewSourceManager(naiveAnalyzer{}, path.Join(tmp, "cache"), false)
-	if err != nil {
-		t.Errorf("NewSourceManager errored unexpectedly: %q", err)
-	}
+	sm, clean := mkNaiveSM(t)
+	defer clean()
 
-	err = CreateVendorTree(path.Join(tmp, "export"), r, sm, true)
+	err := WriteDepTree(path.Join(tmp, "export"), r, sm, true)
 	if err != nil {
 		t.Errorf("Unexpected error while creating vendor tree: %s", err)
 	}
@@ -77,7 +75,7 @@
 
 	// Prefetch the projects before timer starts
 	for _, lp := range r.p {
-		_, _, err := sm.GetProjectInfo(lp.Ident().ProjectRoot, lp.Version())
+		_, _, err := sm.GetManifestAndLock(lp.Ident(), lp.Version())
 		if err != nil {
 			b.Errorf("failed getting project info during prefetch: %s", err)
 			clean = false
@@ -93,7 +91,7 @@
 			// ease manual inspection
 			os.RemoveAll(exp)
 			b.StartTimer()
-			err = CreateVendorTree(exp, r, sm, true)
+			err = WriteDepTree(exp, r, sm, true)
 			b.StopTimer()
 			if err != nil {
 				b.Errorf("unexpected error after %v iterations: %s", i, err)
diff --git a/vendor/github.com/sdboyer/gps/satisfy.go b/vendor/github.com/sdboyer/gps/satisfy.go
index 8c99f47..ef9e688 100644
--- a/vendor/github.com/sdboyer/gps/satisfy.go
+++ b/vendor/github.com/sdboyer/gps/satisfy.go
@@ -1,9 +1,12 @@
 package gps
 
-// checkProject performs all constraint checks on a new project (with packages)
-// that we want to select. It determines if selecting the atom would result in
-// a state where all solver requirements are still satisfied.
-func (s *solver) checkProject(a atomWithPackages) error {
+// check performs constraint checks on the provided atom. The set of checks
+// differs slightly depending on whether the atom is pkgonly, or the entire
+// project being added for the first time.
+//
+// The goal is to determine whether selecting the atom would result in a state
+// where all the solver requirements are still satisfied.
+func (s *solver) check(a atomWithPackages, pkgonly bool) error {
 	pa := a.a
 	if nilpa == pa {
 		// This shouldn't be able to happen, but if it does, it unequivocally
@@ -11,44 +14,52 @@
 		panic("canary - checking version of empty ProjectAtom")
 	}
 
-	if err := s.checkAtomAllowable(pa); err != nil {
-		s.logSolve(err)
-		return err
+	// If we're pkgonly, then the base atom was already determined to be
+	// allowable, so we can skip the checkAtomAllowable step.
+	if !pkgonly {
+		if err := s.checkAtomAllowable(pa); err != nil {
+			s.traceInfo(err)
+			return err
+		}
 	}
 
 	if err := s.checkRequiredPackagesExist(a); err != nil {
-		s.logSolve(err)
+		s.traceInfo(err)
 		return err
 	}
 
 	deps, err := s.getImportsAndConstraintsOf(a)
 	if err != nil {
 		// An err here would be from the package fetcher; pass it straight back
-		// TODO(sdboyer) can we logSolve this?
+		// TODO(sdboyer) can we traceInfo this?
 		return err
 	}
 
+	// TODO(sdboyer) this deps list contains only packages not already selected
+	// from the target atom (assuming one is selected at all). It's fine for
+	// now, but won't be good enough when we get around to doing static
+	// analysis.
 	for _, dep := range deps {
 		if err := s.checkIdentMatches(a, dep); err != nil {
-			s.logSolve(err)
+			s.traceInfo(err)
 			return err
 		}
 		if err := s.checkDepsConstraintsAllowable(a, dep); err != nil {
-			s.logSolve(err)
+			s.traceInfo(err)
 			return err
 		}
 		if err := s.checkDepsDisallowsSelected(a, dep); err != nil {
-			s.logSolve(err)
+			s.traceInfo(err)
 			return err
 		}
 		// TODO(sdboyer) decide how to refactor in order to re-enable this. Checking for
 		// revision existence is important...but kinda obnoxious.
 		//if err := s.checkRevisionExists(a, dep); err != nil {
-		//s.logSolve(err)
+		//s.traceInfo(err)
 		//return err
 		//}
 		if err := s.checkPackageImportsFromDepExist(a, dep); err != nil {
-			s.logSolve(err)
+			s.traceInfo(err)
 			return err
 		}
 
@@ -58,53 +69,6 @@
 	return nil
 }
 
-// checkPackages performs all constraint checks for new packages being added to
-// an already-selected project. It determines if selecting the packages would
-// result in a state where all solver requirements are still satisfied.
-func (s *solver) checkPackage(a atomWithPackages) error {
-	if nilpa == a.a {
-		// This shouldn't be able to happen, but if it does, it unequivocally
-		// indicates a logical bug somewhere, so blowing up is preferable
-		panic("canary - checking version of empty ProjectAtom")
-	}
-
-	// The base atom was already validated, so we can skip the
-	// checkAtomAllowable step.
-	deps, err := s.getImportsAndConstraintsOf(a)
-	if err != nil {
-		// An err here would be from the package fetcher; pass it straight back
-		// TODO(sdboyer) can we logSolve this?
-		return err
-	}
-
-	for _, dep := range deps {
-		if err := s.checkIdentMatches(a, dep); err != nil {
-			s.logSolve(err)
-			return err
-		}
-		if err := s.checkDepsConstraintsAllowable(a, dep); err != nil {
-			s.logSolve(err)
-			return err
-		}
-		if err := s.checkDepsDisallowsSelected(a, dep); err != nil {
-			s.logSolve(err)
-			return err
-		}
-		// TODO(sdboyer) decide how to refactor in order to re-enable this. Checking for
-		// revision existence is important...but kinda obnoxious.
-		//if err := s.checkRevisionExists(a, dep); err != nil {
-		//s.logSolve(err)
-		//return err
-		//}
-		if err := s.checkPackageImportsFromDepExist(a, dep); err != nil {
-			s.logSolve(err)
-			return err
-		}
-	}
-
-	return nil
-}
-
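checkProject and checkPackage previously duplicated the whole dep-checking loop; they now collapse into one function gated by a pkgonly flag. A toy sketch of that consolidation (the names here are ours, not the solver's):

```go
package main

import "fmt"

// validate sketches the merged shape: one function, with a pkgonly flag
// deciding whether the already-vetted base-atom validation is skipped.
func validate(atom string, pkgonly bool) error {
	if !pkgonly {
		// full path (old checkProject): the atom itself must be allowable
		fmt.Println("checking atom allowability for", atom)
	}
	// shared path (old checkPackage too): validate each dep constraint
	fmt.Println("checking dep constraints of", atom)
	return nil
}

func main() {
	validate("foo 1.0.0", false) // selecting a new project
	validate("foo 1.0.0", true)  // adding packages to an already-selected one
}
```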
 // checkAtomAllowable ensures that an atom itself is acceptable with respect to
 // the constraints established by the current solution.
 func (s *solver) checkAtomAllowable(pa atom) error {
@@ -135,7 +99,7 @@
 // checkRequiredPackagesExist ensures that all required packages enumerated by
 // existing dependencies on this atom are actually present in the atom.
 func (s *solver) checkRequiredPackagesExist(a atomWithPackages) error {
-	ptree, err := s.b.listPackages(a.a.id, a.a.v)
+	ptree, err := s.b.ListPackages(a.a.id, a.a.v)
 	if err != nil {
 		// TODO(sdboyer) handle this more gracefully
 		return err
@@ -175,7 +139,7 @@
 // checkDepsConstraintsAllowable checks that the constraints of an atom on a
 // given dep are valid with respect to existing constraints.
 func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep completeDep) error {
-	dep := cdep.ProjectConstraint
+	dep := cdep.workingConstraint
 	constraint := s.sel.getConstraint(dep.Ident)
 	// Ensure the constraint expressed by the dep has at least some possible
 	// intersection with the intersection of existing constraints.
@@ -208,7 +172,7 @@
 // dep are not incompatible with the version of that dep that's already been
 // selected.
 func (s *solver) checkDepsDisallowsSelected(a atomWithPackages, cdep completeDep) error {
-	dep := cdep.ProjectConstraint
+	dep := cdep.workingConstraint
 	selected, exists := s.sel.selected(dep.Ident)
 	if exists && !s.b.matches(dep.Ident, dep.Constraint, selected.a.v) {
 		s.fail(dep.Ident)
@@ -229,7 +193,7 @@
 // identifiers with the same local name, but that disagree about where their
 // network source is.
 func (s *solver) checkIdentMatches(a atomWithPackages, cdep completeDep) error {
-	dep := cdep.ProjectConstraint
+	dep := cdep.workingConstraint
 	if cur, exists := s.names[dep.Ident.ProjectRoot]; exists {
 		if cur != dep.Ident.netName() {
 			deps := s.sel.getDependenciesOn(a.a.id)
@@ -255,13 +219,13 @@
 // checkPackageImportsFromDepExist ensures that, if the dep is already selected,
 // the newly-required set of packages being placed on it exist and are valid.
 func (s *solver) checkPackageImportsFromDepExist(a atomWithPackages, cdep completeDep) error {
-	sel, is := s.sel.selected(cdep.ProjectConstraint.Ident)
+	sel, is := s.sel.selected(cdep.workingConstraint.Ident)
 	if !is {
 		// dep is not already selected; nothing to do
 		return nil
 	}
 
-	ptree, err := s.b.listPackages(sel.a.id, sel.a.v)
+	ptree, err := s.b.ListPackages(sel.a.id, sel.a.v)
 	if err != nil {
 		// TODO(sdboyer) handle this more gracefully
 		return err
@@ -279,14 +243,15 @@
 	for _, pkg := range cdep.pl {
 		perr, has := ptree.Packages[pkg]
 		if !has || perr.Err != nil {
-			e.pl = append(e.pl, pkg)
 			if has {
 				e.prob[pkg] = perr.Err
+			} else {
+				e.prob[pkg] = nil
 			}
 		}
 	}
 
-	if len(e.pl) > 0 {
+	if len(e.prob) > 0 {
 		return e
 	}
 	return nil
@@ -301,7 +266,7 @@
 		return nil
 	}
 
-	present, _ := s.b.revisionPresentIn(cdep.Ident, r)
+	present, _ := s.b.RevisionPresentIn(cdep.Ident, r)
 	if present {
 		return nil
 	}
diff --git a/vendor/github.com/sdboyer/gps/selection.go b/vendor/github.com/sdboyer/gps/selection.go
index 6d84643..9362fb0 100644
--- a/vendor/github.com/sdboyer/gps/selection.go
+++ b/vendor/github.com/sdboyer/gps/selection.go
@@ -22,10 +22,10 @@
 // pushSelection pushes a new atomWithPackages onto the selection stack, along
 // with an indicator as to whether this selection indicates a new project *and*
 // packages, or merely some new packages on a project that was already selected.
-func (s *selection) pushSelection(a atomWithPackages, first bool) {
+func (s *selection) pushSelection(a atomWithPackages, pkgonly bool) {
 	s.projects = append(s.projects, selected{
 		a:     a,
-		first: first,
+		first: !pkgonly,
 	})
 }
 
diff --git a/vendor/github.com/sdboyer/gps/solve_basic_test.go b/vendor/github.com/sdboyer/gps/solve_basic_test.go
index 055ecc8..6b6a092 100644
--- a/vendor/github.com/sdboyer/gps/solve_basic_test.go
+++ b/vendor/github.com/sdboyer/gps/solve_basic_test.go
@@ -81,6 +81,16 @@
 // should be provided in this case. It is an error (and will panic) to try to
 // pass a revision with an underlying revision.
 func mkAtom(info string) atom {
+	// if info is "root", special case it to use the root "version"
+	if info == "root" {
+		return atom{
+			id: ProjectIdentifier{
+				ProjectRoot: ProjectRoot("root"),
+			},
+			v: rootRev,
+		}
+	}
+
 	id, ver, rev := nvrSplit(info)
 
 	var v Version
@@ -113,7 +123,7 @@
 	}
 }
 
-// mkPDep splits the input string on a space, and uses the first two elements
+// mkPCstrnt splits the input string on a space, and uses the first two elements
 // as the project identifier and constraint body, respectively.
 //
 // The constraint body may have a leading character indicating the type of
@@ -124,7 +134,7 @@
 //  r: create a revision.
 //
 // If no leading character is used, a semver constraint is assumed.
-func mkPDep(info string) ProjectConstraint {
+func mkPCstrnt(info string) ProjectConstraint {
 	id, ver, rev := nvrSplit(info)
 
 	var c Constraint
@@ -164,6 +174,21 @@
 	}
 }
 
+// mkCDep composes a completeDep struct from the inputs.
+//
+// The only real work here is passing the initial string to mkPCstrnt. All the
+// other args are taken as package names.
+func mkCDep(pdep string, pl ...string) completeDep {
+	pc := mkPCstrnt(pdep)
+	return completeDep{
+		workingConstraint: workingConstraint{
+			Ident:      pc.Ident,
+			Constraint: pc.Constraint,
+		},
+		pl: pl,
+	}
+}
+
 // A depspec is a fixture representing all the information a SourceManager would
 // ordinarily glean directly from interrogating a repository.
 type depspec struct {
@@ -204,12 +229,53 @@
 			sl = &ds.deps
 		}
 
-		*sl = append(*sl, mkPDep(dep))
+		*sl = append(*sl, mkPCstrnt(dep))
 	}
 
 	return ds
 }
 
+func mkDep(atom, pdep string, pl ...string) dependency {
+	return dependency{
+		depender: mkAtom(atom),
+		dep:      mkCDep(pdep, pl...),
+	}
+}
+
+func mkADep(atom, pdep string, c Constraint, pl ...string) dependency {
+	return dependency{
+		depender: mkAtom(atom),
+		dep: completeDep{
+			workingConstraint: workingConstraint{
+				Ident: ProjectIdentifier{
+					ProjectRoot: ProjectRoot(pdep),
+					NetworkName: pdep,
+				},
+				Constraint: c,
+			},
+			pl: pl,
+		},
+	}
+}
+
+// mkPI creates a ProjectIdentifier with the ProjectRoot as the provided
+// string, and with the NetworkName normalized to be the same.
+func mkPI(root string) ProjectIdentifier {
+	return ProjectIdentifier{
+		ProjectRoot: ProjectRoot(root),
+		NetworkName: root,
+	}
+}
+
+// mkSVC creates a new semver constraint, panicking if an error is returned.
+func mkSVC(body string) Constraint {
+	c, err := NewSemverConstraint(body)
+	if err != nil {
+		panic(fmt.Sprintf("Error while trying to create semver constraint from %s: %s", body, err.Error()))
+	}
+	return c
+}
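For orientation, a call-shape sketch of the new fixture helpers; these are package-internal test utilities, so the snippet is illustrative rather than runnable on its own:

```go
a := mkAtom("foo 1.0.0")                     // atom: project foo at 1.0.0
pc := mkPCstrnt("foo ^1.0.0")                // ProjectConstraint from "name body"
cd := mkCDep("foo ^1.0.0", "foo", "foo/bar") // constraint plus imported-package list
d := mkDep("root", "foo 1.0.0", "foo")       // depender atom wired to a completeDep
pi := mkPI("foo")                            // identifier with NetworkName mirrored
c := mkSVC("^1.0.0")                         // bare semver Constraint; panics on bad input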
+
 // mklock makes a fixLock, suitable to act as a lock file
 func mklock(pairs ...string) fixLock {
 	l := make(fixLock, 0)
@@ -287,10 +353,11 @@
 
 type specfix interface {
 	name() string
+	rootmanifest() RootManifest
 	specs() []depspec
 	maxTries() int
-	expectErrs() []string
 	solution() map[string]Version
+	failure() error
 }
 
 // A basicFixture is a declarative test fixture that can cover a wide variety of
@@ -320,8 +387,10 @@
 	downgrade bool
 	// lock file simulator, if one's to be used at all
 	l fixLock
-	// projects expected to have errors, if any
-	errp []string
+	// solve failure expected, if any
+	fail error
+	// overrides, if any
+	ovr ProjectConstraints
 	// request up/downgrade to all projects
 	changeall bool
 }
@@ -338,14 +407,22 @@
 	return f.maxAttempts
 }
 
-func (f basicFixture) expectErrs() []string {
-	return f.errp
-}
-
 func (f basicFixture) solution() map[string]Version {
 	return f.r
 }
 
+func (f basicFixture) rootmanifest() RootManifest {
+	return simpleRootManifest{
+		c:   f.ds[0].deps,
+		tc:  f.ds[0].devdeps,
+		ovr: f.ovr,
+	}
+}
+
+func (f basicFixture) failure() error {
+	return f.fail
+}
+
 // A table of basicFixtures, used in the basic solving test set.
 var basicFixtures = map[string]basicFixture{
 	// basic fixtures
@@ -448,8 +525,21 @@
 			mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
 			mkDepspec("bar 1.0.0"),
 		},
-		// TODO(sdboyer) ugh; do real error comparison instead of shitty abstraction
-		errp: []string{"foo", "foo", "root"},
+		fail: &noVersionError{
+			pn: mkPI("foo"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &sourceMismatchFailure{
+						shared:   ProjectRoot("bar"),
+						current:  "bar",
+						mismatch: "baz",
+						prob:     mkAtom("foo 1.0.0"),
+						sel:      []dependency{mkDep("root", "foo 1.0.0", "foo")},
+					},
+				},
+			},
+		},
 	},
 	// fixtures with locks
 	"with compatible locked dependency": {
@@ -679,7 +769,27 @@
 			mkDepspec("foo 2.0.0"),
 			mkDepspec("foo 2.1.3"),
 		},
-		errp: []string{"foo", "root"},
+		fail: &noVersionError{
+			pn: mkPI("foo"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("2.1.3"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("foo 2.1.3"),
+						failparent: []dependency{mkDep("root", "foo ^1.0.0", "foo")},
+						c:          mkSVC("^1.0.0"),
+					},
+				},
+				{
+					v: NewVersion("2.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("foo 2.0.0"),
+						failparent: []dependency{mkDep("root", "foo ^1.0.0", "foo")},
+						c:          mkSVC("^1.0.0"),
+					},
+				},
+			},
+		},
 	},
 	"no version that matches combined constraint": {
 		ds: []depspec{
@@ -689,7 +799,27 @@
 			mkDepspec("shared 2.5.0"),
 			mkDepspec("shared 3.5.0"),
 		},
-		errp: []string{"shared", "foo", "bar"},
+		fail: &noVersionError{
+			pn: mkPI("shared"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("3.5.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("shared 3.5.0"),
+						failparent: []dependency{mkDep("foo 1.0.0", "shared >=2.0.0, <3.0.0", "shared")},
+						c:          mkSVC(">=2.9.0, <3.0.0"),
+					},
+				},
+				{
+					v: NewVersion("2.5.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("shared 2.5.0"),
+						failparent: []dependency{mkDep("bar 1.0.0", "shared >=2.9.0, <4.0.0", "shared")},
+						c:          mkSVC(">=2.9.0, <3.0.0"),
+					},
+				},
+			},
+		},
 	},
 	"disjoint constraints": {
 		ds: []depspec{
@@ -699,8 +829,20 @@
 			mkDepspec("shared 2.0.0"),
 			mkDepspec("shared 4.0.0"),
 		},
-		//errp: []string{"shared", "foo", "bar"}, // dart's has this...
-		errp: []string{"foo", "bar"},
+		fail: &noVersionError{
+			pn: mkPI("foo"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &disjointConstraintFailure{
+						goal:      mkDep("foo 1.0.0", "shared <=2.0.0", "shared"),
+						failsib:   []dependency{mkDep("bar 1.0.0", "shared >3.0.0", "shared")},
+						nofailsib: nil,
+						c:         mkSVC(">3.0.0"),
+					},
+				},
+			},
+		},
 	},
 	"no valid solution": {
 		ds: []depspec{
@@ -710,8 +852,26 @@
 			mkDepspec("b 1.0.0", "a 2.0.0"),
 			mkDepspec("b 2.0.0", "a 1.0.0"),
 		},
-		errp:        []string{"b", "a"},
-		maxAttempts: 2,
+		fail: &noVersionError{
+			pn: mkPI("b"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("2.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("b 2.0.0"),
+						failparent: []dependency{mkDep("a 1.0.0", "b 1.0.0", "b")},
+						c:          mkSVC("1.0.0"),
+					},
+				},
+				{
+					v: NewVersion("1.0.0"),
+					f: &constraintNotAllowedFailure{
+						goal: mkDep("b 1.0.0", "a 2.0.0", "a"),
+						v:    NewVersion("1.0.0"),
+					},
+				},
+			},
+		},
 	},
 	"no version that matches while backtracking": {
 		ds: []depspec{
@@ -719,7 +879,19 @@
 			mkDepspec("a 1.0.0"),
 			mkDepspec("b 1.0.0"),
 		},
-		errp: []string{"b", "root"},
+		fail: &noVersionError{
+			pn: mkPI("b"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("b 1.0.0"),
+						failparent: []dependency{mkDep("root", "b >1.0.0", "b")},
+						c:          mkSVC(">1.0.0"),
+					},
+				},
+			},
+		},
 	},
 	// The latest versions of a and b disagree on c. An older version of either
 	// will resolve the problem. This test validates that b, which is farther
@@ -829,8 +1001,19 @@
 			mkDepspec("bar 3.0.0"),
 			mkDepspec("none 1.0.0"),
 		},
-		errp:        []string{"none", "foo"},
-		maxAttempts: 1,
+		fail: &noVersionError{
+			pn: mkPI("none"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("none 1.0.0"),
+						failparent: []dependency{mkDep("foo 1.0.0", "none 2.0.0", "none")},
+						c:          mkSVC("2.0.0"),
+					},
+				},
+			},
+		},
 	},
 	// If there"s a disjoint constraint on a package, then selecting other
 	// versions of it is a waste of time: no possible versions can match. We
@@ -866,6 +1049,59 @@
 			"foo r123abc",
 		),
 	},
+	// Some basic override checks
+	"override root's own constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 2.0.0", "b 1.0.0"),
+			mkDepspec("b 1.0.0"),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("a"): ProjectProperties{
+				Constraint: NewVersion("1.0.0"),
+			},
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	"override dep's constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *"),
+			mkDepspec("a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 2.0.0", "b 1.0.0"),
+			mkDepspec("b 1.0.0"),
+			mkDepspec("b 2.0.0"),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("b"): ProjectProperties{
+				Constraint: NewVersion("2.0.0"),
+			},
+		},
+		r: mksolution(
+			"a 2.0.0",
+			"b 2.0.0",
+		),
+	},
+	"overridden mismatched net addrs, alt in dep, back to default": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("bar"): ProjectProperties{
+				NetworkName: "bar",
+			},
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+	},
+
 	// TODO(sdboyer) decide how to refactor the solver in order to re-enable these.
 	// Checking for revision existence is important...but kinda obnoxious.
 	//{
@@ -966,37 +1202,43 @@
 	}
 }
 
-func (sm *depspecSourceManager) GetProjectInfo(n ProjectRoot, v Version) (Manifest, Lock, error) {
+func (sm *depspecSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
 	for _, ds := range sm.specs {
-		if n == ds.n && v.Matches(ds.v) {
+		if id.ProjectRoot == ds.n && v.Matches(ds.v) {
 			return ds, dummyLock{}, nil
 		}
 	}
 
 	// TODO(sdboyer) proper solver-type errors
-	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", n, v)
+	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v)
 }
 
-func (sm *depspecSourceManager) ExternalReach(n ProjectRoot, v Version) (map[string][]string, error) {
-	id := pident{n: n, v: v}
-	if m, exists := sm.rm[id]; exists {
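+// AnalyzerInfo reports a fixed analyzer name and version for the fixture source manager.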
+func (sm *depspecSourceManager) AnalyzerInfo() (string, *semver.Version) {
+	return "depspec-sm-builtin", sv("v1.0.0")
+}
+
+func (sm *depspecSourceManager) ExternalReach(id ProjectIdentifier, v Version) (map[string][]string, error) {
+	pid := pident{n: id.ProjectRoot, v: v}
+	if m, exists := sm.rm[pid]; exists {
 		return m, nil
 	}
-	return nil, fmt.Errorf("No reach data for %s at version %s", n, v)
+	return nil, fmt.Errorf("No reach data for %s at version %s", id.errString(), v)
 }
 
-func (sm *depspecSourceManager) ListExternal(n ProjectRoot, v Version) ([]string, error) {
+func (sm *depspecSourceManager) ListExternal(id ProjectIdentifier, v Version) ([]string, error) {
 	// This should only be called for the root
-	id := pident{n: n, v: v}
-	if r, exists := sm.rm[id]; exists {
-		return r[string(n)], nil
+	pid := pident{n: id.ProjectRoot, v: v}
+	if r, exists := sm.rm[pid]; exists {
+		return r[string(id.ProjectRoot)], nil
 	}
-	return nil, fmt.Errorf("No reach data for %s at version %s", n, v)
+	return nil, fmt.Errorf("No reach data for %s at version %s", id.errString(), v)
 }
 
-func (sm *depspecSourceManager) ListPackages(n ProjectRoot, v Version) (PackageTree, error) {
-	id := pident{n: n, v: v}
-	if r, exists := sm.rm[id]; exists {
+func (sm *depspecSourceManager) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	pid := pident{n: id.ProjectRoot, v: v}
+	n := id.ProjectRoot
+
+	if r, exists := sm.rm[pid]; exists {
 		ptree := PackageTree{
 			ImportRoot: string(n),
 			Packages: map[string]PackageOrErr{
@@ -1015,35 +1257,35 @@
 	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", n, v)
 }
 
-func (sm *depspecSourceManager) ListVersions(name ProjectRoot) (pi []Version, err error) {
+func (sm *depspecSourceManager) ListVersions(id ProjectIdentifier) (pi []Version, err error) {
 	for _, ds := range sm.specs {
 		// To simulate the behavior of the real SourceManager, we do not return
 		// revisions from ListVersions().
-		if _, isrev := ds.v.(Revision); !isrev && name == ds.n {
+		if _, isrev := ds.v.(Revision); !isrev && id.ProjectRoot == ds.n {
 			pi = append(pi, ds.v)
 		}
 	}
 
 	if len(pi) == 0 {
-		err = fmt.Errorf("Project %s could not be found", name)
+		err = fmt.Errorf("Project %s could not be found", id.errString())
 	}
 
 	return
 }
 
-func (sm *depspecSourceManager) RevisionPresentIn(name ProjectRoot, r Revision) (bool, error) {
+func (sm *depspecSourceManager) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
 	for _, ds := range sm.specs {
-		if name == ds.n && r == ds.v {
+		if id.ProjectRoot == ds.n && r == ds.v {
 			return true, nil
 		}
 	}
 
-	return false, fmt.Errorf("Project %s has no revision %s", name, r)
+	return false, fmt.Errorf("Project %s has no revision %s", id.errString(), r)
 }
 
-func (sm *depspecSourceManager) RepoExists(name ProjectRoot) (bool, error) {
+func (sm *depspecSourceManager) SourceExists(id ProjectIdentifier) (bool, error) {
 	for _, ds := range sm.specs {
-		if name == ds.n {
+		if id.ProjectRoot == ds.n {
 			return true, nil
 		}
 	}
@@ -1051,16 +1293,26 @@
 	return false, nil
 }
 
-func (sm *depspecSourceManager) VendorCodeExists(name ProjectRoot) (bool, error) {
+func (sm *depspecSourceManager) VendorCodeExists(id ProjectIdentifier) (bool, error) {
 	return false, nil
 }
 
 func (sm *depspecSourceManager) Release() {}
 
-func (sm *depspecSourceManager) ExportProject(n ProjectRoot, v Version, to string) error {
+func (sm *depspecSourceManager) ExportProject(id ProjectIdentifier, v Version, to string) error {
 	return fmt.Errorf("dummy sm doesn't support exporting")
 }
 
+func (sm *depspecSourceManager) DeduceProjectRoot(ip string) (ProjectRoot, error) {
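+	// A fixture project is the root if its name is the import path itself, or
+	// a path prefix of it.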
+	for _, ds := range sm.allSpecs() {
+		n := string(ds.n)
+		if ip == n || strings.HasPrefix(ip, n+"/") {
+			return ProjectRoot(n), nil
+		}
+	}
+	return "", fmt.Errorf("Could not find %s, or any parent, in list of known fixtures", ip)
+}
+
 func (sm *depspecSourceManager) rootSpec() depspec {
 	return sm.specs[0]
 }
@@ -1084,7 +1336,7 @@
 	dsm := b.sm.(fixSM)
 	root := dsm.rootSpec()
 
-	ptree, err := dsm.ListPackages(root.n, nil)
+	ptree, err := dsm.ListPackages(mkPI(string(root.n)), nil)
 	if err != nil {
 		return nil, err
 	}
@@ -1102,23 +1354,8 @@
 	return nil
 }
 
-func (b *depspecBridge) listPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
-	return b.sm.(fixSM).ListPackages(b.key(id), v)
-}
-
-// override deduceRemoteRepo on bridge to make all our pkg/project mappings work
-// as expected
-func (b *depspecBridge) deduceRemoteRepo(path string) (*remoteRepo, error) {
-	for _, ds := range b.sm.(fixSM).allSpecs() {
-		n := string(ds.n)
-		if path == n || strings.HasPrefix(path, n+"/") {
-			return &remoteRepo{
-				Base:   n,
-				RelPkg: strings.TrimPrefix(path, n+"/"),
-			}, nil
-		}
-	}
-	return nil, fmt.Errorf("Could not find %s, or any parent, in list of known fixtures", path)
+func (b *depspecBridge) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	return b.sm.(fixSM).ListPackages(id, v)
 }
 
 // enforce interfaces
diff --git a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
index 09333e0..f62619d 100644
--- a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
+++ b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
@@ -320,7 +320,25 @@
 				pkg("a"),
 			),
 		},
-		errp: []string{"a", "root", "a"},
+		fail: &noVersionError{
+			pn: mkPI("a"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &checkeeHasProblemPackagesFailure{
+						goal: mkAtom("a 1.0.0"),
+						failpkg: map[string]errDeppers{
+							"a/foo": errDeppers{
+								err: nil, // nil indicates package is missing
+								deppers: []atom{
+									mkAtom("root"),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
 	},
 	// Transitive deps from one project (a) get incrementally included as other
 	// deps incorporate its various packages, and fail with proper error when we
@@ -345,7 +363,21 @@
 				pkg("d", "a/nonexistent"),
 			),
 		},
-		errp: []string{"d", "a", "d"},
+		fail: &noVersionError{
+			pn: mkPI("d"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &depHasProblemPackagesFailure{
+						goal: mkADep("d 1.0.0", "a", Any(), "a/nonexistent"),
+						v:    NewVersion("1.0.0"),
+						prob: map[string]error{
+							"a/nonexistent": nil,
+						},
+					},
+				},
+			},
+		},
 	},
 	// Check ignores on the root project
 	"ignore in double-subpkg": {
@@ -466,6 +498,66 @@
 			"b 2.0.0 barrev",
 		),
 	},
+	"override unconstrained root import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a")),
+			dsp(mkDepspec("a 2.0.0"),
+				pkg("a")),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("a"): ProjectProperties{
+				Constraint: NewVersion("1.0.0"),
+			},
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	"overridden mismatched net addrs, alt in dep": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("bar")),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("bar"): ProjectProperties{
+				NetworkName: "baz",
+			},
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar from baz 1.0.0",
+		),
+	},
+	"overridden mismatched net addrs, alt in root": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "bar from baz 1.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("bar")),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("bar"): ProjectProperties{
+				NetworkName: "baz",
+			},
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar from baz 1.0.0",
+		),
+	},
 }
 
 // tpkg is a representation of a single package. It has its own import path, as
@@ -493,8 +585,10 @@
 	// map of locks for deps, if any. keys should be of the form:
 	// "<project> <version>"
 	lm map[string]fixLock
-	// projects expected to have errors, if any
-	errp []string
+	// solve failure expected, if any
+	fail error
+	// overrides, if any
+	ovr ProjectConstraints
 	// request up/downgrade to all projects
 	changeall bool
 	// pkgs to ignore
@@ -513,14 +607,28 @@
 	return f.maxAttempts
 }
 
-func (f bimodalFixture) expectErrs() []string {
-	return f.errp
-}
-
 func (f bimodalFixture) solution() map[string]Version {
 	return f.r
 }
 
+func (f bimodalFixture) rootmanifest() RootManifest {
+	m := simpleRootManifest{
+		c:   f.ds[0].deps,
+		tc:  f.ds[0].devdeps,
+		ovr: f.ovr,
+		ig:  make(map[string]bool),
+	}
+	for _, ig := range f.ignore {
+		m.ig[ig] = true
+	}
+
+	return m
+}
+
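+// failure returns the fixture's expected solve failure, if any.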
+func (f bimodalFixture) failure() error {
+	return f.fail
+}
+
 // bmSourceManager is an SM specifically for the bimodal fixtures. It composes
 // the general depspec SM, and differs from it in how it answers static analysis
 // calls, and its support for package ignores and dep lock data.
@@ -541,12 +649,12 @@
 	return sm
 }
 
-func (sm *bmSourceManager) ListPackages(n ProjectRoot, v Version) (PackageTree, error) {
+func (sm *bmSourceManager) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
 	for k, ds := range sm.specs {
 		// Cheat for root, otherwise we blow up b/c version is empty
-		if n == ds.n && (k == 0 || ds.v.Matches(v)) {
+		if id.ProjectRoot == ds.n && (k == 0 || ds.v.Matches(v)) {
 			ptree := PackageTree{
-				ImportRoot: string(n),
+				ImportRoot: string(id.ProjectRoot),
 				Packages:   make(map[string]PackageOrErr),
 			}
 			for _, pkg := range ds.pkgs {
@@ -563,13 +671,13 @@
 		}
 	}
 
-	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", n, v)
+	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v)
 }
 
-func (sm *bmSourceManager) GetProjectInfo(n ProjectRoot, v Version) (Manifest, Lock, error) {
+func (sm *bmSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
 	for _, ds := range sm.specs {
-		if n == ds.n && v.Matches(ds.v) {
-			if l, exists := sm.lm[string(n)+" "+v.String()]; exists {
+		if id.ProjectRoot == ds.n && v.Matches(ds.v) {
+			if l, exists := sm.lm[string(id.ProjectRoot)+" "+v.String()]; exists {
 				return ds, l, nil
 			}
 			return ds, dummyLock{}, nil
@@ -577,7 +685,7 @@
 	}
 
 	// TODO(sdboyer) proper solver-type errors
-	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", n, v)
+	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v)
 }
 
 // computeBimodalExternalMap takes a set of depspecs and computes an
@@ -601,10 +709,6 @@
 		workmap := make(map[string]wm)
 
 		for _, pkg := range d.pkgs {
-			if !checkPrefixSlash(filepath.Clean(pkg.path), string(d.n)) {
-				panic(fmt.Sprintf("pkg %s is not a child of %s, cannot be a part of that project", pkg.path, d.n))
-			}
-
 			w := wm{
 				ex: make(map[string]bool),
 				in: make(map[string]bool),
diff --git a/vendor/github.com/sdboyer/gps/solve_failures.go b/vendor/github.com/sdboyer/gps/solve_failures.go
new file mode 100644
index 0000000..9c144e8
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_failures.go
@@ -0,0 +1,492 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+)
+
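+// errorLevel is a bitmask classifying the severity of solver failures.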
+type errorLevel uint8
+
+// TODO(sdboyer) consistent, sensible way of handling 'type' and 'severity' - or figure
+// out that they're not orthogonal and collapse into just 'type'
+
+const (
+	warning errorLevel = 1 << iota
+	mustResolve
+	cannotResolve
+)
+
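+// a2vs renders an atom as "project@version", or as "(root)" for the root atom.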
+func a2vs(a atom) string {
+	if a.v == rootRev || a.v == nil {
+		return "(root)"
+	}
+
+	return fmt.Sprintf("%s@%s", a.id.errString(), a.v)
+}
+
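+// traceError is implemented by failure types that can provide a compact form
+// for solver trace output.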
+type traceError interface {
+	traceString() string
+}
+
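+// noVersionError indicates that the solver rejected every version it tried
+// for a project; fails records each rejected version and its failure.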
+type noVersionError struct {
+	pn    ProjectIdentifier
+	fails []failedVersion
+}
+
+func (e *noVersionError) Error() string {
+	if len(e.fails) == 0 {
+		return fmt.Sprintf("No versions found for project %q.", e.pn.ProjectRoot)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
+	for _, f := range e.fails {
+		fmt.Fprintf(&buf, "\n\t%s: %s", f.v, f.f.Error())
+	}
+
+	return buf.String()
+}
+
+func (e *noVersionError) traceString() string {
+	if len(e.fails) == 0 {
+		return "No versions found"
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
+	for _, f := range e.fails {
+		if te, ok := f.f.(traceError); ok {
+			fmt.Fprintf(&buf, "\n  %s: %s", f.v, te.traceString())
+		} else {
+			fmt.Fprintf(&buf, "\n  %s: %s", f.v, f.f.Error())
+		}
+	}
+
+	return buf.String()
+}
+
+// disjointConstraintFailure occurs when attempting to introduce an atom that
+// itself has an acceptable version, but one of its dependency constraints is
+// disjoint with one or more dependency constraints already active for that
+// identifier.
+type disjointConstraintFailure struct {
+	// goal is the dependency with the problematic constraint, forcing us to
+	// reject the atom that introduces it.
+	goal dependency
+	// failsib is the list of active dependencies that are disjoint with the
+	// goal dependency. This will be at least one, but may not be all of the
+	// active dependencies.
+	failsib []dependency
+	// nofailsib is the list of active dependencies that are NOT disjoint with
+	// the goal dependency. The total of nofailsib and failsib will always be
+	// the total number of active dependencies on the target identifier.
+	nofailsib []dependency
+	// c is the current constraint on the target identifier. It is the
+	// intersection of all the active dependencies' constraints.
+	c Constraint
+}
+
+func (e *disjointConstraintFailure) Error() string {
+	if len(e.failsib) == 1 {
+		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which has no overlap with existing constraint %s from %s"
+		return fmt.Sprintf(str, a2vs(e.goal.depender), e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String(), e.failsib[0].dep.Constraint.String(), a2vs(e.failsib[0].depender))
+	}
+
+	var buf bytes.Buffer
+
+	var sibs []dependency
+	if len(e.failsib) > 1 {
+		sibs = e.failsib
+
+		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which has no overlap with the following existing constraints:\n"
+		fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
+	} else {
+		sibs = e.nofailsib
+
+		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which does not overlap with the intersection of existing constraints from other currently selected packages:\n"
+		fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
+	}
+
+	for _, c := range sibs {
+		fmt.Fprintf(&buf, "\t%s from %s\n", c.dep.Constraint.String(), a2vs(c.depender))
+	}
+
+	return buf.String()
+}
+
+func (e *disjointConstraintFailure) traceString() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.dep.Constraint.String(), e.goal.dep.Ident.errString())
+	for _, f := range e.failsib {
+		fmt.Fprintf(
+			&buf,
+			"%s from %s (no overlap)\n",
+			f.dep.Constraint.String(),
+			a2vs(f.depender),
+		)
+	}
+	for _, f := range e.nofailsib {
+		fmt.Fprintf(
+			&buf,
+			"%s from %s (some overlap)\n",
+			f.dep.Constraint.String(),
+			a2vs(f.depender),
+		)
+	}
+
+	return buf.String()
+}
+
+// Indicates that an atom could not be introduced because one of its dep
+// constraints does not admit the currently-selected version of the target
+// project.
+type constraintNotAllowedFailure struct {
+	// The dependency with the problematic constraint that could not be
+	// introduced.
+	goal dependency
+	// The (currently selected) version of the target project that was not
+	// admissible by the goal dependency.
+	v Version
+}
+
+func (e *constraintNotAllowedFailure) Error() string {
+	return fmt.Sprintf(
+		"Could not introduce %s, as it has a dependency on %s with constraint %s, which does not allow the currently selected version of %s",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.goal.dep.Constraint,
+		e.v,
+	)
+}
+
+func (e *constraintNotAllowedFailure) traceString() string {
+	return fmt.Sprintf(
+		"%s depends on %s with %s, but that's already selected at %s",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.ProjectRoot,
+		e.goal.dep.Constraint,
+		e.v,
+	)
+}
+
+// versionNotAllowedFailure describes a failure where an atom is rejected
+// because its version is not allowed by current constraints.
+//
+// (This is one of the more straightforward types of failures)
+type versionNotAllowedFailure struct {
+	// goal is the atom that was rejected by current constraints.
+	goal atom
+	// failparent is the list of active dependencies that caused the atom to be
+	// rejected. Note that this only includes dependencies that actually
+	// rejected the atom, which will be at least one, but may not be all the
+	// active dependencies on the atom's identifier.
+	failparent []dependency
+	// c is the current constraint on the atom's identifier. This is the intersection
+	// of all active dependencies' constraints.
+	c Constraint
+}
+
+func (e *versionNotAllowedFailure) Error() string {
+	if len(e.failparent) == 1 {
+		return fmt.Sprintf(
+			"Could not introduce %s, as it is not allowed by constraint %s from project %s.",
+			a2vs(e.goal),
+			e.failparent[0].dep.Constraint.String(),
+			e.failparent[0].depender.id.errString(),
+		)
+	}
+
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "Could not introduce %s, as it is not allowed by constraints from the following projects:\n", a2vs(e.goal))
+
+	for _, f := range e.failparent {
+		fmt.Fprintf(&buf, "\t%s from %s\n", f.dep.Constraint.String(), a2vs(f.depender))
+	}
+
+	return buf.String()
+}
+
+func (e *versionNotAllowedFailure) traceString() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%s not allowed by constraint %s:\n", a2vs(e.goal), e.c.String())
+	for _, f := range e.failparent {
+		fmt.Fprintf(&buf, "  %s from %s\n", f.dep.Constraint.String(), a2vs(f.depender))
+	}
+
+	return buf.String()
+}
+
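+// missingSourceFailure indicates that no source could be located for a
+// project. prob is a fmt string with a single verb for the goal identifier.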
+type missingSourceFailure struct {
+	goal ProjectIdentifier
+	prob string
+}
+
+func (e *missingSourceFailure) Error() string {
+	return fmt.Sprintf(e.prob, e.goal)
+}
+
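+// badOptsFailure describes a problem with the arguments passed to the solver.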
+type badOptsFailure string
+
+func (e badOptsFailure) Error() string {
+	return string(e)
+}
+
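+// sourceMismatchFailure occurs when an atom needs a shared ProjectRoot from a
+// network source that conflicts with the source already established by the
+// currently selected dependencies.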
+type sourceMismatchFailure struct {
+	// The ProjectRoot over which there is disagreement about where it should be
+	// sourced from
+	shared ProjectRoot
+	// The current value for the network source
+	current string
+	// The mismatched value for the network source
+	mismatch string
+	// The currently selected dependencies which have agreed upon/established
+	// the given network source
+	sel []dependency
+	// The atom with the constraint that has the new, incompatible network source
+	prob atom
+}
+
+func (e *sourceMismatchFailure) Error() string {
+	var cur []string
+	for _, c := range e.sel {
+		cur = append(cur, string(c.depender.id.ProjectRoot))
+	}
+
+	str := "Could not introduce %s, as it depends on %s from %s, but %s is already marked as coming from %s by %s"
+	return fmt.Sprintf(str, a2vs(e.prob), e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
+}
+
+func (e *sourceMismatchFailure) traceString() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "disagreement on network addr for %s:\n", e.shared)
+
+	fmt.Fprintf(&buf, "  %s from %s\n", e.mismatch, e.prob.id.errString())
+	for _, dep := range e.sel {
+		fmt.Fprintf(&buf, "  %s from %s\n", e.current, dep.depender.id.errString())
+	}
+
+	return buf.String()
+}
+
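+// errDeppers pairs a package-level error with the selected atoms that depend
+// on the problematic package.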
+type errDeppers struct {
+	err     error
+	deppers []atom
+}
+
+// checkeeHasProblemPackagesFailure indicates that the goal atom was rejected
+// because one or more of the packages required by its deppers had errors.
+//
+// "errors" includes package nonexistence, which is indicated by a nil err in
+// the corresponding errDeppers failpkg map value.
+//
+// checkeeHasProblemPackagesFailure complements depHasProblemPackagesFailure;
+// one or the other could appear to describe the same fundamental issue,
+// depending on the order in which dependencies were visited.
+type checkeeHasProblemPackagesFailure struct {
+	// goal is the atom that was rejected due to problematic packages.
+	goal atom
+	// failpkg is a map of package names to the error describing the problem
+	// with them, plus a list of the selected atoms that require that package.
+	failpkg map[string]errDeppers
+}
+
+func (e *checkeeHasProblemPackagesFailure) Error() string {
+	var buf bytes.Buffer
+	indent := ""
+
+	if len(e.failpkg) > 1 {
+		indent = "\t"
+		fmt.Fprintf(
+			&buf, "Could not introduce %s due to multiple problematic subpackages:\n",
+			a2vs(e.goal),
+		)
+	}
+
+	for pkg, errdep := range e.failpkg {
+		var cause string
+		if errdep.err == nil {
+			cause = "is missing"
+		} else {
+			cause = fmt.Sprintf("does not contain usable Go code (%T)", errdep.err)
+		}
+
+		if len(e.failpkg) == 1 {
+			fmt.Fprintf(
+				&buf, "Could not introduce %s, as its subpackage %s %s.",
+				a2vs(e.goal),
+				pkg,
+				cause,
+			)
+		} else {
+			fmt.Fprintf(&buf, "\tSubpackage %s %s.", pkg, cause)
+		}
+
+		if len(errdep.deppers) == 1 {
+			fmt.Fprintf(
+				&buf, " (Package is required by %s.)",
+				a2vs(errdep.deppers[0]),
+			)
+		} else {
+			fmt.Fprintf(&buf, " Package is required by:")
+			for _, pa := range errdep.deppers {
+				fmt.Fprintf(&buf, "\n%s\t%s", indent, a2vs(pa))
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+func (e *checkeeHasProblemPackagesFailure) traceString() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.id.ProjectRoot, e.goal.v)
+	for pkg, errdep := range e.failpkg {
+		if errdep.err == nil {
+			fmt.Fprintf(&buf, "\t%s is missing; ", pkg)
+		} else {
+			fmt.Fprintf(&buf, "\t%s has err (%T); ", pkg, errdep.err)
+		}
+
+		if len(errdep.deppers) == 1 {
+			fmt.Fprintf(&buf, "required by %s.", a2vs(errdep.deppers[0]))
+		} else {
+			fmt.Fprintf(&buf, " required by:")
+			for _, pa := range errdep.deppers {
+				fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.id.errString(), pa.v)
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+// depHasProblemPackagesFailure indicates that the goal dependency was rejected
+// because there were problems with one or more of the packages the dependency
+// requires in the atom currently selected for that dependency. (This failure
+// can only occur if the target dependency is already selected.)
+//
+// "errors" includes package nonexistence, which is indicated by a nil err as
+// the corresponding prob map value.
+//
+// depHasProblemPackagesFailure complements checkeeHasProblemPackagesFailure;
+// one or the other could appear to describe the same fundamental issue,
+// depending on the order in which dependencies were visited.
+type depHasProblemPackagesFailure struct {
+	// goal is the dependency that was rejected due to the atom currently
+	// selected for the dependency's target id having errors (including, and
+	// probably most commonly, nonexistence) in one or more packages named by
+	// the dependency.
+	goal dependency
+	// v is the version of the currently selected atom targeted by the goal
+	// dependency.
+	v Version
+	// prob is a map of problem packages to their specific error; a nil error
+	// value indicates that the package is missing.
+	prob map[string]error
+}
+
+func (e *depHasProblemPackagesFailure) Error() string {
+	fcause := func(pkg string) string {
+		if err := e.prob[pkg]; err != nil {
+			return fmt.Sprintf("does not contain usable Go code (%T).", err)
+		}
+		return "is missing."
+	}
+
+	if len(e.prob) == 1 {
+		var pkg string
+		for pkg = range e.prob {
+		}
+
+		return fmt.Sprintf(
+			"Could not introduce %s, as it requires package %s from %s, but in version %s that package %s",
+			a2vs(e.goal.depender),
+			pkg,
+			e.goal.dep.Ident.errString(),
+			e.v,
+			fcause(pkg),
+		)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(
+		&buf, "Could not introduce %s, as it requires problematic packages from %s (current version %s):",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.v,
+	)
+
+	pkgs := make([]string, len(e.prob))
+	k := 0
+	for pkg := range e.prob {
+		pkgs[k] = pkg
+		k++
+	}
+	sort.Strings(pkgs)
+	for _, pkg := range pkgs {
+		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
+	}
+
+	return buf.String()
+}
+
+func (e *depHasProblemPackagesFailure) traceString() string {
+	var buf bytes.Buffer
+	fcause := func(pkg string) string {
+		if err := e.prob[pkg]; err != nil {
+			return fmt.Sprintf("has parsing err (%T).", err)
+		}
+		return "is missing"
+	}
+
+	fmt.Fprintf(
+		&buf, "%s depping on %s at %s has problem subpkg(s):",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.v,
+	)
+
+	pkgs := make([]string, len(e.prob))
+	k := 0
+	for pkg := range e.prob {
+		pkgs[k] = pkg
+		k++
+	}
+	sort.Strings(pkgs)
+	for _, pkg := range pkgs {
+		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
+	}
+
+	return buf.String()
+}
+
+// nonexistentRevisionFailure indicates that a revision constraint was specified
+// for a given project, but that that revision does not exist in the source
+// repository.
+type nonexistentRevisionFailure struct {
+	goal dependency
+	r    Revision
+}
+
+func (e *nonexistentRevisionFailure) Error() string {
+	return fmt.Sprintf(
+		"Could not introduce %s, as it requires %s at revision %s, but that revision does not exist",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.r,
+	)
+}
+
+func (e *nonexistentRevisionFailure) traceString() string {
+	return fmt.Sprintf(
+		"%s wants missing rev %s of %s",
+		a2vs(e.goal.depender),
+		e.r,
+		e.goal.dep.Ident.errString(),
+	)
+}
diff --git a/vendor/github.com/sdboyer/gps/solve_test.go b/vendor/github.com/sdboyer/gps/solve_test.go
index 95db023..94ed8ba 100644
--- a/vendor/github.com/sdboyer/gps/solve_test.go
+++ b/vendor/github.com/sdboyer/gps/solve_test.go
@@ -2,7 +2,6 @@
 
 import (
 	"flag"
-	"fmt"
 	"io/ioutil"
 	"log"
 	"math/rand"
@@ -31,7 +30,7 @@
 			&bridge{
 				sm:     sm,
 				s:      s,
-				vlists: make(map[ProjectRoot][]Version),
+				vlists: make(map[ProjectIdentifier][]Version),
 			},
 		}
 	}
@@ -88,7 +87,7 @@
 	params := SolveParameters{
 		RootDir:    string(fix.ds[0].n),
 		ImportRoot: ProjectRoot(fix.ds[0].n),
-		Manifest:   fix.ds[0],
+		Manifest:   fix.rootmanifest(),
 		Lock:       dummyLock{},
 		Downgrade:  fix.downgrade,
 		ChangeAll:  fix.changeall,
@@ -138,9 +137,8 @@
 	params := SolveParameters{
 		RootDir:    string(fix.ds[0].n),
 		ImportRoot: ProjectRoot(fix.ds[0].n),
-		Manifest:   fix.ds[0],
+		Manifest:   fix.rootmanifest(),
 		Lock:       dummyLock{},
-		Ignore:     fix.ignore,
 		Downgrade:  fix.downgrade,
 		ChangeAll:  fix.changeall,
 	}
@@ -154,62 +152,20 @@
 	return fixtureSolveSimpleChecks(fix, res, err, t)
 }
 
-func fixtureSolveSimpleChecks(fix specfix, res Solution, err error, t *testing.T) (Solution, error) {
+func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing.T) (Solution, error) {
+	fixfail := fix.failure()
 	if err != nil {
-		errp := fix.expectErrs()
-		if len(errp) == 0 {
-			t.Errorf("(fixture: %q) Solver failed; error was type %T, text:\n%s", fix.name(), err, err)
-			return res, err
+		if fixfail == nil {
+			t.Errorf("(fixture: %q) Solve failed unexpectedly:\n%s", fix.name(), err)
+		} else if !reflect.DeepEqual(fixfail, err) {
+			// TODO(sdboyer) reflect.DeepEqual works for now, but once we start
+			// modeling more complex cases, this should probably become more robust
+			t.Errorf("(fixture: %q) Failure mismatch:\n\t(GOT): %s\n\t(WNT): %s", fix.name(), err, fixfail)
 		}
-
-		switch fail := err.(type) {
-		case *badOptsFailure:
-			t.Errorf("(fixture: %q) Unexpected bad opts failure solve error: %s", fix.name(), err)
-		case *noVersionError:
-			if errp[0] != string(fail.pn.ProjectRoot) { // TODO(sdboyer) identifierify
-				t.Errorf("(fixture: %q) Expected failure on project %s, but was on project %s", fix.name(), errp[0], fail.pn.ProjectRoot)
-			}
-
-			ep := make(map[string]struct{})
-			for _, p := range errp[1:] {
-				ep[p] = struct{}{}
-			}
-
-			found := make(map[string]struct{})
-			for _, vf := range fail.fails {
-				for _, f := range getFailureCausingProjects(vf.f) {
-					found[f] = struct{}{}
-				}
-			}
-
-			var missing []string
-			var extra []string
-			for p := range found {
-				if _, has := ep[p]; !has {
-					extra = append(extra, p)
-				}
-			}
-			if len(extra) > 0 {
-				t.Errorf("(fixture: %q) Expected solve failures due to projects %s, but solve failures also arose from %s", fix.name(), strings.Join(errp[1:], ", "), strings.Join(extra, ", "))
-			}
-
-			for p := range ep {
-				if _, has := found[p]; !has {
-					missing = append(missing, p)
-				}
-			}
-			if len(missing) > 0 {
-				t.Errorf("(fixture: %q) Expected solve failures due to projects %s, but %s had no failures", fix.name(), strings.Join(errp[1:], ", "), strings.Join(missing, ", "))
-			}
-
-		default:
-			// TODO(sdboyer) round these out
-			panic(fmt.Sprintf("unhandled solve failure type: %s", err))
-		}
-	} else if len(fix.expectErrs()) > 0 {
-		t.Errorf("(fixture: %q) Solver succeeded, but expected failure", fix.name())
+	} else if fixfail != nil {
+		t.Errorf("(fixture: %q) Solver succeeded, but expecting failure:\n%s", fix.name(), fixfail)
 	} else {
-		r := res.(solution)
+		r := soln.(solution)
 		if fix.maxTries() > 0 && r.Attempts() > fix.maxTries() {
 			t.Errorf("(fixture: %q) Solver completed in %v attempts, but expected %v or fewer", fix.name(), r.att, fix.maxTries())
 		}
@@ -251,7 +207,7 @@
 		}
 	}
 
-	return res, err
+	return soln, err
 }
 
 // This tests that, when a root lock is underspecified (has only a version) we
@@ -293,7 +249,7 @@
 	params := SolveParameters{
 		RootDir:    string(fix.ds[0].n),
 		ImportRoot: ProjectRoot(fix.ds[0].n),
-		Manifest:   fix.ds[0],
+		Manifest:   fix.rootmanifest(),
 		Lock:       l2,
 	}
 
@@ -302,44 +258,6 @@
 	fixtureSolveSimpleChecks(fix, res, err, t)
 }
 
-func getFailureCausingProjects(err error) (projs []string) {
-	switch e := err.(type) {
-	case *noVersionError:
-		projs = append(projs, string(e.pn.ProjectRoot)) // TODO(sdboyer) identifierify
-	case *disjointConstraintFailure:
-		for _, f := range e.failsib {
-			projs = append(projs, string(f.depender.id.ProjectRoot))
-		}
-	case *versionNotAllowedFailure:
-		for _, f := range e.failparent {
-			projs = append(projs, string(f.depender.id.ProjectRoot))
-		}
-	case *constraintNotAllowedFailure:
-		// No sane way of knowing why the currently selected version is
-		// selected, so do nothing
-	case *sourceMismatchFailure:
-		projs = append(projs, string(e.prob.id.ProjectRoot))
-		for _, c := range e.sel {
-			projs = append(projs, string(c.depender.id.ProjectRoot))
-		}
-	case *checkeeHasProblemPackagesFailure:
-		projs = append(projs, string(e.goal.id.ProjectRoot))
-		for _, errdep := range e.failpkg {
-			for _, atom := range errdep.deppers {
-				projs = append(projs, string(atom.id.ProjectRoot))
-			}
-		}
-	case *depHasProblemPackagesFailure:
-		projs = append(projs, string(e.goal.depender.id.ProjectRoot), string(e.goal.dep.Ident.ProjectRoot))
-	case *nonexistentRevisionFailure:
-		projs = append(projs, string(e.goal.depender.id.ProjectRoot), string(e.goal.dep.Ident.ProjectRoot))
-	default:
-		panic(fmt.Sprintf("unknown failtype %T, msg: %s", err, err))
-	}
-
-	return
-}
-
 func TestBadSolveOpts(t *testing.T) {
 	pn := strconv.FormatInt(rand.Int63(), 36)
 	fix := basicFixtures["no dependencies"]
@@ -378,8 +296,21 @@
 	} else if !strings.Contains(err.Error(), "no logger provided") {
 		t.Error("Prepare should have given error on missing trace logger, but gave:", err)
 	}
-
 	params.TraceLogger = log.New(ioutil.Discard, "", 0)
+
+	params.Manifest = simpleRootManifest{
+		ovr: ProjectConstraints{
+			ProjectRoot("foo"): ProjectProperties{},
+		},
+	}
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on override with empty ProjectProperties")
+	} else if !strings.Contains(err.Error(), "foo, but without any non-zero properties") {
+		t.Error("Prepare should have given error override with empty ProjectProperties, but gave:", err)
+	}
+	params.Manifest = nil
+
 	_, err = Prepare(params, sm)
 	if err != nil {
 		t.Error("Basic conditions satisfied, prepare should have completed successfully, err as:", err)
@@ -391,7 +322,7 @@
 		return &bridge{
 			sm:     sm,
 			s:      s,
-			vlists: make(map[ProjectRoot][]Version),
+			vlists: make(map[ProjectIdentifier][]Version),
 		}
 	}
 
@@ -414,27 +345,3 @@
 	// swap them back...not sure if this matters, but just in case
 	overrideMkBridge()
 }
-
-func TestIgnoreDedupe(t *testing.T) {
-	fix := basicFixtures["no dependencies"]
-
-	ig := []string{"foo", "foo", "bar"}
-	params := SolveParameters{
-		RootDir:    string(fix.ds[0].n),
-		ImportRoot: ProjectRoot(fix.ds[0].n),
-		Manifest:   fix.ds[0],
-		Ignore:     ig,
-	}
-
-	s, _ := Prepare(params, newdepspecSM(basicFixtures["no dependencies"].ds, nil))
-	ts := s.(*solver)
-
-	expect := map[string]bool{
-		"foo": true,
-		"bar": true,
-	}
-
-	if !reflect.DeepEqual(ts.ig, expect) {
-		t.Errorf("Expected solver's ignore list to be deduplicated map, got %v", ts.ig)
-	}
-}
diff --git a/vendor/github.com/sdboyer/gps/solver.go b/vendor/github.com/sdboyer/gps/solver.go
index 121bc81..d82a40c 100644
--- a/vendor/github.com/sdboyer/gps/solver.go
+++ b/vendor/github.com/sdboyer/gps/solver.go
@@ -11,6 +11,8 @@
 	"github.com/armon/go-radix"
 )
 
+var rootRev = Revision("")
+
 // SolveParameters hold all arguments to a solver run.
 //
 // Only RootDir and ImportRoot are absolutely required. A nil Manifest is
@@ -40,11 +42,12 @@
 	// A non-empty string is required.
 	ImportRoot ProjectRoot
 
-	// The root manifest. This contains all the dependencies, constraints, and
-	// other controls available to the root project.
+	// The root manifest. This contains all the dependency constraints
+	// associated with normal Manifests, as well as the particular controls
+	// afforded only to the root project.
 	//
 	// May be nil, but for most cases, that would be unwise.
-	Manifest Manifest
+	Manifest RootManifest
 
 	// The root lock. Optional. Generally, this lock is the output of a previous
 	// solve run.
@@ -53,11 +56,6 @@
 	// in the lock, unless ToChange or ChangeAll settings indicate otherwise.
 	Lock Lock
 
-	// A list of packages (import paths) to ignore. These can be in the root
-	// project, or from elsewhere. Ignoring a package means that both it and its
-	// imports will be disregarded by all relevant solver operations.
-	Ignore []string
-
 	// ToChange is a list of project names that should be changed - that is, any
 	// versions specified for those projects in the root lock file should be
 	// ignored.
@@ -88,8 +86,8 @@
 	TraceLogger *log.Logger
 }
 
-// solver is a CDCL-style SAT solver with satisfiability conditions hardcoded to
-// the needs of the Go package management problem space.
+// solver is a CDCL-style constraint solver with satisfiability conditions
+// hardcoded to the needs of the Go package management problem space.
 type solver struct {
 	// The current number of attempts made over the course of this solve. This
 	// number increments each time the algorithm completes a backtrack and
@@ -149,8 +147,13 @@
 
 	// A map of the ProjectRoot (local names) that are currently selected, and
 	// the network name to which they currently correspond.
+	// TODO(sdboyer) i think this is cruft and can be removed
 	names map[ProjectRoot]string
 
+	// A ProjectConstraints map containing the validated (guaranteed non-empty)
+	// overrides declared by the root manifest.
+	ovr ProjectConstraints
+
 	// A map of the names listed in the root's lock.
 	rlm map[ProjectIdentifier]LockedProject
 
@@ -162,9 +165,19 @@
 }
 
 // A Solver is the main workhorse of gps: given a set of project inputs, it
-// performs a constraint solving analysis to develop a complete Result that can
-// be used as a lock file, and to populate a vendor directory.
+// performs a constraint solving analysis to develop a complete Solution, or
+// else fail with an informative error.
+//
+// If a Solution is found, an implementing tool may persist it - typically into
+// what a "lock file" - and/or use it to write out a directory tree of
+// dependencies, suitable to be a vendor directory, via CreateVendorTree.
 type Solver interface {
+	// HashInputs produces a hash digest representing the unique inputs to this
+	// solver. It is guaranteed that, if the hash digest is equal to the digest
+	// from a previous Solution.InputHash(), then that Solution is valid for
+	// this Solver's inputs.
+	//
+	// In such a case, it may not be necessary to run Solve() at all.
 	HashInputs() ([]byte, error)
 	Solve() (Solution, error)
 }
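+
+// A typical flow is roughly as follows (a sketch only; params, sm, and prior
+// are assumed to be supplied by the implementing tool):
+//
+//   s, err := Prepare(params, sm)
+//   if err != nil {
+//       // invalid inputs; surface the badOptsFailure
+//   }
+//   digest, _ := s.HashInputs()
+//   if prior != nil && bytes.Equal(digest, prior.InputHash()) {
+//       // prior Solution remains valid; no need to call Solve()
+//   } else {
+//       soln, err := s.Solve()
+//       // persist soln as the new lock, or report err
+//   }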
@@ -175,9 +188,6 @@
 // with the inputs is detected, an error is returned. Otherwise, a Solver is
 // returned, ready to hash and check inputs or perform a solving run.
 func Prepare(params SolveParameters, sm SourceManager) (Solver, error) {
-	// local overrides would need to be handled first.
-	// TODO(sdboyer) local overrides! heh
-
 	if sm == nil {
 		return nil, badOptsFailure("must provide non-nil SourceManager")
 	}
@@ -192,23 +202,43 @@
 	}
 
 	if params.Manifest == nil {
-		params.Manifest = SimpleManifest{}
-	}
-
-	// Ensure the ignore map is at least initialized
-	ig := make(map[string]bool)
-	if len(params.Ignore) > 0 {
-		for _, pkg := range params.Ignore {
-			ig[pkg] = true
-		}
+		params.Manifest = simpleRootManifest{}
 	}
 
 	s := &solver{
 		params: params,
-		ig:     ig,
+		ig:     params.Manifest.IgnorePackages(),
+		ovr:    params.Manifest.Overrides(),
 		tl:     params.TraceLogger,
 	}
 
+	// Ensure the ignore and overrides maps are at least initialized
+	if s.ig == nil {
+		s.ig = make(map[string]bool)
+	}
+	if s.ovr == nil {
+		s.ovr = make(ProjectConstraints)
+	}
+
+	// Validate no empties in the overrides map
+	var eovr []string
+	for pr, pp := range s.ovr {
+		if pp.Constraint == nil && pp.NetworkName == "" {
+			eovr = append(eovr, string(pr))
+		}
+	}
+
+	if eovr != nil {
+		// Maybe it's a little nitpicky to do this (we COULD proceed; empty
+		// overrides have no effect), but this errs on the side of letting the
+		// tool/user know there's bad input. Purely as a principle, that seems
+		// preferable to silently allowing progress with icky input.
+		if len(eovr) > 1 {
+			return nil, badOptsFailure(fmt.Sprintf("Overrides lacked any non-zero properties for multiple project roots: %s", strings.Join(eovr, " ")))
+		}
+		return nil, badOptsFailure(fmt.Sprintf("An override was declared for %s, but without any non-zero properties", eovr[0]))
+	}
+
 	// Set up the bridge and ensure the root dir is in good, working order
 	// before doing anything else. (This call is stubbed out in tests, via
 	// overriding mkBridge(), so we can run with virtual RootDir.)
@@ -260,36 +290,33 @@
 	// Prime the queues with the root project
 	err := s.selectRoot()
 	if err != nil {
-		// TODO(sdboyer) this properly with errs, yar
 		return nil, err
 	}
 
-	// Log initial step
-	s.logSolve()
 	all, err := s.solve()
 
-	// Solver finished with an err; return that and we're done
-	if err != nil {
-		return nil, err
+	var soln solution
+	if err == nil {
+		soln = solution{
+			att: s.attempts,
+		}
+
+		// An err here is impossible; it could only be caused by a parsing error
+		// of the root tree, but that parse necessarily succeeded back in
+		// selectRoot(), so we can ignore this err
+		soln.hd, _ = s.HashInputs()
+
+		// Convert ProjectAtoms into LockedProjects
+		soln.p = make([]LockedProject, len(all))
+		k := 0
+		for pa, pl := range all {
+			soln.p[k] = pa2lp(pa, pl)
+			k++
+		}
 	}
 
-	r := solution{
-		att: s.attempts,
-	}
-
-	// An err here is impossible at this point; we already know the root tree is
-	// fine
-	r.hd, _ = s.HashInputs()
-
-	// Convert ProjectAtoms into LockedProjects
-	r.p = make([]LockedProject, len(all))
-	k := 0
-	for pa, pl := range all {
-		r.p[k] = pa2lp(pa, pl)
-		k++
-	}
-
-	return r, nil
+	s.traceFinish(soln, err)
+	return soln, err
 }
 
 // solve is the top-level loop for the SAT solving process.
@@ -313,10 +340,11 @@
 		if awp, is := s.sel.selected(bmi.id); !is {
 			// Analysis path for when we haven't selected the project yet - need
 			// to create a version queue.
-			s.logStart(bmi)
 			queue, err := s.createVersionQueue(bmi)
 			if err != nil {
 				// Err means a failure somewhere down the line; try backtracking.
+				s.traceStartBacktrack(bmi, err, false)
+				//s.traceBacktrack(bmi, false)
 				if s.backtrack() {
 					// backtracking succeeded, move to the next unselected id
 					continue
@@ -328,15 +356,15 @@
 				panic("canary - queue is empty, but flow indicates success")
 			}
 
-			s.selectAtomWithPackages(atomWithPackages{
+			awp := atomWithPackages{
 				a: atom{
 					id: queue.id,
 					v:  queue.current(),
 				},
 				pl: bmi.pl,
-			})
+			}
+			s.selectAtom(awp, false)
 			s.vqs = append(s.vqs, queue)
-			s.logSolve()
 		} else {
 			// We're just trying to add packages to an already-selected project.
 			// That means it's not OK to burn through the version queue for that
@@ -357,21 +385,21 @@
 				pl: bmi.pl,
 			}
 
-			s.logStart(bmi) // TODO(sdboyer) different special start logger for this path
-			err := s.checkPackage(nawp)
+			s.traceCheckPkgs(bmi)
+			err := s.check(nawp, true)
 			if err != nil {
 				// Err means a failure somewhere down the line; try backtracking.
+				s.traceStartBacktrack(bmi, err, true)
 				if s.backtrack() {
 					// backtracking succeeded, move to the next unselected id
 					continue
 				}
 				return nil, err
 			}
-			s.selectPackages(nawp)
+			s.selectAtom(nawp, true)
 			// We don't add anything to the stack of version queues because the
 			// backtracker knows not to pop the vqstack if it backtracks
 			// across a pure-package addition.
-			s.logSolve()
 		}
 	}
 
@@ -406,10 +434,10 @@
 		// It's sort of OK because the root never makes it out into the results.
 		// We may need a more elegant solution if we discover other side
 		// effects, though.
-		v: Revision(""),
+		v: rootRev,
 	}
 
-	ptree, err := s.b.listPackages(pa.id, nil)
+	ptree, err := s.b.ListPackages(pa.id, nil)
 	if err != nil {
 		return err
 	}
@@ -432,7 +460,8 @@
 
 	// If we're looking for root's deps, get it from opts and local root
 	// analysis, rather than having the sm do it
-	mdeps := append(s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()...)
+	c, tc := s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()
+	mdeps := s.ovr.overrideAll(pcSliceToMap(c, tc).asSortedSlice())
 
 	// Err is not possible at this point, as it could only come from
 	// listPackages(), which, if we're here, already succeeded for root
@@ -451,6 +480,7 @@
 		heap.Push(s.unsel, bimodalIdentifier{id: dep.Ident, pl: dep.pl, fromRoot: true})
 	}
 
+	s.traceSelectRoot(ptree, deps)
 	return nil
 }
 
@@ -463,12 +493,12 @@
 
 	// Work through the source manager to get project info and static analysis
 	// information.
-	m, _, err := s.b.getProjectInfo(a.a)
+	m, _, err := s.b.GetManifestAndLock(a.a.id, a.a.v)
 	if err != nil {
 		return nil, err
 	}
 
-	ptree, err := s.b.listPackages(a.a.id, a.a.v)
+	ptree, err := s.b.ListPackages(a.a.id, a.a.v)
 	if err != nil {
 		return nil, err
 	}
@@ -505,8 +535,7 @@
 		k++
 	}
 
-	deps := m.DependencyConstraints()
-	// TODO(sdboyer) add overrides here...if we impl the concept (which we should)
+	deps := s.ovr.overrideAll(m.DependencyConstraints())
 
 	return s.intersectConstraintsWithImports(deps, reach)
 }
@@ -515,7 +544,7 @@
 // externally reached packages, and creates a []completeDep that is guaranteed
 // to include all packages named by import reach, using constraints where they
 // are available, or Any() where they are not.
-func (s *solver) intersectConstraintsWithImports(deps []ProjectConstraint, reach []string) ([]completeDep, error) {
+func (s *solver) intersectConstraintsWithImports(deps []workingConstraint, reach []string) ([]completeDep, error) {
 	// Create a radix tree with all the projects we know from the manifest
 	// TODO(sdboyer) make this smarter once we allow non-root inputs as 'projects'
 	xt := radix.New()
@@ -543,8 +572,8 @@
 			// github.com/sdboyer/foo
 			// github.com/sdboyer/foobar/baz
 			//
-			// The latter would incorrectly be conflated in with the former. So,
-			// as we know we're operating on strings that describe paths, guard
+			// The latter would incorrectly be conflated with the former. So, as
+			// we know we're operating on strings that describe paths, guard
 			// against this case by verifying that either the input is the same
 			// length as the match (in which case we know they're equal), or
 			// that the next character is the PathSeparator.
@@ -552,13 +581,13 @@
 				// Match is valid; put it in the dmap, either creating a new
 				// completeDep or appending it to the existing one for this base
 				// project/prefix.
-				dep := idep.(ProjectConstraint)
+				dep := idep.(workingConstraint)
 				if cdep, exists := dmap[dep.Ident.ProjectRoot]; exists {
 					cdep.pl = append(cdep.pl, rp)
 					dmap[dep.Ident.ProjectRoot] = cdep
 				} else {
 					dmap[dep.Ident.ProjectRoot] = completeDep{
-						ProjectConstraint: dep,
+						workingConstraint: dep,
 						pl:                []string{rp},
 					}
 				}
@@ -567,27 +596,27 @@
 		}
 
 		// No match. Let the SourceManager try to figure out the root
-		root, err := s.b.deduceRemoteRepo(rp)
+		root, err := s.b.DeduceProjectRoot(rp)
 		if err != nil {
 			// Nothing we can do if we can't suss out a root
 			return nil, err
 		}
 
-		// Still no matches; make a new completeDep with an open constraint
-		pd := ProjectConstraint{
+		// Make a new completeDep with an open constraint, respecting overrides
+		pd := s.ovr.override(ProjectConstraint{
 			Ident: ProjectIdentifier{
-				ProjectRoot: ProjectRoot(root.Base),
-				NetworkName: root.Base,
+				ProjectRoot: root,
+				NetworkName: string(root),
 			},
 			Constraint: Any(),
-		}
+		})
 
 		// Insert the pd into the trie so that further deps from this
 		// project get caught by the prefix search
-		xt.Insert(root.Base, pd)
+		xt.Insert(string(root), pd)
 		// And also put the complete dep into the dmap
-		dmap[ProjectRoot(root.Base)] = completeDep{
-			ProjectConstraint: pd,
+		dmap[root] = completeDep{
+			workingConstraint: pd,
 			pl:                []string{rp},
 		}
 	}
@@ -610,7 +639,7 @@
 		return newVersionQueue(id, nil, nil, s.b)
 	}
 
-	exists, err := s.b.repoExists(id)
+	exists, err := s.b.SourceExists(id)
 	if err != nil {
 		return nil, err
 	}
@@ -623,7 +652,7 @@
 			// Project exists only in vendor (and in some manifest somewhere)
 			// TODO(sdboyer) mark this for special handling, somehow?
 		} else {
-			return nil, newSolveError(fmt.Sprintf("Project '%s' could not be located.", id), cannotResolve)
+			return nil, fmt.Errorf("Project '%s' could not be located.", id)
 		}
 	}
 
@@ -650,7 +679,7 @@
 				continue
 			}
 
-			_, l, err := s.b.getProjectInfo(dep.depender)
+			_, l, err := s.b.GetManifestAndLock(dep.depender.id, dep.depender.v)
 			if err != nil || l == nil {
 				// err being non-nil really shouldn't be possible, but the lock
 				// being nil is quite likely
@@ -715,6 +744,7 @@
 	}
 
 	// Having assembled the queue, search it for a valid version.
+	s.traceCheckQueue(q, bmi, false, 1)
 	return q, s.findValidVersion(q, bmi.pl)
 }
 
@@ -735,13 +765,14 @@
 
 	for {
 		cur := q.current()
-		err := s.checkProject(atomWithPackages{
+		s.traceInfo("try %s@%s", q.id.errString(), cur)
+		err := s.check(atomWithPackages{
 			a: atom{
 				id: q.id,
 				v:  cur,
 			},
 			pl: pl,
-		})
+		}, false)
 		if err == nil {
 			// we have a good version, can return safely
 			return nil
@@ -785,7 +816,7 @@
 		// to be found and attempted in the repository. If it's only in vendor,
 		// though, then we have to try to use what's in the lock, because that's
 		// the only version we'll be able to get.
-		if exist, _ := s.b.repoExists(id); exist {
+		if exist, _ := s.b.SourceExists(id); exist {
 			return nil, nil
 		}
 
@@ -833,13 +864,10 @@
 		}
 
 		if !found {
-			s.logSolve("%s in root lock, but current constraints disallow it", id.errString())
 			return nil, nil
 		}
 	}
 
-	s.logSolve("using root lock's version of %s", id.errString())
-
 	return v, nil
 }
 
@@ -865,46 +893,44 @@
 
 			// Pop selections off until we get to a project.
 			var proj bool
+			var awp atomWithPackages
 			for !proj {
-				_, proj = s.unselectLast()
+				awp, proj = s.unselectLast()
+				s.traceBacktrack(awp.bmi(), !proj)
 			}
 		}
 
 		// Grab the last versionQueue off the list of queues
 		q := s.vqs[len(s.vqs)-1]
-		// Walk back to the next project
-		var awp atomWithPackages
-		var proj bool
 
-		for !proj {
-			awp, proj = s.unselectLast()
+		// Walk back to the next project
+		awp, proj := s.unselectLast()
+		if !proj {
+			panic("canary - *should* be impossible to have a pkg-only selection here")
 		}
 
 		if !q.id.eq(awp.a.id) {
-			panic("canary - version queue stack and selected project stack are out of alignment")
+			panic("canary - version queue stack and selected project stack are misaligned")
 		}
 
 		// Advance the queue past the current version, which we know is bad
 		// TODO(sdboyer) is it feasible to make available the failure reason here?
 		if q.advance(nil) == nil && !q.isExhausted() {
 			// Search for another acceptable version of this failed dep in its queue
+			s.traceCheckQueue(q, awp.bmi(), true, 0)
 			if s.findValidVersion(q, awp.pl) == nil {
-				s.logSolve()
-
 				// Found one! Put it back on the selected queue and stop
 				// backtracking
-				s.selectAtomWithPackages(atomWithPackages{
-					a: atom{
-						id: q.id,
-						v:  q.current(),
-					},
-					pl: awp.pl,
-				})
+
+				// reusing the old awp is fine
+				awp.a.v = q.current()
+				s.selectAtom(awp, false)
 				break
 			}
 		}
 
-		s.logSolve("no more versions of %s, backtracking", q.id.errString())
+		s.traceBacktrack(awp.bmi(), false)
+		//s.traceInfo("no more versions of %s, backtracking", q.id.errString())
 
 		// No solution found; continue backtracking after popping the queue
 		// we just inspected off the list
@@ -975,8 +1001,8 @@
 	// We can safely ignore an err from ListVersions here because, if there is
 	// an actual problem, it'll be noted and handled somewhere else saner in the
 	// solving algorithm.
-	ivl, _ := s.b.listVersions(iname)
-	jvl, _ := s.b.listVersions(jname)
+	ivl, _ := s.b.ListVersions(iname)
+	jvl, _ := s.b.ListVersions(jname)
 	iv, jv := len(ivl), len(jvl)
 
 	// Packages with fewer versions to pick from are less likely to benefit from
@@ -1012,79 +1038,18 @@
 	}
 }
 
-// selectAtomWithPackages handles the selection case where a new project is
-// being added to the selection queue, alongside some number of its contained
-// packages. This method pushes them onto the selection queue, then adds any
-// new resultant deps to the unselected queue.
-func (s *solver) selectAtomWithPackages(a atomWithPackages) {
-	s.unsel.remove(bimodalIdentifier{
-		id: a.a.id,
-		pl: a.pl,
-	})
-
-	s.sel.pushSelection(a, true)
-
-	deps, err := s.getImportsAndConstraintsOf(a)
-	if err != nil {
-		// This shouldn't be possible; other checks should have ensured all
-		// packages and deps are present for any argument passed to this method.
-		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
-	}
-
-	// If this atom has a lock, pull it out so that we can potentially inject
-	// preferred versions into any bmis we enqueue
-	_, l, _ := s.b.getProjectInfo(a.a)
-	var lmap map[ProjectIdentifier]Version
-	if l != nil {
-		lmap = make(map[ProjectIdentifier]Version)
-		for _, lp := range l.Projects() {
-			lmap[lp.Ident()] = lp.Version()
-		}
-	}
-
-	for _, dep := range deps {
-		s.sel.pushDep(dependency{depender: a.a, dep: dep})
-		// Go through all the packages introduced on this dep, selecting only
-		// the ones where the only depper on them is what we pushed in. Then,
-		// put those into the unselected queue.
-		rpm := s.sel.getRequiredPackagesIn(dep.Ident)
-		var newp []string
-		for _, pkg := range dep.pl {
-			if rpm[pkg] == 1 {
-				newp = append(newp, pkg)
-			}
-		}
-
-		if len(newp) > 0 {
-			bmi := bimodalIdentifier{
-				id: dep.Ident,
-				pl: newp,
-				// This puts in a preferred version if one's in the map, else
-				// drops in the zero value (nil)
-				prefv: lmap[dep.Ident],
-			}
-			heap.Push(s.unsel, bmi)
-		}
-
-		if s.sel.depperCount(dep.Ident) == 1 {
-			s.names[dep.Ident.ProjectRoot] = dep.Ident.netName()
-		}
-	}
-}
-
-// selectPackages handles the selection case where we're just adding some new
-// packages to a project that was already selected. After pushing the selection,
-// it adds any newly-discovered deps to the unselected queue.
+// selectAtom pulls an atom into the selection stack, alongside some of
+// its contained packages. New resultant dependency requirements are added to
+// the unselected priority queue.
 //
-// It also takes an atomWithPackages because we need that same information in
-// order to enqueue the selection.
-func (s *solver) selectPackages(a atomWithPackages) {
+// Behavior is slightly different if pkgonly is true: the atom is assumed to
+// be selected already, and only the additional packages are pushed.
+func (s *solver) selectAtom(a atomWithPackages, pkgonly bool) {
 	s.unsel.remove(bimodalIdentifier{
 		id: a.a.id,
 		pl: a.pl,
 	})
 
-	s.sel.pushSelection(a, false)
+	s.sel.pushSelection(a, pkgonly)
 
 	deps, err := s.getImportsAndConstraintsOf(a)
 	if err != nil {
@@ -1095,7 +1060,7 @@
 
 	// If this atom has a lock, pull it out so that we can potentially inject
 	// preferred versions into any bmis we enqueue
-	_, l, _ := s.b.getProjectInfo(a.a)
+	_, l, _ := s.b.GetManifestAndLock(a.a.id, a.a.v)
 	var lmap map[ProjectIdentifier]Version
 	if l != nil {
 		lmap = make(map[ProjectIdentifier]Version)
@@ -1132,6 +1097,8 @@
 			s.names[dep.Ident.ProjectRoot] = dep.Ident.netName()
 		}
 	}
+
+	s.traceSelect(a, pkgonly)
 }
 
 func (s *solver) unselectLast() (atomWithPackages, bool) {
@@ -1158,67 +1125,6 @@
 	return awp, first
 }
 
-func (s *solver) logStart(bmi bimodalIdentifier) {
-	if !s.params.Trace {
-		return
-	}
-
-	prefix := strings.Repeat("| ", len(s.vqs)+1)
-	// TODO(sdboyer) how...to list the packages in the limited space we have?
-	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? attempting %s (with %v packages)", bmi.id.errString(), len(bmi.pl)), prefix, prefix))
-}
-
-func (s *solver) logSolve(args ...interface{}) {
-	if !s.params.Trace {
-		return
-	}
-
-	preflen := len(s.vqs)
-	var msg string
-	if len(args) == 0 {
-		// Generate message based on current solver state
-		if len(s.vqs) == 0 {
-			msg = "✓ (root)"
-		} else {
-			vq := s.vqs[len(s.vqs)-1]
-			msg = fmt.Sprintf("✓ select %s at %s", vq.id.errString(), vq.current())
-		}
-	} else {
-		// Use longer prefix length for these cases, as they're the intermediate
-		// work
-		preflen++
-		switch data := args[0].(type) {
-		case string:
-			msg = tracePrefix(fmt.Sprintf(data, args[1:]), "| ", "| ")
-		case traceError:
-			// We got a special traceError, use its custom method
-			msg = tracePrefix(data.traceString(), "| ", "✗ ")
-		case error:
-			// Regular error; still use the x leader but default Error() string
-			msg = tracePrefix(data.Error(), "| ", "✗ ")
-		default:
-			// panic here because this can *only* mean a stupid internal bug
-			panic("canary - must pass a string as first arg to logSolve, or no args at all")
-		}
-	}
-
-	prefix := strings.Repeat("| ", preflen)
-	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
-}
-
-func tracePrefix(msg, sep, fsep string) string {
-	parts := strings.Split(strings.TrimSuffix(msg, "\n"), "\n")
-	for k, str := range parts {
-		if k == 0 {
-			parts[k] = fmt.Sprintf("%s%s", fsep, str)
-		} else {
-			parts[k] = fmt.Sprintf("%s%s", sep, str)
-		}
-	}
-
-	return strings.Join(parts, "\n")
-}
-
 // simple (temporary?) helper just to convert atoms into locked projects
 func pa2lp(pa atom, pkgs map[string]struct{}) LockedProject {
 	lp := LockedProject{
diff --git a/vendor/github.com/sdboyer/gps/source.go b/vendor/github.com/sdboyer/gps/source.go
new file mode 100644
index 0000000..feaba15
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source.go
@@ -0,0 +1,328 @@
+package gps
+
+import "fmt"
+
+type source interface {
+	checkExistence(sourceExistence) bool
+	exportVersionTo(Version, string) error
+	getManifestAndLock(ProjectRoot, Version) (Manifest, Lock, error)
+	listPackages(ProjectRoot, Version) (PackageTree, error)
+	listVersions() ([]Version, error)
+	revisionPresentIn(Revision) (bool, error)
+}
+
+type sourceMetaCache struct {
+	//Version  string                   // TODO(sdboyer) use this
+	infos  map[Revision]projectInfo
+	ptrees map[Revision]PackageTree
+	vMap   map[UnpairedVersion]Revision
+	rMap   map[Revision][]UnpairedVersion
+	// TODO(sdboyer) mutexes. actually probably just one, b/c complexity
+}
+
+// projectInfo holds the manifest and lock information for a project at a
+// particular revision.
+type projectInfo struct {
+	Manifest
+	Lock
+}
+
+type existence struct {
+	// The existence levels for which a search/check has been performed
+	s sourceExistence
+
+	// The existence levels verified to be present through searching
+	f sourceExistence
+}
+
+func newMetaCache() *sourceMetaCache {
+	return &sourceMetaCache{
+		infos:  make(map[Revision]projectInfo),
+		ptrees: make(map[Revision]PackageTree),
+		vMap:   make(map[UnpairedVersion]Revision),
+		rMap:   make(map[Revision][]UnpairedVersion),
+	}
+}
+
+type baseVCSSource struct {
+	// Object for the cache repository
+	crepo *repo
+
+	// Indicates the extent to which we have searched for, and verified, the
+	// existence of the project/repo.
+	ex existence
+
+	// ProjectAnalyzer used to fulfill getManifestAndLock
+	an ProjectAnalyzer
+
+	// Whether the cache has the latest info on versions
+	cvsync bool
+
+	// The project metadata cache. This is (or is intended to be) persisted to
+	// disk, for reuse across solver runs.
+	dc *sourceMetaCache
+
+	// lvfunc allows the other vcs source types that embed this type to inject
+	// their listVersions func into the baseVCSSource, for use as needed.
+	lvfunc func() (vlist []Version, err error)
+}
+
+func (bs *baseVCSSource) getManifestAndLock(r ProjectRoot, v Version) (Manifest, Lock, error) {
+	if err := bs.ensureCacheExistence(); err != nil {
+		return nil, nil, err
+	}
+
+	rev, err := bs.toRevOrErr(v)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Return the info from the cache, if we already have it
+	if pi, exists := bs.dc.infos[rev]; exists {
+		return pi.Manifest, pi.Lock, nil
+	}
+
+	bs.crepo.mut.Lock()
+	if !bs.crepo.synced {
+		err = bs.crepo.r.Update()
+		if err != nil {
+			bs.crepo.mut.Unlock()
+			return nil, nil, fmt.Errorf("could not fetch latest updates into repository: %s", err)
+		}
+		bs.crepo.synced = true
+	}
+
+	// Always prefer a rev, if it's available
+	if pv, ok := v.(PairedVersion); ok {
+		err = bs.crepo.r.UpdateVersion(pv.Underlying().String())
+	} else {
+		err = bs.crepo.r.UpdateVersion(v.String())
+	}
+	bs.crepo.mut.Unlock()
+
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er error
+		panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", bs.crepo.r.LocalPath(), v.String(), err))
+	}
+
+	bs.crepo.mut.RLock()
+	m, l, err := bs.an.DeriveManifestAndLock(bs.crepo.r.LocalPath(), r)
+	// TODO(sdboyer) cache results
+	bs.crepo.mut.RUnlock()
+
+	if err == nil {
+		if l != nil {
+			l = prepLock(l)
+		}
+
+		// If m is nil, prepManifest will provide an empty one.
+		pi := projectInfo{
+			Manifest: prepManifest(m),
+			Lock:     l,
+		}
+
+		bs.dc.infos[rev] = pi
+
+		return pi.Manifest, pi.Lock, nil
+	}
+
+	return nil, nil, err
+}
+
+// toRevision turns a Version into a Revision, if doing so is possible based on
+// the information contained in the version itself, or in the cache maps.
+func (dc *sourceMetaCache) toRevision(v Version) Revision {
+	switch t := v.(type) {
+	case Revision:
+		return t
+	case PairedVersion:
+		return t.Underlying()
+	case UnpairedVersion:
+		// This will return the empty rev (empty string) if we don't have a
+		// record of it. It's up to the caller to decide, for example, if
+		// it's appropriate to update the cache.
+		return dc.vMap[t]
+	default:
+		panic(fmt.Sprintf("Unknown version type %T", v))
+	}
+}
+
+// toUnpaired turns a Version into an UnpairedVersion, if doing so is possible
+// based on the information contained in the version itself, or in the cache
+// maps.
+//
+// If the input is a revision and multiple UnpairedVersions are associated with
+// it, whatever happens to be the first is returned.
+func (dc *sourceMetaCache) toUnpaired(v Version) UnpairedVersion {
+	switch t := v.(type) {
+	case UnpairedVersion:
+		return t
+	case PairedVersion:
+		return t.Unpair()
+	case Revision:
+		if upv, has := dc.rMap[t]; has && len(upv) > 0 {
+			return upv[0]
+		}
+		return nil
+	default:
+		panic(fmt.Sprintf("unknown version type %T", v))
+	}
+}
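The interplay of the three version shapes (Revision, PairedVersion, UnpairedVersion) and the vMap/rMap caches is easiest to see side by side. A minimal sketch, not part of the diff; it assumes only what the tests later in this change also rely on, namely that NewVersion yields an UnpairedVersion and .Is() pairs it with a Revision:

```go
func exampleConversions() {
	dc := newMetaCache()
	rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
	uv := NewVersion("1.0.0") // assumed UnpairedVersion, as in the tests below

	dc.vMap[uv] = rev
	dc.rMap[rev] = []UnpairedVersion{uv}

	_ = dc.toRevision(uv)         // rev, looked up via vMap
	_ = dc.toRevision(uv.Is(rev)) // rev, taken straight off the PairedVersion
	_ = dc.toUnpaired(rev)        // uv, looked up via rMap
}
```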
+
+func (bs *baseVCSSource) revisionPresentIn(r Revision) (bool, error) {
+	// First and fastest path is to check the data cache to see if the rev is
+	// present. This could give us false positives, but the cases where that can
+	// occur would require a type of cache staleness that seems *exceedingly*
+	// unlikely to occur.
+	if _, has := bs.dc.infos[r]; has {
+		return true, nil
+	} else if _, has := bs.dc.rMap[r]; has {
+		return true, nil
+	}
+
+	err := bs.ensureCacheExistence()
+	if err != nil {
+		return false, err
+	}
+
+	bs.crepo.mut.RLock()
+	defer bs.crepo.mut.RUnlock()
+	return bs.crepo.r.IsReference(string(r)), nil
+}
+
+func (bs *baseVCSSource) ensureCacheExistence() error {
+	// Technically, methods could attempt to return straight from the
+	// metadata cache even if the repo cache doesn't exist on disk. But that
+	// would allow weird state inconsistencies (cache exists, but no repo...how
+	// does that even happen?) that it'd be better to just not allow so that we
+	// don't have to think about it elsewhere.
+	if !bs.checkExistence(existsInCache) {
+		if bs.checkExistence(existsUpstream) {
+			bs.crepo.mut.Lock()
+			err := bs.crepo.r.Get()
+			bs.crepo.mut.Unlock()
+
+			if err != nil {
+				return fmt.Errorf("failed to create repository cache for %s", bs.crepo.r.Remote())
+			}
+			bs.crepo.synced = true
+			bs.ex.s |= existsInCache
+			bs.ex.f |= existsInCache
+		} else {
+			return fmt.Errorf("project %s does not exist upstream", bs.crepo.r.Remote())
+		}
+	}
+
+	return nil
+}
+
+// checkExistence provides a direct method for querying existence levels of the
+// source. It will only perform actual searching (local fs or over the network)
+// if no previous attempt at that search has been made.
+//
+// Note that this may perform read-ish operations on the cache repo, and it
+// takes a lock accordingly. This makes it unsafe to call from a segment where
+// the cache repo mutex is already write-locked, as deadlock will occur.
+func (bs *baseVCSSource) checkExistence(ex sourceExistence) bool {
+	if bs.ex.s&ex != ex {
+		if ex&existsInVendorRoot != 0 && bs.ex.s&existsInVendorRoot == 0 {
+			panic("should now be implemented in bridge")
+		}
+		if ex&existsInCache != 0 && bs.ex.s&existsInCache == 0 {
+			bs.crepo.mut.RLock()
+			bs.ex.s |= existsInCache
+			if bs.crepo.r.CheckLocal() {
+				bs.ex.f |= existsInCache
+			}
+			bs.crepo.mut.RUnlock()
+		}
+		if ex&existsUpstream != 0 && bs.ex.s&existsUpstream == 0 {
+			bs.crepo.mut.RLock()
+			bs.ex.s |= existsUpstream
+			if bs.crepo.r.Ping() {
+				bs.ex.f |= existsUpstream
+			}
+			bs.crepo.mut.RUnlock()
+		}
+	}
+
+	return ex&bs.ex.f == ex
+}
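The s/f pair in existence separates the levels that have been searched from those actually found. A small sketch of how checkExistence's bit arithmetic reads, assuming only that the existsX constants are distinct bits (which the ORs above require):

```go
func exampleExistence() {
	var ex existence
	ex.s |= existsInCache  // searched the cache; ex.f untouched, so it was a miss
	ex.s |= existsUpstream // searched upstream...
	ex.f |= existsUpstream // ...and found it

	_ = ex.f&existsUpstream == existsUpstream // true: known to exist upstream
	_ = ex.f&existsInCache == existsInCache   // false: cache miss, but already searched
}
```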
+
+func (bs *baseVCSSource) listPackages(pr ProjectRoot, v Version) (ptree PackageTree, err error) {
+	if err = bs.ensureCacheExistence(); err != nil {
+		return
+	}
+
+	var r Revision
+	if r, err = bs.toRevOrErr(v); err != nil {
+		return
+	}
+
+	// Return the ptree from the cache, if we already have it
+	var exists bool
+	if ptree, exists = bs.dc.ptrees[r]; exists {
+		return
+	}
+
+	// Not in the cache; check out the version and do the analysis
+	bs.crepo.mut.Lock()
+	// Check out the desired version for analysis
+	if r != "" {
+		// Always prefer a rev, if it's available
+		err = bs.crepo.r.UpdateVersion(string(r))
+	} else {
+		// If we don't have a rev, ensure the repo is up to date, otherwise we
+		// could have a desync issue
+		if !bs.crepo.synced {
+			err = bs.crepo.r.Update()
+			if err != nil {
+				bs.crepo.mut.Unlock()
+				return PackageTree{}, fmt.Errorf("could not fetch latest updates into repository: %s", err)
+			}
+			bs.crepo.synced = true
+		}
+		err = bs.crepo.r.UpdateVersion(v.String())
+	}
+
+	ptree, err = listPackages(bs.crepo.r.LocalPath(), string(pr))
+	bs.crepo.mut.Unlock()
+
+	// TODO(sdboyer) cache errs?
+	if err == nil {
+		bs.dc.ptrees[r] = ptree
+	}
+
+	return
+}
+
+// toRevOrErr makes all efforts to convert a Version into a rev, including
+// updating the cache repo (if needed). It does not guarantee that the returned
+// Revision actually exists in the repository (as one of the cheaper methods may
+// have had bad data).
+func (bs *baseVCSSource) toRevOrErr(v Version) (r Revision, err error) {
+	r = bs.dc.toRevision(v)
+	if r == "" {
+		// Rev can be empty if:
+		//  - The cache is unsynced
+		//  - A version was passed that used to exist, but no longer does
+		//  - A garbage version was passed. (Functionally indistinguishable from
+		//  the previous)
+		if !bs.cvsync {
+			// call the lvfunc to sync the meta cache
+			_, err = bs.lvfunc()
+			if err != nil {
+				return
+			}
+		}
+
+		r = bs.dc.toRevision(v)
+		// If we still don't have a rev, then the version's no good
+		if r == "" {
+			err = fmt.Errorf("version %s does not exist in source %s", v, bs.crepo.r.Remote())
+		}
+	}
+
+	return
+}
+
+func (bs *baseVCSSource) exportVersionTo(v Version, to string) error {
+	return bs.crepo.exportVersionTo(v, to)
+}
diff --git a/vendor/github.com/sdboyer/gps/source_manager.go b/vendor/github.com/sdboyer/gps/source_manager.go
index 86627a1..11ec567 100644
--- a/vendor/github.com/sdboyer/gps/source_manager.go
+++ b/vendor/github.com/sdboyer/gps/source_manager.go
@@ -1,57 +1,76 @@
 package gps
 
 import (
-	"encoding/json"
 	"fmt"
-	"go/build"
 	"os"
-	"path"
+	"path/filepath"
+	"strings"
+	"sync"
+	"sync/atomic"
 
-	"github.com/Masterminds/vcs"
+	"github.com/Masterminds/semver"
 )
 
+// Used to compute a friendly filepath from a URL-shaped input
+//
+// TODO(sdboyer) this is awful. Right?
+var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-")
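For instance, applied to an illustrative remote URL:

```go
fmt.Println(sanitizer.Replace("https://github.com/sdboyer/gps"))
// Output: https---github.com-sdboyer-gps
```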
+
 // A SourceManager is responsible for retrieving, managing, and interrogating
 // source repositories. Its primary purpose is to serve the needs of a Solver,
 // but it is handy for other purposes, as well.
 //
-// gps's built-in SourceManager, accessible via NewSourceManager(), is
-// intended to be generic and sufficient for any purpose. It provides some
-// additional semantics around the methods defined here.
+// gps's built-in SourceManager, SourceMgr, is intended to be generic and
+// sufficient for any purpose. It provides some additional semantics around the
+// methods defined here.
 type SourceManager interface {
-	// RepoExists checks if a repository exists, either upstream or in the
+	// SourceExists checks if a repository exists, either upstream or in the
 	// SourceManager's central repository cache.
-	RepoExists(ProjectRoot) (bool, error)
+	SourceExists(ProjectIdentifier) (bool, error)
 
 	// ListVersions retrieves a list of the available versions for a given
 	// repository name.
-	ListVersions(ProjectRoot) ([]Version, error)
+	ListVersions(ProjectIdentifier) ([]Version, error)
 
 	// RevisionPresentIn indicates whether the provided Version is present in
 	// the given repository.
-	RevisionPresentIn(ProjectRoot, Revision) (bool, error)
+	RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)
 
-	// ListPackages retrieves a tree of the Go packages at or below the provided
-	// import path, at the provided version.
-	ListPackages(ProjectRoot, Version) (PackageTree, error)
+	// ListPackages parses the tree of the Go packages at or below root of the
+	// provided ProjectIdentifier, at the provided version.
+	ListPackages(ProjectIdentifier, Version) (PackageTree, error)
 
-	// GetProjectInfo returns manifest and lock information for the provided
-	// import path. gps currently requires that projects be rooted at their
-	// repository root, which means that this ProjectRoot must also be a
+	// GetManifestAndLock returns manifest and lock information for the provided
+	// root import path.
+	//
+	// gps currently requires that projects be rooted at their repository root,
+	// necessitating that the ProjectIdentifier's ProjectRoot must also be a
 	// repository root.
-	GetProjectInfo(ProjectRoot, Version) (Manifest, Lock, error)
+	GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error)
 
 	// ExportProject writes out the tree of the provided import path, at the
 	// provided version, to the provided directory.
-	ExportProject(ProjectRoot, Version, string) error
+	ExportProject(ProjectIdentifier, Version, string) error
 
-	// Release lets go of any locks held by the SourceManager.
-	Release()
+	// AnalyzerInfo reports the name and version of the logic used to service
+	// GetManifestAndLock().
+	AnalyzerInfo() (name string, version *semver.Version)
+
+	// DeduceProjectRoot takes an import path and deduces the corresponding
+	// project/source root.
+	DeduceProjectRoot(ip string) (ProjectRoot, error)
 }
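A sketch of a caller against the revised, ProjectIdentifier-keyed interface; it assumes nothing beyond the methods declared above (the survey helper is hypothetical, imports elided):

```go
func survey(sm SourceManager, id ProjectIdentifier) error {
	ok, err := sm.SourceExists(id)
	if err != nil || !ok {
		return fmt.Errorf("no usable source for %s: %v", id.ProjectRoot, err)
	}

	versions, err := sm.ListVersions(id)
	if err != nil {
		return err
	}
	for _, v := range versions {
		fmt.Println(v) // order is unspecified; sort if it matters
	}
	return nil
}
```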
 
-// A ProjectAnalyzer is responsible for analyzing a path for Manifest and Lock
-// information. Tools relying on gps must implement one.
+// A ProjectAnalyzer is responsible for analyzing a given path for Manifest and
+// Lock information. Tools relying on gps must implement one.
 type ProjectAnalyzer interface {
-	GetInfo(string, ProjectRoot) (Manifest, Lock, error)
+	// Perform analysis of the filesystem tree rooted at path, with the
+	// root import path importRoot, to determine the project's constraints, as
+	// indicated by a Manifest and Lock.
+	DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)
+
+	// Report the name and version of this ProjectAnalyzer.
+	Info() (name string, version *semver.Version)
 }
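A minimal conforming analyzer might look like the sketch below (the tests in this diff use a comparable naiveAnalyzer). Returning nil for both Manifest and Lock just asserts no constraints; a real tool would parse its own metadata file under path:

```go
type nilAnalyzer struct{}

func (nilAnalyzer) DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error) {
	return nil, nil, nil // contribute no constraints at all
}

func (nilAnalyzer) Info() (string, *semver.Version) {
	v, _ := semver.NewVersion("0.0.1")
	return "nil-analyzer", v
}
```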
 
 // SourceMgr is the default SourceManager for gps.
@@ -60,22 +79,15 @@
 // tools; control via dependency injection is intended to be sufficient.
 type SourceMgr struct {
 	cachedir string
-	pms      map[ProjectRoot]*pmState
+	srcs     map[string]source
+	srcmut   sync.RWMutex
 	an       ProjectAnalyzer
-	ctx      build.Context
-	//pme               map[ProjectRoot]error
+	dxt      deducerTrie
+	rootxt   prTrie
 }
 
 var _ SourceManager = &SourceMgr{}
 
-// Holds a projectManager, caches of the managed project's data, and information
-// about the freshness of those caches
-type pmState struct {
-	pm   *projectManager
-	cf   *os.File // handle for the cache file
-	vcur bool     // indicates that we've called ListVersions()
-}
-
 // NewSourceManager produces an instance of gps's built-in SourceManager. It
 // takes a cache directory (where local instances of upstream repositories are
 // stored), a vendor directory for the project currently being worked on, and a
@@ -98,12 +110,12 @@
 		return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
 	}
 
-	err := os.MkdirAll(cachedir, 0777)
+	err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
 	if err != nil {
 		return nil, err
 	}
 
-	glpath := path.Join(cachedir, "sm.lock")
+	glpath := filepath.Join(cachedir, "sm.lock")
 	_, err = os.Stat(glpath)
 	if err == nil && !force {
 		return nil, fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath)
@@ -114,47 +126,50 @@
 		return nil, fmt.Errorf("failed to create global cache lock file at %s with err %s", glpath, err)
 	}
 
-	ctx := build.Default
-	// Replace GOPATH with our cache dir
-	ctx.GOPATH = cachedir
-
 	return &SourceMgr{
 		cachedir: cachedir,
-		pms:      make(map[ProjectRoot]*pmState),
-		ctx:      ctx,
+		srcs:     make(map[string]source),
 		an:       an,
+		dxt:      pathDeducerTrie(),
+		rootxt:   newProjectRootTrie(),
 	}, nil
 }
 
 // Release lets go of any locks held by the SourceManager.
 func (sm *SourceMgr) Release() {
-	os.Remove(path.Join(sm.cachedir, "sm.lock"))
+	os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
 }
 
-// GetProjectInfo returns manifest and lock information for the provided import
-// path. gps currently requires that projects be rooted at their repository
-// root, which means that this ProjectRoot must also be a repository root.
+// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
+func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
+	return sm.an.Info()
+}
+
+// GetManifestAndLock returns manifest and lock information for the provided
+// import path. gps currently requires that projects be rooted at their
+// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
+// also be a repository root.
 //
-// The work of producing the manifest and lock information is delegated to the
-// injected ProjectAnalyzer.
-func (sm *SourceMgr) GetProjectInfo(n ProjectRoot, v Version) (Manifest, Lock, error) {
-	pmc, err := sm.getProjectManager(n)
+// The work of producing the manifest and lock is delegated to the injected
+// ProjectAnalyzer's DeriveManifestAndLock() method.
+func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
+	src, err := sm.getSourceFor(id)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	return pmc.pm.GetInfoAt(v)
+	return src.getManifestAndLock(id.ProjectRoot, v)
 }
 
-// ListPackages retrieves a tree of the Go packages at or below the provided
-// import path, at the provided version.
-func (sm *SourceMgr) ListPackages(n ProjectRoot, v Version) (PackageTree, error) {
-	pmc, err := sm.getProjectManager(n)
+// ListPackages parses the tree of the Go packages at and below the ProjectRoot
+// of the given ProjectIdentifier, at the given version.
+func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	src, err := sm.getSourceFor(id)
 	if err != nil {
 		return PackageTree{}, err
 	}
 
-	return pmc.pm.ListPackages(v)
+	return src.listPackages(id.ProjectRoot, v)
 }
 
 // ListVersions retrieves a list of the available versions for a given
@@ -165,133 +180,219 @@
 // expected that the caller either not care about order, or sort the result
 // themselves.
 //
-// This list is always retrieved from upstream; if upstream is not accessible
-// (network outage, access issues, or the resource actually went away), an error
-// will be returned.
-func (sm *SourceMgr) ListVersions(n ProjectRoot) ([]Version, error) {
-	pmc, err := sm.getProjectManager(n)
+// This list is always retrieved from upstream on the first call. Subsequent
+// calls will return a cached version of the first call's results. If upstream
+// is not accessible (network outage, access issues, or the resource actually
+// went away), an error will be returned.
+func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
+	src, err := sm.getSourceFor(id)
 	if err != nil {
 		// TODO(sdboyer) More-er proper-er errors
 		return nil, err
 	}
 
-	return pmc.pm.ListVersions()
+	return src.listVersions()
 }
 
 // RevisionPresentIn indicates whether the provided Revision is present in the given
 // repository.
-func (sm *SourceMgr) RevisionPresentIn(n ProjectRoot, r Revision) (bool, error) {
-	pmc, err := sm.getProjectManager(n)
+func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
+	src, err := sm.getSourceFor(id)
 	if err != nil {
 		// TODO(sdboyer) More-er proper-er errors
 		return false, err
 	}
 
-	return pmc.pm.RevisionPresentIn(r)
+	return src.revisionPresentIn(r)
 }
 
-// RepoExists checks if a repository exists, either upstream or in the cache,
-// for the provided ProjectRoot.
-func (sm *SourceMgr) RepoExists(n ProjectRoot) (bool, error) {
-	pms, err := sm.getProjectManager(n)
+// SourceExists checks if a repository exists, either upstream or in the cache,
+// for the provided ProjectIdentifier.
+func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
+	src, err := sm.getSourceFor(id)
 	if err != nil {
 		return false, err
 	}
 
-	return pms.pm.CheckExistence(existsInCache) || pms.pm.CheckExistence(existsUpstream), nil
+	return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
 }
 
-// ExportProject writes out the tree of the provided import path, at the
-// provided version, to the provided directory.
-func (sm *SourceMgr) ExportProject(n ProjectRoot, v Version, to string) error {
-	pms, err := sm.getProjectManager(n)
+// ExportProject writes out the tree of the provided ProjectIdentifier's
+// ProjectRoot, at the provided version, to the provided directory.
+func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
+	src, err := sm.getSourceFor(id)
 	if err != nil {
 		return err
 	}
 
-	return pms.pm.ExportVersionTo(v, to)
+	return src.exportVersionTo(v, to)
 }
 
-// getProjectManager gets the project manager for the given ProjectRoot.
+// DeduceProjectRoot takes an import path and deduces the corresponding
+// project/source root.
 //
-// If no such manager yet exists, it attempts to create one.
-func (sm *SourceMgr) getProjectManager(n ProjectRoot) (*pmState, error) {
-	// Check pm cache and errcache first
-	if pm, exists := sm.pms[n]; exists {
-		return pm, nil
-		//} else if pme, errexists := sm.pme[name]; errexists {
-		//return nil, pme
-	}
-
-	repodir := path.Join(sm.cachedir, "src", string(n))
-	// TODO(sdboyer) be more robust about this
-	r, err := vcs.NewRepo("https://"+string(n), repodir)
-	if err != nil {
-		// TODO(sdboyer) be better
-		return nil, err
-	}
-	if !r.CheckLocal() {
-		// TODO(sdboyer) cloning the repo here puts it on a blocking, and possibly
-		// unnecessary path. defer it
-		err = r.Get()
-		if err != nil {
-			// TODO(sdboyer) be better
-			return nil, err
+// Note that some import paths may require network activity to correctly
+// determine the root of the path, such as, but not limited to, vanity import
+// paths. (A special exception is written for gopkg.in to minimize network
+// activity, as its behavior is well-structured.)
+func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
+	if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
+		// The non-matching tail of the import path could still be malformed.
+		// Validate just that part, if it exists
+		if prefix != ip {
+			if !pathvld.MatchString(strings.TrimPrefix(ip, prefix)) {
+				return "", fmt.Errorf("%q is not a valid import path", ip)
+			}
+			// There was one, and it validated fine - add it so we don't have to
+			// revalidate it later
+			sm.rootxt.Insert(ip, root)
 		}
+		return root, nil
 	}
 
-	// Ensure cache dir exists
-	metadir := path.Join(sm.cachedir, "metadata", string(n))
-	err = os.MkdirAll(metadir, 0777)
+	rootf, _, err := sm.deducePathAndProcess(ip)
 	if err != nil {
-		// TODO(sdboyer) be better
+		return "", err
+	}
+
+	r, err := rootf()
+	return ProjectRoot(r), err
+}
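Usage sketch with illustrative paths: statically-deducible hosts resolve without network activity, and repeat lookups under a known root are served from the rootxt trie:

```go
func exampleDeduce(sm *SourceMgr) {
	root, _ := sm.DeduceProjectRoot("github.com/sdboyer/gps/some/subpkg")
	fmt.Println(root) // github.com/sdboyer/gps

	// Answered from rootxt this time; no deduction is re-run.
	root, _ = sm.DeduceProjectRoot("github.com/sdboyer/gps/other/subpkg")
	fmt.Println(root) // github.com/sdboyer/gps
}
```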
+
+func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
+	nn := id.netName()
+
+	sm.srcmut.RLock()
+	src, has := sm.srcs[nn]
+	sm.srcmut.RUnlock()
+	if has {
+		return src, nil
+	}
+
+	_, srcf, err := sm.deducePathAndProcess(nn)
+	if err != nil {
 		return nil, err
 	}
 
-	pms := &pmState{}
-	cpath := path.Join(metadir, "cache.json")
-	fi, err := os.Stat(cpath)
-	var dc *projectDataCache
-	if fi != nil {
-		pms.cf, err = os.OpenFile(cpath, os.O_RDWR, 0777)
-		if err != nil {
-			// TODO(sdboyer) be better
-			return nil, fmt.Errorf("Err on opening metadata cache file: %s", err)
-		}
+	// we don't care about the ident here, and the future produced by
+	// deducePathAndProcess will dedupe with what's in the sm.srcs map
+	src, _, err = srcf()
+	return src, err
+}
 
-		err = json.NewDecoder(pms.cf).Decode(dc)
-		if err != nil {
-			// TODO(sdboyer) be better
-			return nil, fmt.Errorf("Err on JSON decoding metadata cache file: %s", err)
-		}
-	} else {
-		// TODO(sdboyer) commented this out for now, until we manage it correctly
-		//pms.cf, err = os.Create(cpath)
-		//if err != nil {
-		//// TODO(sdboyer) be better
-		//return nil, fmt.Errorf("Err on creating metadata cache file: %s", err)
-		//}
-
-		dc = &projectDataCache{
-			Infos:    make(map[Revision]projectInfo),
-			Packages: make(map[Revision]PackageTree),
-			VMap:     make(map[Version]Revision),
-			RMap:     make(map[Revision][]Version),
-		}
+func (sm *SourceMgr) deducePathAndProcess(path string) (stringFuture, sourceFuture, error) {
+	df, err := sm.deduceFromPath(path)
+	if err != nil {
+		return nil, nil, err
 	}
 
-	pm := &projectManager{
-		n:   n,
-		ctx: sm.ctx,
-		an:  sm.an,
-		dc:  dc,
-		crepo: &repo{
-			rpath: repodir,
-			r:     r,
-		},
+	var rstart, sstart int32
+	rc, sc := make(chan struct{}, 1), make(chan struct{}, 1)
+
+	// Rewrap in a deferred future, so the caller can decide when to trigger it
+	rootf := func() (pr string, err error) {
+		// CAS because a bad interleaving here would panic on double-closing rc
+		if atomic.CompareAndSwapInt32(&rstart, 0, 1) {
+			go func() {
+				defer close(rc)
+				pr, err = df.root()
+				if err != nil {
+					// Don't cache errs. This doesn't really hurt the solver, and is
+					// beneficial for other use cases because it means we don't have to
+					// expose any kind of controls for clearing caches.
+					return
+				}
+
+				tpr := ProjectRoot(pr)
+				sm.rootxt.Insert(pr, tpr)
+				// It's not harmful if the netname was a URL rather than an
+				// import path
+				if pr != path {
+					// Insert the result into the rootxt twice - once at the
+					// root itself, so as to catch siblings/relatives, and again
+					// at the exact provided import path (assuming they were
+					// different), so that on subsequent calls, exact matches
+					// can skip the regex above.
+					sm.rootxt.Insert(path, tpr)
+				}
+			}()
+		}
+
+		<-rc
+		return pr, err
 	}
 
-	pms.pm = pm
-	sm.pms[n] = pms
-	return pms, nil
+	// Now, handle the source
+	fut := df.psf(sm.cachedir, sm.an)
+
+	// Rewrap in a deferred future, so the caller can decide when to trigger it
+	srcf := func() (src source, ident string, err error) {
+		// CAS because a bad interleaving here would panic on double-closing sc
+		if atomic.CompareAndSwapInt32(&sstart, 0, 1) {
+			go func() {
+				defer close(sc)
+				src, ident, err = fut()
+				if err != nil {
+					// Don't cache errs. This doesn't really hurt the solver, and is
+					// beneficial for other use cases because it means we don't have
+					// to expose any kind of controls for clearing caches.
+					return
+				}
+
+				sm.srcmut.Lock()
+				defer sm.srcmut.Unlock()
+
+				// Check to make sure a source hasn't shown up in the meantime, or that
+				// there wasn't already one at the ident.
+				var hasi, hasp bool
+				var srci, srcp source
+				if ident != "" {
+					srci, hasi = sm.srcs[ident]
+				}
+				srcp, hasp = sm.srcs[path]
+
+				// if neither the ident nor the input path have an entry for this src,
+				// we're in the simple case - write them both in and we're done
+				if !hasi && !hasp {
+					sm.srcs[path] = src
+					if ident != path && ident != "" {
+						sm.srcs[ident] = src
+					}
+					return
+				}
+
+				// Now, the xors.
+				//
+				// If already present for ident but not for path, copy ident's src
+				// to path. This covers cases like a gopkg.in path referring back
+				// onto a github repository, where something else already explicitly
+				// looked up that same gh repo.
+				if hasi && !hasp {
+					sm.srcs[path] = srci
+					src = srci
+				}
+				// If already present for path but not for ident, do NOT copy path's
+				// src to ident, but use the returned one instead. Really, this case
+				// shouldn't occur at all...? But the crucial thing is that the
+				// path-based one has already discovered what actual ident of source
+				// they want to use, and changing that arbitrarily would have
+				// undefined effects.
+				if hasp && !hasi && ident != "" {
+					sm.srcs[ident] = src
+				}
+
+				// If both are present, then assume we're good, and use the path one
+				if hasp && hasi {
+					// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
+					// same object, panic
+					src = srcp
+				}
+			}()
+		}
+
+		<-sc
+		return
+	}
+
+	return rootf, srcf, nil
 }
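Both rootf and srcf above follow the same shape: a CAS guard so only the first caller launches the work, plus a channel close so every caller can wait on (and then read) the result. Isolated here as a generic sketch, not gps API; needs sync/atomic:

```go
func deferredFuture(work func() (string, error)) func() (string, error) {
	var started int32
	done := make(chan struct{})
	var res string
	var err error

	return func() (string, error) {
		// The CAS guarantees work runs, and done closes, exactly once.
		if atomic.CompareAndSwapInt32(&started, 0, 1) {
			go func() {
				defer close(done)
				res, err = work()
			}()
		}
		<-done // all callers block until the single run completes
		return res, err
	}
}
```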
diff --git a/vendor/github.com/sdboyer/gps/source_test.go b/vendor/github.com/sdboyer/gps/source_test.go
new file mode 100644
index 0000000..907d9c3
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source_test.go
@@ -0,0 +1,319 @@
+package gps
+
+import (
+	"io/ioutil"
+	"net/url"
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func TestGitSourceInteractions(t *testing.T) {
+	// This test is slowish, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping git source version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	rf := func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+
+	n := "github.com/Masterminds/VCSTestRepo"
+	un := "https://" + n
+	u, err := url.Parse(un)
+	if err != nil {
+		t.Errorf("URL was bad, lolwut? errtext: %s", err)
+		rf()
+		t.FailNow()
+	}
+	mb := maybeGitSource{
+		url: u,
+	}
+
+	isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
+	if err != nil {
+		t.Errorf("Unexpected error while setting up gitSource for test repo: %s", err)
+		rf()
+		t.FailNow()
+	}
+	src, ok := isrc.(*gitSource)
+	if !ok {
+		t.Errorf("Expected a gitSource, got a %T", isrc)
+		rf()
+		t.FailNow()
+	}
+	if ident != un {
+		t.Errorf("Expected %s as source ident, got %s", un, ident)
+	}
+
+	vlist, err := src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
+		rf()
+		t.FailNow()
+	}
+
+	if src.ex.s&existsUpstream != existsUpstream {
+		t.Errorf("gitSource.listVersions() should have set the upstream existence bit for search")
+	}
+	if src.ex.f&existsUpstream != existsUpstream {
+		t.Errorf("gitSource.listVersions() should have set the upstream existence bit for found")
+	}
+	if src.ex.s&existsInCache != 0 {
+		t.Errorf("gitSource.listVersions() should not have set the cache existence bit for search")
+	}
+	if src.ex.f&existsInCache != 0 {
+		t.Errorf("gitSource.listVersions() should not have set the cache existence bit for found")
+	}
+
+	// check that an expected rev is present
+	is, err := src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
+	if err != nil {
+		t.Errorf("Unexpected error while checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present")
+	}
+
+	if len(vlist) != 3 {
+		t.Errorf("git test repo should've produced three versions, got %v: vlist was %s", len(vlist), vlist)
+	} else {
+		sort.Sort(upgradeVersionSorter(vlist))
+		evl := []Version{
+			NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
+			NewBranch("master").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
+			NewBranch("test").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
+		}
+		if !reflect.DeepEqual(vlist, evl) {
+			t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+		}
+	}
+
+	// recheck that rev is present, this time interacting with cache differently
+	is, err = src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
+	if err != nil {
+		t.Errorf("Unexpected error while re-checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present on re-check")
+	}
+}
+
+func TestBzrSourceInteractions(t *testing.T) {
+	// This test is quite slow (ugh bzr), so skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping bzr source version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	rf := func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+
+	n := "launchpad.net/govcstestbzrrepo"
+	un := "https://" + n
+	u, err := url.Parse(un)
+	if err != nil {
+		t.Errorf("URL was bad, lolwut? errtext: %s", err)
+		rf()
+		t.FailNow()
+	}
+	mb := maybeBzrSource{
+		url: u,
+	}
+
+	isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
+	if err != nil {
+		t.Errorf("Unexpected error while setting up bzrSource for test repo: %s", err)
+		rf()
+		t.FailNow()
+	}
+	src, ok := isrc.(*bzrSource)
+	if !ok {
+		t.Errorf("Expected a bzrSource, got a %T", isrc)
+		rf()
+		t.FailNow()
+	}
+	if ident != un {
+		t.Errorf("Expected %s as source ident, got %s", un, ident)
+	}
+
+	// check that an expected rev is present
+	is, err := src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+	if err != nil {
+		t.Errorf("Unexpected error while checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present")
+	}
+
+	vlist, err := src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
+	}
+
+	if src.ex.s&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for search")
+	}
+	if src.ex.f&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for found")
+	}
+
+	if len(vlist) != 1 {
+		t.Errorf("bzr test repo should've produced one version, got %v", len(vlist))
+	} else {
+		v := NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+		if vlist[0] != v {
+			t.Errorf("bzr pair fetch reported incorrect first version, got %s", vlist[0])
+		}
+	}
+
+	// Run again, this time to ensure cache outputs correctly
+	vlist, err = src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
+	}
+
+	if src.ex.s&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for search")
+	}
+	if src.ex.f&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for found")
+	}
+
+	if len(vlist) != 1 {
+		t.Errorf("bzr test repo should've produced one version, got %v", len(vlist))
+	} else {
+		v := NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+		if vlist[0] != v {
+			t.Errorf("bzr pair fetch reported incorrect first version, got %s", vlist[0])
+		}
+	}
+
+	// recheck that rev is present, this time interacting with cache differently
+	is, err = src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+	if err != nil {
+		t.Errorf("Unexpected error while re-checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present on re-check")
+	}
+}
+
+func TestHgSourceInteractions(t *testing.T) {
+	// This test is slow, so skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping hg source version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	rf := func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+
+	n := "bitbucket.org/mattfarina/testhgrepo"
+	un := "https://" + n
+	u, err := url.Parse(un)
+	if err != nil {
+		t.Errorf("URL was bad, lolwut? errtext: %s", err)
+		rf()
+		t.FailNow()
+	}
+	mb := maybeHgSource{
+		url: u,
+	}
+
+	isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
+	if err != nil {
+		t.Errorf("Unexpected error while setting up hgSource for test repo: %s", err)
+		rf()
+		t.FailNow()
+	}
+	src, ok := isrc.(*hgSource)
+	if !ok {
+		t.Errorf("Expected a hgSource, got a %T", isrc)
+		rf()
+		t.FailNow()
+	}
+	if ident != un {
+		t.Errorf("Expected %s as source ident, got %s", un, ident)
+	}
+
+	// check that an expected rev is present
+	is, err := src.revisionPresentIn(Revision("d680e82228d206935ab2eaa88612587abe68db07"))
+	if err != nil {
+		t.Errorf("Unexpected error while checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present")
+	}
+
+	vlist, err := src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
+	}
+	evl := []Version{
+		NewVersion("1.0.0").Is(Revision("d680e82228d206935ab2eaa88612587abe68db07")),
+		NewBranch("test").Is(Revision("6c44ee3fe5d87763616c19bf7dbcadb24ff5a5ce")),
+	}
+
+	if src.ex.s&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for search")
+	}
+	if src.ex.f&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for found")
+	}
+
+	if len(vlist) != 2 {
+		t.Errorf("hg test repo should've produced one version, got %v", len(vlist))
+	} else {
+		sort.Sort(upgradeVersionSorter(vlist))
+		if !reflect.DeepEqual(vlist, evl) {
+			t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+		}
+	}
+
+	// Run again, this time to ensure cache outputs correctly
+	vlist, err = src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
+	}
+
+	if src.ex.s&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for search")
+	}
+	if src.ex.f&(existsUpstream|existsInCache) != existsUpstream|existsInCache {
+		t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for found")
+	}
+
+	if len(vlist) != 2 {
+		t.Errorf("hg test repo should've produced one version, got %v", len(vlist))
+	} else {
+		sort.Sort(upgradeVersionSorter(vlist))
+		if !reflect.DeepEqual(vlist, evl) {
+			t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+		}
+	}
+
+	// recheck that rev is present, this time interacting with cache differently
+	is, err = src.revisionPresentIn(Revision("d680e82228d206935ab2eaa88612587abe68db07"))
+	if err != nil {
+		t.Errorf("Unexpected error while re-checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present on re-check")
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/trace.go b/vendor/github.com/sdboyer/gps/trace.go
new file mode 100644
index 0000000..4c20279
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/trace.go
@@ -0,0 +1,183 @@
+package gps
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	successChar   = "✓"
+	successCharSp = successChar + " "
+	failChar      = "✗"
+	failCharSp    = failChar + " "
+	backChar      = "←"
+)
+
+func (s *solver) traceCheckPkgs(bmi bimodalIdentifier) {
+	if !s.params.Trace {
+		return
+	}
+
+	prefix := strings.Repeat("| ", len(s.vqs)+1)
+	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? revisit %s to add %v pkgs", bmi.id.errString(), len(bmi.pl)), prefix, prefix))
+}
+
+func (s *solver) traceCheckQueue(q *versionQueue, bmi bimodalIdentifier, cont bool, offset int) {
+	if !s.params.Trace {
+		return
+	}
+
+	prefix := strings.Repeat("| ", len(s.vqs)+offset)
+	vlen := strconv.Itoa(len(q.pi))
+	if !q.allLoaded {
+		vlen = "at least " + vlen
+	}
+
+	// TODO(sdboyer) how...to list the packages in the limited space we have?
+	var verb string
+	if cont {
+		verb = "continue"
+		vlen = vlen + " more"
+	} else {
+		verb = "attempt"
+	}
+
+	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? %s %s with %v pkgs; %s versions to try", verb, bmi.id.errString(), len(bmi.pl), vlen), prefix, prefix))
+}
+
+// traceStartBacktrack is called with the bmi that first failed, thus initiating
+// backtracking
+func (s *solver) traceStartBacktrack(bmi bimodalIdentifier, err error, pkgonly bool) {
+	if !s.params.Trace {
+		return
+	}
+
+	var msg string
+	if pkgonly {
+		msg = fmt.Sprintf("%s could not add %v pkgs to %s; begin backtrack", backChar, len(bmi.pl), bmi.id.errString())
+	} else {
+		msg = fmt.Sprintf("%s no more versions of %s to try; begin backtrack", backChar, bmi.id.errString())
+	}
+
+	prefix := strings.Repeat("| ", len(s.sel.projects))
+	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
+}
+
+// traceBacktrack is called when a package or project is popped off during
+// backtracking
+func (s *solver) traceBacktrack(bmi bimodalIdentifier, pkgonly bool) {
+	if !s.params.Trace {
+		return
+	}
+
+	var msg string
+	if pkgonly {
+		msg = fmt.Sprintf("%s backtrack: popped %v pkgs from %s", backChar, len(bmi.pl), bmi.id.errString())
+	} else {
+		msg = fmt.Sprintf("%s backtrack: no more versions of %s to try", backChar, bmi.id.errString())
+	}
+
+	prefix := strings.Repeat("| ", len(s.sel.projects))
+	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
+}
+
+// traceFinish is called just once after solving finishes, whether or not it succeeded
+func (s *solver) traceFinish(sol solution, err error) {
+	if !s.params.Trace {
+		return
+	}
+
+	if err == nil {
+		var pkgcount int
+		for _, lp := range sol.Projects() {
+			pkgcount += len(lp.pkgs)
+		}
+		s.tl.Printf("%s found solution with %v packages from %v projects", successChar, pkgcount, len(sol.Projects()))
+	} else {
+		s.tl.Printf("%s solving failed", failChar)
+	}
+}
+
+// traceSelectRoot is called just once, when the root project is selected
+func (s *solver) traceSelectRoot(ptree PackageTree, cdeps []completeDep) {
+	if !s.params.Trace {
+		return
+	}
+
+	// This duplicates work a bit, but we're in trace mode and it's only once,
+	// so who cares
+	rm := ptree.ExternalReach(true, true, s.ig)
+
+	s.tl.Printf("Root project is %q", s.params.ImportRoot)
+
+	var expkgs int
+	for _, cdep := range cdeps {
+		expkgs += len(cdep.pl)
+	}
+
+	// TODO(sdboyer) include info on ignored pkgs/imports, etc.
+	s.tl.Printf(" %v transitively valid internal packages", len(rm))
+	s.tl.Printf(" %v external packages imported from %v projects", expkgs, len(cdeps))
+	s.tl.Printf(successCharSp + "select (root)")
+}
+
+// traceSelect is called when an atom is successfully selected
+func (s *solver) traceSelect(awp atomWithPackages, pkgonly bool) {
+	if !s.params.Trace {
+		return
+	}
+
+	var msg string
+	if pkgonly {
+		msg = fmt.Sprintf("%s include %v more pkgs from %s", successChar, len(awp.pl), a2vs(awp.a))
+	} else {
+		msg = fmt.Sprintf("%s select %s w/%v pkgs", successChar, a2vs(awp.a), len(awp.pl))
+	}
+
+	prefix := strings.Repeat("| ", len(s.sel.projects)-1)
+	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
+}
+
+func (s *solver) traceInfo(args ...interface{}) {
+	if !s.params.Trace {
+		return
+	}
+
+	if len(args) == 0 {
+		panic("must pass at least one param to traceInfo")
+	}
+
+	preflen := len(s.sel.projects)
+	var msg string
+	switch data := args[0].(type) {
+	case string:
+		msg = tracePrefix(fmt.Sprintf(data, args[1:]...), "| ", "| ")
+	case traceError:
+		preflen++
+		// We got a special traceError, use its custom method
+		msg = tracePrefix(data.traceString(), "| ", failCharSp)
+	case error:
+		// Regular error; still use the x leader but default Error() string
+		msg = tracePrefix(data.Error(), "| ", failCharSp)
+	default:
+		// panic here because this can *only* mean a stupid internal bug
+		panic(fmt.Sprintf("canary - unknown type passed as first param to traceInfo %T", data))
+	}
+
+	prefix := strings.Repeat("| ", preflen)
+	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
+}
+
+func tracePrefix(msg, sep, fsep string) string {
+	parts := strings.Split(strings.TrimSuffix(msg, "\n"), "\n")
+	for k, str := range parts {
+		if k == 0 {
+			parts[k] = fsep + str
+		} else {
+			parts[k] = sep + str
+		}
+	}
+
+	return strings.Join(parts, "\n")
+}
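For instance, a two-line message with the failure leader:

```go
_ = tracePrefix("no version of foo works\ntried 1.0.0, 0.9.0", "| ", failCharSp)
// returns:
// ✗ no version of foo works
// | tried 1.0.0, 0.9.0
```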
diff --git a/vendor/github.com/sdboyer/gps/typed_radix.go b/vendor/github.com/sdboyer/gps/typed_radix.go
new file mode 100644
index 0000000..9f56a9b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/typed_radix.go
@@ -0,0 +1,151 @@
+package gps
+
+import (
+	"strings"
+
+	"github.com/armon/go-radix"
+)
+
+// Typed implementations of radix trees. These are just simple wrappers that let
+// us avoid having to type assert anywhere else, cleaning up other code a bit.
+//
+// Some of the more annoying things to implement (like walks) aren't
+// implemented. They can be added if/when we actually need them.
+//
+// Oh generics, where art thou...
+
+type deducerTrie struct {
+	t *radix.Tree
+}
+
+func newDeducerTrie() deducerTrie {
+	return deducerTrie{
+		t: radix.New(),
+	}
+}
+
+// Delete is used to delete a key, returning the previous value and if it was deleted
+func (t deducerTrie) Delete(s string) (pathDeducer, bool) {
+	if v, had := t.t.Delete(s); had {
+		return v.(pathDeducer), had
+	}
+	return nil, false
+}
+
+// Get is used to lookup a specific key, returning the value and if it was found
+func (t deducerTrie) Get(s string) (pathDeducer, bool) {
+	if v, has := t.t.Get(s); has {
+		return v.(pathDeducer), has
+	}
+	return nil, false
+}
+
+// Insert adds a new entry or updates an existing one, returning whether an update occurred.
+func (t deducerTrie) Insert(s string, v pathDeducer) (pathDeducer, bool) {
+	if v2, had := t.t.Insert(s, v); had {
+		return v2.(pathDeducer), had
+	}
+	return nil, false
+}
+
+// Len is used to return the number of elements in the tree
+func (t deducerTrie) Len() int {
+	return t.t.Len()
+}
+
+// LongestPrefix is like Get, but instead of an exact match, it will return the
+// longest prefix match.
+func (t deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) {
+	if p, v, has := t.t.LongestPrefix(s); has {
+		return p, v.(pathDeducer), has
+	}
+	return "", nil, false
+}
+
+// ToMap is used to walk the tree and convert it to a map.
+func (t deducerTrie) ToMap() map[string]pathDeducer {
+	m := make(map[string]pathDeducer)
+	t.t.Walk(func(s string, v interface{}) bool {
+		m[s] = v.(pathDeducer)
+		return false
+	})
+
+	return m
+}
+
+type prTrie struct {
+	t *radix.Tree
+}
+
+func newProjectRootTrie() prTrie {
+	return prTrie{
+		t: radix.New(),
+	}
+}
+
+// Delete is used to delete a key, returning the previous value and if it was deleted
+func (t prTrie) Delete(s string) (ProjectRoot, bool) {
+	if v, had := t.t.Delete(s); had {
+		return v.(ProjectRoot), had
+	}
+	return "", false
+}
+
+// Get is used to lookup a specific key, returning the value and if it was found
+func (t prTrie) Get(s string) (ProjectRoot, bool) {
+	if v, has := t.t.Get(s); has {
+		return v.(ProjectRoot), has
+	}
+	return "", false
+}
+
+// Insert adds a new entry or updates an existing one, returning whether an update occurred.
+func (t prTrie) Insert(s string, v ProjectRoot) (ProjectRoot, bool) {
+	if v2, had := t.t.Insert(s, v); had {
+		return v2.(ProjectRoot), had
+	}
+	return "", false
+}
+
+// Len is used to return the number of elements in the tree
+func (t prTrie) Len() int {
+	return t.t.Len()
+}
+
+// LongestPrefix is like Get, but instead of an exact match, it will return the
+// longest prefix match.
+func (t prTrie) LongestPrefix(s string) (string, ProjectRoot, bool) {
+	if p, v, has := t.t.LongestPrefix(s); has && isPathPrefixOrEqual(p, s) {
+		return p, v.(ProjectRoot), has
+	}
+	return "", "", false
+}
+
+// ToMap is used to walk the tree and convert it to a map.
+func (t prTrie) ToMap() map[string]ProjectRoot {
+	m := make(map[string]ProjectRoot)
+	t.t.Walk(func(s string, v interface{}) bool {
+		m[s] = v.(ProjectRoot)
+		return false
+	})
+
+	return m
+}
+
+// isPathPrefixOrEqual is an additional helper check to ensure that the literal
+// string prefix returned from a radix tree prefix match is also a tree match.
+//
+// The radix tree gets it mostly right, but we have to guard against
+// possibilities like this:
+//
+// github.com/sdboyer/foo
+// github.com/sdboyer/foobar/baz
+//
+// The latter would incorrectly be conflated with the former. As we know we're
+// operating on strings that describe paths, guard against this case by
+// verifying that either the input is the same length as the match (in which
+// case we know they're equal), or that the next character is a "/".
+func isPathPrefixOrEqual(pre, path string) bool {
+	prflen := len(pre)
+	return prflen == len(path) || strings.Index(path[prflen:], "/") == 0
+}
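Concretely, using the paths from the comment above (with the corrected next-character check):

```go
_ = isPathPrefixOrEqual("github.com/sdboyer/foo", "github.com/sdboyer/foo")        // true: equal
_ = isPathPrefixOrEqual("github.com/sdboyer/foo", "github.com/sdboyer/foo/baz")    // true: '/' follows the prefix
_ = isPathPrefixOrEqual("github.com/sdboyer/foo", "github.com/sdboyer/foobar/baz") // false: not a path boundary
```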
diff --git a/vendor/github.com/sdboyer/gps/types.go b/vendor/github.com/sdboyer/gps/types.go
index f720fa2..b40807d 100644
--- a/vendor/github.com/sdboyer/gps/types.go
+++ b/vendor/github.com/sdboyer/gps/types.go
@@ -75,14 +75,6 @@
 	NetworkName string
 }
 
-// A ProjectConstraint combines a ProjectIdentifier with a Constraint. It
-// indicates that, if packages contained in the ProjectIdentifier enter the
-// depgraph, they must do so at a version that is allowed by the Constraint.
-type ProjectConstraint struct {
-	Ident      ProjectIdentifier
-	Constraint Constraint
-}
-
 func (i ProjectIdentifier) less(j ProjectIdentifier) bool {
 	if i.ProjectRoot < j.ProjectRoot {
 		return true
@@ -134,6 +126,16 @@
 	return i
 }
 
+// ProjectProperties comprise the properties that can be attached to a ProjectRoot.
+//
+// In general, these are declared in the context of a map of ProjectRoot to its
+// ProjectProperties; they make little sense without their corresponding
+// ProjectRoot.
+type ProjectProperties struct {
+	NetworkName string
+	Constraint  Constraint
+}
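A sketch of that intended shape; it assumes, as gps does elsewhere, that a Version can serve as a Constraint, and the repository named here is purely illustrative:

```go
func exampleProps() {
	deps := map[ProjectRoot]ProjectProperties{
		"github.com/Masterminds/semver": {
			NetworkName: "https://github.com/Masterminds/semver",
			Constraint:  NewVersion("1.0.0"),
		},
	}
	_ = deps
}
```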
+
 // Package represents a Go package. It contains a subset of the information
 // go/build.Package does.
 type Package struct {
@@ -144,8 +146,6 @@
 }
 
 // bimodalIdentifiers are used to track work to be done in the unselected queue.
-// TODO(sdboyer) marker for root, to know to ignore prefv...or can we do unselected queue
-// sorting only?
 type bimodalIdentifier struct {
 	id ProjectIdentifier
 	// List of packages required within/under the ProjectIdentifier
@@ -172,6 +172,18 @@
 	pl []string
 }
 
+// bmi converts an atomWithPackages into a bimodalIdentifier.
+//
+// This is mostly intended for (read-only) trace use, so the package list slice
+// is not copied. It is the caller's responsibility not to modify the pl slice,
+// lest the change propagate back and cause inconsistencies.
+func (awp atomWithPackages) bmi() bimodalIdentifier {
+	return bimodalIdentifier{
+		id: awp.a.id,
+		pl: awp.pl,
+	}
+}
+
 //type byImportPath []Package
 
 //func (s byImportPath) Len() int           { return len(s) }
@@ -183,8 +195,8 @@
 // are the same) name, a constraint, and the actual packages needed that are
 // under that root.
 type completeDep struct {
-	// The base ProjectDep
-	ProjectConstraint
+	// The base workingConstraint
+	workingConstraint
 	// The specific packages required from the ProjectDep
 	pl []string
 }
diff --git a/vendor/github.com/sdboyer/gps/vcs_source.go b/vendor/github.com/sdboyer/gps/vcs_source.go
new file mode 100644
index 0000000..277b1db
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/vcs_source.go
@@ -0,0 +1,439 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/Masterminds/vcs"
+	"github.com/termie/go-shutil"
+)
+
+type vcsSource interface {
+	syncLocal() error
+	ensureLocal() error
+	listLocalVersionPairs() ([]PairedVersion, sourceExistence, error)
+	listUpstreamVersionPairs() ([]PairedVersion, sourceExistence, error)
+	hasRevision(Revision) (bool, error)
+	checkout(Version) error
+	exportVersionTo(Version, string) error
+}
+
+// gitSource is a generic git repository implementation that should work with
+// all standard git remotes.
+type gitSource struct {
+	baseVCSSource
+}
+
+func (s *gitSource) exportVersionTo(v Version, to string) error {
+	s.crepo.mut.Lock()
+	defer s.crepo.mut.Unlock()
+
+	r := s.crepo.r
+	if !r.CheckLocal() {
+		err := r.Get()
+		if err != nil {
+			return fmt.Errorf("failed to clone repo from %s", r.Remote())
+		}
+	}
+	// Back up original index
+	idx, bak := filepath.Join(r.LocalPath(), ".git", "index"), filepath.Join(r.LocalPath(), ".git", "origindex")
+	err := os.Rename(idx, bak)
+	if err != nil {
+		return err
+	}
+
+	// TODO(sdboyer) could have an err here
+	defer os.Rename(bak, idx)
+
+	vstr := v.String()
+	if rv, ok := v.(PairedVersion); ok {
+		vstr = rv.Underlying().String()
+	}
+	_, err = r.RunFromDir("git", "read-tree", vstr)
+	if err != nil {
+		return err
+	}
+
+	// Ensure we have exactly one trailing slash
+	to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator)
+	// Checkout from our temporary index to the desired target location on disk;
+	// now it's git's job to make it fast. Sadly, this approach *does* also
+	// write out vendor dirs. There doesn't appear to be a way to make
+	// checkout-index respect sparse checkout rules (-a supersedes it);
+	// the alternative is using plain checkout, though we have a bunch of
+	// housekeeping to do to set up, then tear down, the sparse checkout
+	// controls, as well as restore the original index and HEAD.
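+	//
+	// In shell terms, the dance above amounts to roughly:
+	//
+	//	mv .git/index .git/origindex
+	//	git read-tree <rev>
+	//	git checkout-index -a --prefix=<to>/
+	//	mv .git/origindex .git/index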
+	_, err = r.RunFromDir("git", "checkout-index", "-a", "--prefix="+to)
+	return err
+}
+
+func (s *gitSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	r := s.crepo.r
+	var out []byte
+	c := exec.Command("git", "ls-remote", r.Remote())
+	// Ensure no terminal prompting for PWs
+	c.Env = mergeEnvLists([]string{"GIT_TERMINAL_PROMPT=0"}, os.Environ())
+	out, err = c.CombinedOutput()
+
+	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+	if err != nil || len(all) == 0 {
+		// TODO(sdboyer) remove this path? it really just complicates things, for
+		// probably not much benefit
+
+		// ls-remote failed, probably due to bad communication or a faulty
+		// upstream implementation. So fetch updates, then build the list
+		// locally
+		s.crepo.mut.Lock()
+		err = r.Update()
+		s.crepo.mut.Unlock()
+		if err != nil {
+			// Definitely have a problem, now - bail out
+			return
+		}
+
+		// Upstream and cache must exist for this to have worked, so add that to
+		// searched and found
+		s.ex.s |= existsUpstream | existsInCache
+		s.ex.f |= existsUpstream | existsInCache
+		// Also, local is definitely now synced
+		s.crepo.synced = true
+
+		s.crepo.mut.RLock()
+		out, err = r.RunFromDir("git", "show-ref", "--dereference")
+		s.crepo.mut.RUnlock()
+		if err != nil {
+			// TODO(sdboyer) More-er proper-er error
+			return
+		}
+
+		all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		if len(all) == 0 {
+			return nil, fmt.Errorf("no versions available for %s (this is weird)", r.Remote())
+		}
+	}
+
+	// Local cache may not actually exist here, but upstream definitely does
+	s.ex.s |= existsUpstream
+	s.ex.f |= existsUpstream
+
+	smap := make(map[string]bool)
+	uniq := 0
+	vlist = make([]Version, len(all)-1) // less 1, because always ignore HEAD
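+	// Each remaining ls-remote line has the fixed shape
+	//
+	//	<40-char SHA>\trefs/heads/<branch>   or   <40-char SHA>\trefs/tags/<tag>
+	//
+	// so the ref type always begins at byte offset 46 (40 for the SHA, one for
+	// the tab, five for "refs/"); the slicing below relies on this.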
+	for _, pair := range all {
+		var v PairedVersion
+		if string(pair[46:51]) == "heads" {
+			v = NewBranch(string(pair[52:])).Is(Revision(pair[:40])).(PairedVersion)
+			vlist[uniq] = v
+			uniq++
+		} else if string(pair[46:50]) == "tags" {
+			vstr := string(pair[51:])
+			if strings.HasSuffix(vstr, "^{}") {
+				// If the suffix is there, then we *know* this is the rev of
+				// the underlying commit object that we actually want
+				vstr = strings.TrimSuffix(vstr, "^{}")
+			} else if smap[vstr] {
+				// Already saw the deref'd version of this tag, if one
+				// exists, so skip this. We can only hit this branch if we
+				// somehow got the deref'd version first, which should be
+				// impossible, but this covers us in case of weirdness anyway.
+				continue
+			}
+			v = NewVersion(vstr).Is(Revision(pair[:40])).(PairedVersion)
+			smap[vstr] = true
+			vlist[uniq] = v
+			uniq++
+		}
+	}
+
+	// Trim off excess from the slice
+	vlist = vlist[:uniq]
+
+	// Process the version data into the cache
+	//
+	// reset the rmap and vmap, as they'll be fully repopulated by this
+	// TODO(sdboyer) detect out-of-sync pairings as we do this?
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
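+	// vMap pairs each branch/tag with the revision it currently points to;
+	// rMap is the inverse, mapping a revision to all the names on it.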
+
+	for _, v := range vlist {
+		pv := v.(PairedVersion)
+		u, r := pv.Unpair(), pv.Underlying()
+		s.dc.vMap[u] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], u)
+	}
+	// Mark the cache as being in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+// bzrSource is a generic bzr repository implementation that should work with
+// all standard bazaar remotes.
+type bzrSource struct {
+	baseVCSSource
+}
+
+func (s *bzrSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	// Must first ensure cache checkout's existence
+	err = s.ensureCacheExistence()
+	if err != nil {
+		return
+	}
+	r := s.crepo.r
+
+	// Local repo won't have all the latest refs if ensureCacheExistence()
+	// didn't just create it
+	if !s.crepo.synced {
+		s.crepo.mut.Lock()
+		err = r.Update()
+		s.crepo.mut.Unlock()
+		if err != nil {
+			return
+		}
+
+		s.crepo.synced = true
+	}
+
+	var out []byte
+
+	// Now, list all the tags
+	out, err = r.RunFromDir("bzr", "tags", "--show-ids", "-v")
+	if err != nil {
+		return
+	}
+
+	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+
+	// reset the rmap and vmap, as they'll be fully repopulated by this
+	// TODO(sdboyer) detect out-of-sync pairings as we do this?
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
+
+	vlist = make([]Version, len(all))
+	k := 0
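+	// Each line of `bzr tags --show-ids -v` output looks roughly like
+	// (illustrative values only)
+	//
+	//	v1.0.0               someone@example.com-20160101000000-abcdef012345
+	//
+	// i.e. the tag name, whitespace, then the full revision id.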
+	for _, line := range all {
+		idx := bytes.IndexByte(line, 32) // space
+		v := NewVersion(string(line[:idx]))
+		r := Revision(bytes.TrimSpace(line[idx:]))
+
+		s.dc.vMap[v] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], v)
+		vlist[k] = v.Is(r)
+		k++
+	}
+
+	// Cache is now in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+// hgSource is a generic hg repository implementation that should work with
+// all standard mercurial servers.
+type hgSource struct {
+	baseVCSSource
+}
+
+func (s *hgSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	// Must first ensure cache checkout's existence
+	err = s.ensureCacheExistence()
+	if err != nil {
+		return
+	}
+	r := s.crepo.r
+
+	// Local repo won't have all the latest refs if ensureCacheExistence()
+	// didn't just create it
+	if !s.crepo.synced {
+		s.crepo.mut.Lock()
+		err = r.Update()
+		s.crepo.mut.Unlock()
+		if err != nil {
+			return
+		}
+
+		s.crepo.synced = true
+	}
+
+	var out []byte
+
+	// Now, list all the tags
+	out, err = r.RunFromDir("hg", "tags", "--debug", "--verbose")
+	if err != nil {
+		return
+	}
+
+	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+	lbyt := []byte("local")
+	nulrev := []byte("0000000000000000000000000000000000000000")
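+	// Lines of `hg tags --debug --verbose` output look roughly like
+	// (illustrative values only)
+	//
+	//	v1.0.0                            112:<40-hex node>
+	//	worktag                            24:<40-hex node> local
+	//
+	// i.e. tag name, whitespace, local revno, a colon, and the full node,
+	// with purely-local tags carrying a trailing "local" marker.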
+	for _, line := range all {
+		if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
+			// Skip local tags
+			continue
+		}
+
+		// tip is magic, don't include it
+		if bytes.HasPrefix(line, []byte("tip")) {
+			continue
+		}
+
+		// Split on colon; this gets us the rev and the tag plus local revno
+		pair := bytes.Split(line, []byte(":"))
+		if bytes.Equal(nulrev, pair[1]) {
+			// null rev indicates this tag is marked for deletion
+			continue
+		}
+
+		idx := bytes.IndexByte(pair[0], 32) // space
+		v := NewVersion(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
+		vlist = append(vlist, v)
+	}
+
+	out, err = r.RunFromDir("hg", "branches", "--debug", "--verbose")
+	if err != nil {
+		// better nothing than partial and misleading
+		vlist = nil
+		return
+	}
+
+	all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+	lbyt = []byte("(inactive)")
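+	// Branch lines follow the same general shape as tag lines, with inactive
+	// branches carrying a trailing "(inactive)" marker instead, roughly:
+	//
+	//	default                           112:<40-hex node>
+	//	old-work                           57:<40-hex node> (inactive)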
+	for _, line := range all {
+		if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
+			// Skip inactive branches
+			continue
+		}
+
+		// Split on colon; this gets us the rev and the branch plus local revno
+		pair := bytes.Split(line, []byte(":"))
+		idx := bytes.IndexByte(pair[0], 32) // space
+		v := NewBranch(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
+		vlist = append(vlist, v)
+	}
+
+	// reset the rmap and vmap, as they'll be fully repopulated by this
+	// TODO(sdboyer) detect out-of-sync pairings as we do this?
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
+
+	for _, v := range vlist {
+		pv := v.(PairedVersion)
+		u, r := pv.Unpair(), pv.Underlying()
+		s.dc.vMap[u] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], u)
+	}
+
+	// Cache is now in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+type repo struct {
+	// Path to the root of the default working copy (NOT the repo itself)
+	rpath string
+
+	// Mutex controlling general access to the repo
+	mut sync.RWMutex
+
+	// Object for direct repo interaction
+	r vcs.Repo
+
+	// Whether the cache repo is in sync (think DVCS) with upstream
+	synced bool
+}
+
+func (r *repo) exportVersionTo(v Version, to string) error {
+	r.mut.Lock()
+	defer r.mut.Unlock()
+
+	// TODO(sdboyer) This is a dumb, slow approach, but we're punting on making
+	// these fast for now because git is the OVERWHELMING case (it's handled in
+	// its own method)
+	r.r.UpdateVersion(v.String())
+
+	cfg := &shutil.CopyTreeOptions{
+		Symlinks:     true,
+		CopyFunction: shutil.Copy,
+		Ignore: func(src string, contents []os.FileInfo) (ignore []string) {
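+			// Return the names (not paths) of entries in src that CopyTree
+			// should skip: nested vendor directories and non-git VCS metadata.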
+			for _, fi := range contents {
+				if !fi.IsDir() {
+					continue
+				}
+				n := fi.Name()
+				switch n {
+				case "vendor", ".bzr", ".svn", ".hg":
+					ignore = append(ignore, n)
+				}
+			}
+
+			return
+		},
+	}
+
+	return shutil.CopyTree(r.rpath, to, cfg)
+}
+
+// This func is copied from Masterminds/vcs so we can exec our own commands.
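+//
+// Variables in "in" override same-named variables in "out"; e.g. the call in
+// gitSource.listVersions above,
+//
+//	mergeEnvLists([]string{"GIT_TERMINAL_PROMPT=0"}, os.Environ())
+//
+// yields the current environment with GIT_TERMINAL_PROMPT forced to 0.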
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+	for _, inkv := range in {
+		k := strings.SplitAfterN(inkv, "=", 2)[0]
+		for i, outkv := range out {
+			if strings.HasPrefix(outkv, k) {
+				out[i] = inkv
+				continue NextVar
+			}
+		}
+		out = append(out, inkv)
+	}
+	return out
+}
+
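+// stripVendor matches the filepath.WalkFunc signature; it removes any
+// directory named "vendor" that the walk encounters (the Lstat check is
+// presumably a guard against entries that disappear mid-walk).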
+func stripVendor(path string, info os.FileInfo, err error) error {
+	if info.Name() == "vendor" {
+		if _, err := os.Lstat(path); err == nil {
+			if info.IsDir() {
+				return removeAll(path)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sdboyer/gps/version.go b/vendor/github.com/sdboyer/gps/version.go
index 57d37ec..ad79bff 100644
--- a/vendor/github.com/sdboyer/gps/version.go
+++ b/vendor/github.com/sdboyer/gps/version.go
@@ -16,6 +16,7 @@
 // hiding behind the interface.
 type Version interface {
 	Constraint
+
 	// Indicates the type of version - Revision, Branch, Version, or Semver
 	Type() string
 }
@@ -24,8 +25,15 @@
 // underlying Revision.
 type PairedVersion interface {
 	Version
+
 	// Underlying returns the immutable Revision that identifies this Version.
 	Underlying() Revision
+
+	// Unpair returns the surface-level UnpairedVersion that is half of the
+	// pair.
+	//
+	// It does NOT modify the original PairedVersion.
+	Unpair() UnpairedVersion
+
 	// Ensures it is impossible to be both a PairedVersion and an
 	// UnpairedVersion
 	_pair(int)
@@ -380,6 +388,10 @@
 	return v.r
 }
 
+func (v versionPair) Unpair() UnpairedVersion {
+	return v.v
+}
+
 func (v versionPair) Matches(v2 Version) bool {
 	switch tv2 := v2.(type) {
 	case versionTypeUnion:
@@ -470,8 +482,6 @@
 			return 0
 		case branchVersion, plainVersion, semVersion:
 			return 1
-		default:
-			panic("unknown version type")
 		}
 	case branchVersion:
 		switch r.(type) {
@@ -481,8 +491,6 @@
 			return 0
 		case plainVersion, semVersion:
 			return 1
-		default:
-			panic("unknown version type")
 		}
 
 	case plainVersion:
@@ -493,8 +501,6 @@
 			return 0
 		case semVersion:
 			return 1
-		default:
-			panic("unknown version type")
 		}
 
 	case semVersion:
@@ -503,10 +509,7 @@
 			return -1
 		case semVersion:
 			return 0
-		default:
-			panic("unknown version type")
 		}
-	default:
-		panic("unknown version type")
 	}
+	panic("unknown version type")
 }
diff --git a/vendor/github.com/sdboyer/gps/version_queue.go b/vendor/github.com/sdboyer/gps/version_queue.go
index e74a1da..7c92253 100644
--- a/vendor/github.com/sdboyer/gps/version_queue.go
+++ b/vendor/github.com/sdboyer/gps/version_queue.go
@@ -40,7 +40,7 @@
 
 	if len(vq.pi) == 0 {
 		var err error
-		vq.pi, err = vq.b.listVersions(vq.id)
+		vq.pi, err = vq.b.ListVersions(vq.id)
 		if err != nil {
 			// TODO(sdboyer) pushing this error this early entails that we
 			// unconditionally deep scan (e.g. vendor), as well as hitting the
@@ -86,7 +86,7 @@
 		}
 
 		vq.allLoaded = true
-		vq.pi, err = vq.b.listVersions(vq.id)
+		vq.pi, err = vq.b.ListVersions(vq.id)
 		if err != nil {
 			return err
 		}