Update to v0.12.0 of gps
diff --git a/cfg/config.go b/cfg/config.go
index 82579e0..2ad944c 100644
--- a/cfg/config.go
+++ b/cfg/config.go
@@ -162,25 +162,22 @@
 
 // DependencyConstraints lists all the non-test dependency constraints
 // described in a glide manifest in a way gps will understand.
-func (c *Config) DependencyConstraints() []gps.ProjectConstraint {
+func (c *Config) DependencyConstraints() gps.ProjectConstraints {
 	return gpsifyDeps(c.Imports)
 }
 
 // TestDependencyConstraints lists all the test dependency constraints described
 // in a glide manifest in a way gps will understand.
-func (c *Config) TestDependencyConstraints() []gps.ProjectConstraint {
+func (c *Config) TestDependencyConstraints() gps.ProjectConstraints {
 	return gpsifyDeps(c.DevImports)
 }
 
-func gpsifyDeps(deps Dependencies) []gps.ProjectConstraint {
-	cp := make([]gps.ProjectConstraint, len(deps))
-	for k, d := range deps {
-		cp[k] = gps.ProjectConstraint{
-			Ident: gps.ProjectIdentifier{
-				ProjectRoot: gps.ProjectRoot(d.Name),
-				NetworkName: d.Repository,
-			},
-			Constraint: d.GetConstraint(),
+func gpsifyDeps(deps Dependencies) gps.ProjectConstraints {
+	cp := make(gps.ProjectConstraints, len(deps))
+	for _, d := range deps {
+		cp[gps.ProjectRoot(d.Name)] = gps.ProjectProperties{
+			NetworkName: d.Repository,
+			Constraint:  d.GetConstraint(),
 		}
 	}
 
@@ -241,7 +238,6 @@
 
 // DeDupe consolidates duplicate dependencies on a Config instance
 func (c *Config) DeDupe() error {
-
 	// Remove duplicates in the imports
 	var err error
 	c.Imports, err = c.Imports.DeDupe()
diff --git a/glide.lock b/glide.lock
index 7979eed..096ee86 100644
--- a/glide.lock
+++ b/glide.lock
@@ -15,7 +15,7 @@
   revision: fbe9fb6ad5b5f35b3e82a7c21123cfc526cbf895
 - name: github.com/sdboyer/gps
   branch: master
-  revision: 44255835bcf52ec1dfacf207dbbb4c1bffe378d0
+  revision: 9ca61cb4e9851c80bb537e7d8e1be56e18e03cc9
 - name: github.com/termie/go-shutil
   revision: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
 - name: gopkg.in/yaml.v2
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/bad/bad.go b/vendor/github.com/sdboyer/gps/_testdata/src/bad/bad.go
new file mode 100644
index 0000000..a1a3d1a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/bad/bad.go
@@ -0,0 +1,2 @@
+// This ill-formed Go source file is here to ensure the tool is robust
+// against bad packages in the workspace.
diff --git a/vendor/github.com/sdboyer/gps/analysis.go b/vendor/github.com/sdboyer/gps/analysis.go
index d410eb3..1fe0546 100644
--- a/vendor/github.com/sdboyer/gps/analysis.go
+++ b/vendor/github.com/sdboyer/gps/analysis.go
@@ -4,6 +4,7 @@
 	"bytes"
 	"fmt"
 	"go/build"
+	gscan "go/scanner"
 	"io"
 	"io/ioutil"
 	"os"
@@ -45,7 +46,8 @@
 	stdlib["C"] = true
 }
 
-// listPackages lists info for all packages at or below the provided fileRoot.
+// ListPackages reports Go package information about all directories in the tree
+// at or below the provided fileRoot.
 //
 // Directories without any valid Go files are excluded. Directories with
 // multiple packages are excluded.
@@ -63,8 +65,8 @@
 //  importRoot = "github.com/foo/bar"
 //
 // then the root package at path/to/repo will be ascribed import path
-// "github.com/foo/bar", and its subpackage "baz" will be
-// "github.com/foo/bar/baz".
+// "github.com/foo/bar", and the package at
+// "/home/user/workspace/path/to/repo/baz" will be "github.com/foo/bar/baz".
 //
 // A PackageTree is returned, which contains the ImportRoot and map of import path
 // to PackageOrErr - each path under the root that exists will have either a
@@ -164,6 +166,12 @@
 			pkg = happy(ip, p)
 		} else {
 			switch terr := err.(type) {
+			case gscan.ErrorList, *gscan.Error:
+				// This happens if we encounter malformed Go source code
+				ptree.Packages[ip] = PackageOrErr{
+					Err: err,
+				}
+				return nil
 			case *build.NoGoError:
 				ptree.Packages[ip] = PackageOrErr{
 					Err: err,
diff --git a/vendor/github.com/sdboyer/gps/analysis_test.go b/vendor/github.com/sdboyer/gps/analysis_test.go
index c21f53b..06076ab 100644
--- a/vendor/github.com/sdboyer/gps/analysis_test.go
+++ b/vendor/github.com/sdboyer/gps/analysis_test.go
@@ -3,6 +3,8 @@
 import (
 	"fmt"
 	"go/build"
+	"go/scanner"
+	"go/token"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -225,8 +227,8 @@
 
 func TestListPackages(t *testing.T) {
 	srcdir := filepath.Join(getwd(t), "_testdata", "src")
-	j := func(s string) string {
-		return filepath.Join(srcdir, s)
+	j := func(s ...string) string {
+		return filepath.Join(srcdir, filepath.Join(s...))
 	}
 
 	table := map[string]struct {
@@ -458,6 +460,28 @@
 				},
 			},
 		},
+		"malformed go file": {
+			fileRoot:   j("bad"),
+			importRoot: "bad",
+			out: PackageTree{
+				ImportRoot: "bad",
+				Packages: map[string]PackageOrErr{
+					"bad": {
+						Err: scanner.ErrorList{
+							&scanner.Error{
+								Pos: token.Position{
+									Filename: j("bad", "bad.go"),
+									Offset:   113,
+									Line:     2,
+									Column:   43,
+								},
+								Msg: "expected 'package', found 'EOF'",
+							},
+						},
+					},
+				},
+			},
+		},
 		"two nested under empty root": {
 			fileRoot:   j("ren"),
 			importRoot: "ren",
diff --git a/vendor/github.com/sdboyer/gps/bridge.go b/vendor/github.com/sdboyer/gps/bridge.go
index 379cd4b..ab9101f 100644
--- a/vendor/github.com/sdboyer/gps/bridge.go
+++ b/vendor/github.com/sdboyer/gps/bridge.go
@@ -70,7 +70,11 @@
 	if id.ProjectRoot == ProjectRoot(b.s.rpt.ImportRoot) {
 		return b.s.rm, b.s.rl, nil
 	}
-	return b.sm.GetManifestAndLock(id, v)
+
+	b.s.mtr.push("b-gmal")
+	m, l, e := b.sm.GetManifestAndLock(id, v)
+	b.s.mtr.pop()
+	return m, l, e
 }
 
 func (b *bridge) AnalyzerInfo() (string, *semver.Version) {
@@ -82,9 +86,11 @@
 		return vl, nil
 	}
 
+	b.s.mtr.push("b-list-versions")
 	vl, err := b.sm.ListVersions(id)
 	// TODO(sdboyer) cache errors, too?
 	if err != nil {
+		b.s.mtr.pop()
 		return nil, err
 	}
 
@@ -95,15 +101,22 @@
 	}
 
 	b.vlists[id] = vl
+	b.s.mtr.pop()
 	return vl, nil
 }
 
 func (b *bridge) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
-	return b.sm.RevisionPresentIn(id, r)
+	b.s.mtr.push("b-rev-present-in")
+	i, e := b.sm.RevisionPresentIn(id, r)
+	b.s.mtr.pop()
+	return i, e
 }
 
 func (b *bridge) SourceExists(id ProjectIdentifier) (bool, error) {
-	return b.sm.SourceExists(id)
+	b.s.mtr.push("b-source-exists")
+	i, e := b.sm.SourceExists(id)
+	b.s.mtr.pop()
+	return i, e
 }
 
 func (b *bridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
@@ -123,15 +136,18 @@
 		return nil
 	}
 
+	b.s.mtr.push("b-pair-version")
 	// doing it like this is a bit sloppy
 	for _, v2 := range vl {
 		if p, ok := v2.(PairedVersion); ok {
 			if p.Matches(v) {
+				b.s.mtr.pop()
 				return p
 			}
 		}
 	}
 
+	b.s.mtr.pop()
 	return nil
 }
 
@@ -141,6 +157,7 @@
 		return nil
 	}
 
+	b.s.mtr.push("b-pair-rev")
 	p := []Version{r}
 	// doing it like this is a bit sloppy
 	for _, v2 := range vl {
@@ -151,6 +168,7 @@
 		}
 	}
 
+	b.s.mtr.pop()
 	return p
 }
 
@@ -158,112 +176,25 @@
 // constraint. If that basic check fails and the provided version is incomplete
 // (e.g. an unpaired version or bare revision), it will attempt to gather more
 // information on one or the other and re-perform the comparison.
-func (b *bridge) matches(id ProjectIdentifier, c2 Constraint, v Version) bool {
-	if c2.Matches(v) {
+func (b *bridge) matches(id ProjectIdentifier, c Constraint, v Version) bool {
+	if c.Matches(v) {
 		return true
 	}
 
-	// There's a wide field of possible ways that pairing might result in a
-	// match. For each possible type of version, start by carving out all the
-	// cases where the constraint would have provided an authoritative match
-	// result.
-	switch tv := v.(type) {
-	case PairedVersion:
-		switch tc := c2.(type) {
-		case PairedVersion, Revision, noneConstraint:
-			// These three would all have been authoritative matches
-			return false
-		case UnpairedVersion:
-			// Only way paired and unpaired could match is if they share an
-			// underlying rev
-			pv := b.pairVersion(id, tc)
-			if pv == nil {
-				return false
-			}
-			return pv.Matches(v)
-		case semverConstraint:
-			// Have to check all the possible versions for that rev to see if
-			// any match the semver constraint
-			for _, pv := range b.pairRevision(id, tv.Underlying()) {
-				if tc.Matches(pv) {
-					return true
-				}
-			}
-			return false
-		}
+	b.s.mtr.push("b-matches")
+	// This approach is slightly wasteful, but just SO much less verbose, and
+	// more easily understood.
+	vtu := b.vtu(id, v)
 
-	case Revision:
-		switch tc := c2.(type) {
-		case PairedVersion, Revision, noneConstraint:
-			// These three would all have been authoritative matches
-			return false
-		case UnpairedVersion:
-			// Only way paired and unpaired could match is if they share an
-			// underlying rev
-			pv := b.pairVersion(id, tc)
-			if pv == nil {
-				return false
-			}
-			return pv.Matches(v)
-		case semverConstraint:
-			// Have to check all the possible versions for the rev to see if
-			// any match the semver constraint
-			for _, pv := range b.pairRevision(id, tv) {
-				if tc.Matches(pv) {
-					return true
-				}
-			}
-			return false
-		}
-
-	// UnpairedVersion as input has the most weird cases. It's also the one
-	// we'll probably see the least
-	case UnpairedVersion:
-		switch tc := c2.(type) {
-		case noneConstraint:
-			// obviously
-			return false
-		case Revision, PairedVersion:
-			// Easy case for both - just pair the uv and see if it matches the revision
-			// constraint
-			pv := b.pairVersion(id, tv)
-			if pv == nil {
-				return false
-			}
-			return tc.Matches(pv)
-		case UnpairedVersion:
-			// Both are unpaired versions. See if they share an underlying rev.
-			pv := b.pairVersion(id, tv)
-			if pv == nil {
-				return false
-			}
-
-			pc := b.pairVersion(id, tc)
-			if pc == nil {
-				return false
-			}
-			return pc.Matches(pv)
-
-		case semverConstraint:
-			// semverConstraint can't ever match a rev, but we do need to check
-			// if any other versions corresponding to this rev work.
-			pv := b.pairVersion(id, tv)
-			if pv == nil {
-				return false
-			}
-
-			for _, ttv := range b.pairRevision(id, pv.Underlying()) {
-				if c2.Matches(ttv) {
-					return true
-				}
-			}
-			return false
-		}
-	default:
-		panic("unreachable")
+	var uc Constraint
+	if cv, ok := c.(Version); ok {
+		uc = b.vtu(id, cv)
+	} else {
+		uc = c
 	}
 
-	return false
+	b.s.mtr.pop()
+	return uc.Matches(vtu)
 }
 
 // matchesAny is the authoritative version of Constraint.MatchesAny.
@@ -272,6 +203,7 @@
 		return true
 	}
 
+	b.s.mtr.push("b-matches-any")
 	// This approach is slightly wasteful, but just SO much less verbose, and
 	// more easily understood.
 	var uc1, uc2 Constraint
@@ -287,6 +219,7 @@
 		uc2 = c2
 	}
 
+	b.s.mtr.pop()
 	return uc1.MatchesAny(uc2)
 }
 
@@ -297,6 +230,7 @@
 		return rc
 	}
 
+	b.s.mtr.push("b-intersect")
 	// This approach is slightly wasteful, but just SO much less verbose, and
 	// more easily understood.
 	var uc1, uc2 Constraint
@@ -312,6 +246,7 @@
 		uc2 = c2
 	}
 
+	b.s.mtr.pop()
 	return uc1.Intersect(uc2)
 }
 
@@ -348,11 +283,13 @@
 		panic("should never call ListPackages on root project")
 	}
 
-	return b.sm.ListPackages(id, v)
+	b.s.mtr.push("b-list-pkgs")
+	pt, err := b.sm.ListPackages(id, v)
+	b.s.mtr.pop()
+	return pt, err
 }
 
 func (b *bridge) ExportProject(id ProjectIdentifier, v Version, path string) error {
-	//return b.sm.ExportProject(id, v, path)
 	panic("bridge should never be used to ExportProject")
 }
 
@@ -370,11 +307,14 @@
 }
 
 func (b *bridge) DeduceProjectRoot(ip string) (ProjectRoot, error) {
-	return b.sm.DeduceProjectRoot(ip)
+	b.s.mtr.push("b-deduce-proj-root")
+	pr, e := b.sm.DeduceProjectRoot(ip)
+	b.s.mtr.pop()
+	return pr, e
 }
 
 // breakLock is called when the solver has to break a version recorded in the
-// lock file. It prefetches all the projects in the solver's lock , so that the
+// lock file. It prefetches all the projects in the solver's lock, so that the
 // information is already on hand if/when the solver needs it.
 //
 // Projects that have already been selected are skipped, as it's generally unlikely that the
@@ -389,9 +329,6 @@
 
 	for _, lp := range b.s.rl.Projects() {
 		if _, is := b.s.sel.selected(lp.pi); !is {
-			// ListPackages guarantees that all the necessary network work will
-			// be done, so go with that
-			//
 			// TODO(sdboyer) use this as an opportunity to detect
 			// inconsistencies between upstream and the lock (e.g., moved tags)?
 			pi, v := lp.pi, lp.Version()
@@ -407,6 +344,8 @@
 }
 
 func (b *bridge) SyncSourceFor(id ProjectIdentifier) error {
+	// we don't track metrics here b/c this is often called in its own goroutine
+	// by the solver, and the metrics design is for wall time on a single thread
 	return b.sm.SyncSourceFor(id)
 }
 
@@ -431,14 +370,14 @@
 // This should generally not be called, but is required for the interface. If it
 // is called, we have a bigger problem (the type has escaped the solver); thus,
 // panic.
-func (av versionTypeUnion) String() string {
+func (vtu versionTypeUnion) String() string {
 	panic("versionTypeUnion should never be turned into a string; it is solver internal-only")
 }
 
 // This should generally not be called, but is required for the interface. If it
 // is called, we have a bigger problem (the type has escaped the solver); thus,
 // panic.
-func (av versionTypeUnion) Type() string {
+func (vtu versionTypeUnion) Type() string {
 	panic("versionTypeUnion should never need to answer a Type() call; it is solver internal-only")
 }
 
@@ -446,12 +385,12 @@
 // contained in the union.
 //
 // This DOES allow tags to match branches, albeit indirectly through a revision.
-func (av versionTypeUnion) Matches(v Version) bool {
-	av2, oav := v.(versionTypeUnion)
+func (vtu versionTypeUnion) Matches(v Version) bool {
+	vtu2, otherIs := v.(versionTypeUnion)
 
-	for _, v1 := range av {
-		if oav {
-			for _, v2 := range av2 {
+	for _, v1 := range vtu {
+		if otherIs {
+			for _, v2 := range vtu2 {
 				if v1.Matches(v2) {
 					return true
 				}
@@ -467,12 +406,12 @@
 // MatchesAny returns true if any of the contained versions (which are also
 // constraints) in the union successfully MatchAny with the provided
 // constraint.
-func (av versionTypeUnion) MatchesAny(c Constraint) bool {
-	av2, oav := c.(versionTypeUnion)
+func (vtu versionTypeUnion) MatchesAny(c Constraint) bool {
+	vtu2, otherIs := c.(versionTypeUnion)
 
-	for _, v1 := range av {
-		if oav {
-			for _, v2 := range av2 {
+	for _, v1 := range vtu {
+		if otherIs {
+			for _, v2 := range vtu2 {
 				if v1.MatchesAny(v2) {
 					return true
 				}
@@ -492,12 +431,12 @@
 // In order to avoid weird version floating elsewhere in the solver, the union
 // always returns the input constraint. (This is probably obviously correct, but
 // is still worth noting.)
-func (av versionTypeUnion) Intersect(c Constraint) Constraint {
-	av2, oav := c.(versionTypeUnion)
+func (vtu versionTypeUnion) Intersect(c Constraint) Constraint {
+	vtu2, otherIs := c.(versionTypeUnion)
 
-	for _, v1 := range av {
-		if oav {
-			for _, v2 := range av2 {
+	for _, v1 := range vtu {
+		if otherIs {
+			for _, v2 := range vtu2 {
 				if rc := v1.Intersect(v2); rc != none {
 					return rc
 				}
@@ -510,4 +449,4 @@
 	return none
 }
 
-func (av versionTypeUnion) _private() {}
+func (vtu versionTypeUnion) _private() {}
diff --git a/vendor/github.com/sdboyer/gps/constraint_test.go b/vendor/github.com/sdboyer/gps/constraint_test.go
index 3863e65..6ee1390 100644
--- a/vendor/github.com/sdboyer/gps/constraint_test.go
+++ b/vendor/github.com/sdboyer/gps/constraint_test.go
@@ -683,6 +683,7 @@
 	v5 := NewVersion("v2.0.5").Is(Revision("notamatch"))
 
 	uv1 := versionTypeUnion{v1, v4, rev}
+	uv2 := versionTypeUnion{v2, v3}
 
 	if uv1.MatchesAny(none) {
 		t.Errorf("Union can't match none")
@@ -727,6 +728,10 @@
 		t.Errorf("Union should not reverse-match on anything in disjoint pair")
 	}
 
+	if !uv1.Matches(uv2) {
+		t.Errorf("Union should succeed on matching comparison to other union with some overlap")
+	}
+
 	// MatchesAny - repeat Matches for safety, but add more, too
 	if !uv1.MatchesAny(v4) {
 		t.Errorf("Union should match on branch to branch")
@@ -772,6 +777,10 @@
 		t.Errorf("Union should have no overlap with ~2.0.0 semver range")
 	}
 
+	if !uv1.MatchesAny(uv2) {
+		t.Errorf("Union should succeed on MatchAny against other union with some overlap")
+	}
+
 	// Intersect - repeat all previous
 	if uv1.Intersect(v4) != v4 {
 		t.Errorf("Union intersection on contained version should return that version")
@@ -814,4 +823,28 @@
 	if c2.Intersect(uv1) != none {
 		t.Errorf("Union reverse-intersecting with non-overlapping semver range should return none, got %s", uv1.Intersect(c2))
 	}
+
+	if uv1.Intersect(uv2) != rev {
+		t.Errorf("Unions should intersect down to rev, but got %s", uv1.Intersect(uv2))
+	}
+}
+
+func TestVersionUnionPanicOnType(t *testing.T) {
+	// versionTypeUnions need to panic if Type() gets called
+	defer func() {
+		if err := recover(); err == nil {
+			t.Error("versionTypeUnion did not panic on Type() call")
+		}
+	}()
+	_ = versionTypeUnion{}.Type()
+}
+
+func TestVersionUnionPanicOnString(t *testing.T) {
+	// versionTypeUnions need to panic if String() gets called
+	defer func() {
+		if err := recover(); err == nil {
+			t.Error("versionStringUnion did not panic on String() call")
+		}
+	}()
+	_ = versionTypeUnion{}.String()
 }
diff --git a/vendor/github.com/sdboyer/gps/constraints.go b/vendor/github.com/sdboyer/gps/constraints.go
index cf1b484..38b5d92 100644
--- a/vendor/github.com/sdboyer/gps/constraints.go
+++ b/vendor/github.com/sdboyer/gps/constraints.go
@@ -236,45 +236,87 @@
 	return pcs
 }
 
-// overrideAll treats the ProjectConstraints map as an override map, and applies
-// overridden values to the input.
+// merge pulls in all the constraints from other ProjectConstraints map(s),
+// merging them with the receiver into a new ProjectConstraints map.
+//
+// If duplicate ProjectRoots are encountered, the constraints are intersected
+// together and the latter's NetworkName, if non-empty, is taken.
+func (m ProjectConstraints) merge(other ...ProjectConstraints) (out ProjectConstraints) {
+	plen := len(m)
+	for _, pcm := range other {
+		plen += len(pcm)
+	}
+
+	out = make(ProjectConstraints, plen)
+	for pr, pp := range m {
+		out[pr] = pp
+	}
+
+	for _, pcm := range other {
+		for pr, pp := range pcm {
+			if rpp, exists := out[pr]; exists {
+				pp.Constraint = pp.Constraint.Intersect(rpp.Constraint)
+				if pp.NetworkName == "" {
+					pp.NetworkName = rpp.NetworkName
+				}
+			}
+			out[pr] = pp
+		}
+	}
+
+	return
+}
+
+// overrideAll treats the receiver ProjectConstraints map as a set of override
+// instructions, and applies overridden values to the ProjectConstraints.
 //
 // A slice of workingConstraint is returned, allowing differentiation between
 // values that were or were not overridden.
-func (m ProjectConstraints) overrideAll(in []ProjectConstraint) (out []workingConstraint) {
-	out = make([]workingConstraint, len(in))
+func (m ProjectConstraints) overrideAll(pcm ProjectConstraints) (out []workingConstraint) {
+	out = make([]workingConstraint, len(pcm))
 	k := 0
-	for _, pc := range in {
-		out[k] = m.override(pc)
+	for pr, pp := range pcm {
+		out[k] = m.override(pr, pp)
 		k++
 	}
 
+	sort.Stable(sortedWC(out))
 	return
 }
 
 // override replaces a single ProjectConstraint with a workingConstraint,
 // overriding its values if a corresponding entry exists in the
 // ProjectConstraints map.
-func (m ProjectConstraints) override(pc ProjectConstraint) workingConstraint {
+func (m ProjectConstraints) override(pr ProjectRoot, pp ProjectProperties) workingConstraint {
 	wc := workingConstraint{
-		Ident:      pc.Ident,
-		Constraint: pc.Constraint,
+		Ident: ProjectIdentifier{
+			ProjectRoot: pr,
+			NetworkName: pp.NetworkName,
+		},
+		Constraint: pp.Constraint,
 	}
 
-	if pp, has := m[pc.Ident.ProjectRoot]; has {
+	if opp, has := m[pr]; has {
 		// The rule for overrides is that *any* non-zero value for the prop
 		// should be considered an override, even if it's equal to what's
 		// already there.
-		if pp.Constraint != nil {
-			wc.Constraint = pp.Constraint
+		if opp.Constraint != nil {
+			wc.Constraint = opp.Constraint
 			wc.overrConstraint = true
 		}
 
-		if pp.NetworkName != "" {
-			wc.Ident.NetworkName = pp.NetworkName
+		// This may appear incorrect, because the solver encodes meaning into
+		// the empty string for NetworkName (it means that it would use the
+		// import path by default, but could be coerced into using an alternate
+		// URL). However, that 'coercion' can only happen if there's a
+		// disagreement between projects on where a dependency should be sourced
+		// from. Such disagreement is exactly what overrides preclude, so
+		// there's no need to preserve the meaning of "" here - thus, we can
+		// treat it as a zero value and ignore it, rather than applying it.
+		if opp.NetworkName != "" {
+			wc.Ident.NetworkName = opp.NetworkName
 			wc.overrNet = true
 		}
-
 	}
 
 	return wc
@@ -282,14 +324,12 @@
 
 type sortedConstraints []ProjectConstraint
 
-func (s sortedConstraints) Len() int {
-	return len(s)
-}
+func (s sortedConstraints) Len() int           { return len(s) }
+func (s sortedConstraints) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortedConstraints) Less(i, j int) bool { return s[i].Ident.less(s[j].Ident) }
 
-func (s sortedConstraints) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
+type sortedWC []workingConstraint
 
-func (s sortedConstraints) Less(i, j int) bool {
-	return s[i].Ident.less(s[j].Ident)
-}
+func (s sortedWC) Len() int           { return len(s) }
+func (s sortedWC) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortedWC) Less(i, j int) bool { return s[i].Ident.less(s[j].Ident) }
diff --git a/vendor/github.com/sdboyer/gps/deduce.go b/vendor/github.com/sdboyer/gps/deduce.go
index 25dc93d..1e5bac4 100644
--- a/vendor/github.com/sdboyer/gps/deduce.go
+++ b/vendor/github.com/sdboyer/gps/deduce.go
@@ -7,6 +7,7 @@
 	"net/url"
 	"path"
 	"regexp"
+	"strconv"
 	"strings"
 )
 
@@ -256,7 +257,7 @@
 
 	// Putting a scheme on gopkg.in would be really weird, disallow it
 	if u.Scheme != "" {
-		return nil, fmt.Errorf("Specifying alternate schemes on gopkg.in imports is not permitted")
+		return nil, fmt.Errorf("specifying alternate schemes on gopkg.in imports is not permitted")
 	}
 
 	// gopkg.in is always backed by github
@@ -267,6 +268,11 @@
 	} else {
 		u.Path = path.Join(v[2], v[3])
 	}
+	major, err := strconv.ParseInt(v[4][1:], 10, 64)
+	if err != nil {
+		// this should only be reachable if there's an error in the regex
+		return nil, fmt.Errorf("could not parse %q as a gopkg.in major version", v[4][1:])
+	}
 
 	mb := make(maybeSources, len(gitSchemes))
 	for k, scheme := range gitSchemes {
@@ -275,7 +281,11 @@
 			u2.User = url.User("git")
 		}
 		u2.Scheme = scheme
-		mb[k] = maybeGitSource{url: &u2}
+		mb[k] = maybeGopkginSource{
+			opath: v[1],
+			url:   &u2,
+			major: major,
+		}
 	}
 
 	return mb, nil
diff --git a/vendor/github.com/sdboyer/gps/deduce_test.go b/vendor/github.com/sdboyer/gps/deduce_test.go
index 23ffe38..71b44e5 100644
--- a/vendor/github.com/sdboyer/gps/deduce_test.go
+++ b/vendor/github.com/sdboyer/gps/deduce_test.go
@@ -111,60 +111,60 @@
 			in:   "gopkg.in/sdboyer/gps.v0",
 			root: "gopkg.in/sdboyer/gps.v0",
 			mb: maybeSources{
-				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("https://github.com/sdboyer/gps"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("ssh://git@github.com/sdboyer/gps"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("git://github.com/sdboyer/gps"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("http://github.com/sdboyer/gps"), major: 0},
 			},
 		},
 		{
 			in:   "gopkg.in/sdboyer/gps.v0/foo",
 			root: "gopkg.in/sdboyer/gps.v0",
 			mb: maybeSources{
-				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("https://github.com/sdboyer/gps"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("ssh://git@github.com/sdboyer/gps"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("git://github.com/sdboyer/gps"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v0", url: mkurl("http://github.com/sdboyer/gps"), major: 0},
 			},
 		},
 		{
 			in:   "gopkg.in/sdboyer/gps.v1/foo/bar",
 			root: "gopkg.in/sdboyer/gps.v1",
 			mb: maybeSources{
-				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
-				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v1", url: mkurl("https://github.com/sdboyer/gps"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v1", url: mkurl("ssh://git@github.com/sdboyer/gps"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v1", url: mkurl("git://github.com/sdboyer/gps"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/sdboyer/gps.v1", url: mkurl("http://github.com/sdboyer/gps"), major: 1},
 			},
 		},
 		{
 			in:   "gopkg.in/yaml.v1",
 			root: "gopkg.in/yaml.v1",
 			mb: maybeSources{
-				maybeGitSource{url: mkurl("https://github.com/go-yaml/yaml")},
-				maybeGitSource{url: mkurl("ssh://git@github.com/go-yaml/yaml")},
-				maybeGitSource{url: mkurl("git://github.com/go-yaml/yaml")},
-				maybeGitSource{url: mkurl("http://github.com/go-yaml/yaml")},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("https://github.com/go-yaml/yaml"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("ssh://git@github.com/go-yaml/yaml"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("git://github.com/go-yaml/yaml"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("http://github.com/go-yaml/yaml"), major: 1},
 			},
 		},
 		{
 			in:   "gopkg.in/yaml.v1/foo/bar",
 			root: "gopkg.in/yaml.v1",
 			mb: maybeSources{
-				maybeGitSource{url: mkurl("https://github.com/go-yaml/yaml")},
-				maybeGitSource{url: mkurl("ssh://git@github.com/go-yaml/yaml")},
-				maybeGitSource{url: mkurl("git://github.com/go-yaml/yaml")},
-				maybeGitSource{url: mkurl("http://github.com/go-yaml/yaml")},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("https://github.com/go-yaml/yaml"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("ssh://git@github.com/go-yaml/yaml"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("git://github.com/go-yaml/yaml"), major: 1},
+				maybeGopkginSource{opath: "gopkg.in/yaml.v1", url: mkurl("http://github.com/go-yaml/yaml"), major: 1},
 			},
 		},
 		{
 			in:   "gopkg.in/inf.v0",
 			root: "gopkg.in/inf.v0",
 			mb: maybeSources{
-				maybeGitSource{url: mkurl("https://github.com/go-inf/inf")},
-				maybeGitSource{url: mkurl("ssh://git@github.com/go-inf/inf")},
-				maybeGitSource{url: mkurl("git://github.com/go-inf/inf")},
-				maybeGitSource{url: mkurl("http://github.com/go-inf/inf")},
+				maybeGopkginSource{opath: "gopkg.in/inf.v0", url: mkurl("https://github.com/go-inf/inf"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/inf.v0", url: mkurl("ssh://git@github.com/go-inf/inf"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/inf.v0", url: mkurl("git://github.com/go-inf/inf"), major: 0},
+				maybeGopkginSource{opath: "gopkg.in/inf.v0", url: mkurl("http://github.com/go-inf/inf"), major: 0},
 			},
 		},
 		{
@@ -456,11 +456,12 @@
 			root: "golang.org/x/exp",
 			mb:   maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
 		},
-		{
-			in:   "rsc.io/pdf",
-			root: "rsc.io/pdf",
-			mb:   maybeGitSource{url: mkurl("https://github.com/rsc/pdf")},
-		},
+		// rsc.io appears to have broken
+		//{
+		//in:   "rsc.io/pdf",
+		//root: "rsc.io/pdf",
+		//mb:   maybeGitSource{url: mkurl("https://github.com/rsc/pdf")},
+		//},
 	},
 }
 
@@ -505,6 +506,8 @@
 				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
 			case maybeHgSource:
 				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
+			case maybeGopkginSource:
+				return fmt.Sprintf("%T: %s (v%v) %s ", tmb, tmb.opath, tmb.major, ufmt(tmb.url))
 			default:
 				t.Errorf("Unknown maybeSource type: %T", mb)
 				t.FailNow()
@@ -582,13 +585,13 @@
 				t.Errorf("(in: %s) Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", fix.in, pr, fix.root)
 			}
 
-			_, srcf, err := sm.deducePathAndProcess(fix.in)
+			ft, err := sm.deducePathAndProcess(fix.in)
 			if err != nil {
 				t.Errorf("(in: %s) Unexpected err on deducing source: %s", fix.in, err)
 				return
 			}
 
-			_, ident, err := srcf()
+			_, ident, err := ft.srcf()
 			if err != nil {
 				t.Errorf("(in: %s) Unexpected err on executing source future: %s", fix.in, err)
 				return
diff --git a/vendor/github.com/sdboyer/gps/example.go b/vendor/github.com/sdboyer/gps/example.go
index 728439f..666dba5 100644
--- a/vendor/github.com/sdboyer/gps/example.go
+++ b/vendor/github.com/sdboyer/gps/example.go
@@ -22,10 +22,9 @@
 //  This will compile and work...and then blow away any vendor directory present
 //  in the cwd. Be careful!
 func main() {
-	// Operate on the current directory
+	// Assume the current directory is correctly placed on a GOPATH, and that it's the
+	// root of the project.
 	root, _ := os.Getwd()
-	// Assume the current directory is correctly placed on a GOPATH, and derive
-	// the ProjectRoot from it
 	srcprefix := filepath.Join(build.Default.GOPATH, "src") + string(filepath.Separator)
 	importroot := filepath.ToSlash(strings.TrimPrefix(root, srcprefix))
 
@@ -35,9 +34,10 @@
 		Trace:       true,
 		TraceLogger: log.New(os.Stdout, "", 0),
 	}
+	// Perform static analysis on the current project to find all of its imports.
 	params.RootPackageTree, _ = gps.ListPackages(root, importroot)
 
-	// Set up a SourceManager with the NaiveAnalyzer
+	// Set up a SourceManager. This manages interaction with sources (repositories).
 	sourcemgr, _ := gps.NewSourceManager(NaiveAnalyzer{}, ".repocache")
 	defer sourcemgr.Release()
 
@@ -54,14 +54,16 @@
 
 type NaiveAnalyzer struct{}
 
-// DeriveManifestAndLock gets called when the solver needs manifest/lock data
-// for a particular project (the gps.ProjectRoot parameter) at a particular
-// version. That version will be checked out in a directory rooted at path.
+// DeriveManifestAndLock is called when the solver needs manifest/lock data
+// for a particular dependency project (identified by the gps.ProjectRoot
+// parameter) at a particular version. That version will be checked out in a
+// directory rooted at path.
 func (a NaiveAnalyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
 	return nil, nil, nil
 }
 
-// Reports the name and version of the analyzer. This is mostly irrelevant.
+// Reports the name and version of the analyzer. This is used internally as part
+// of gps' hashing memoization scheme.
 func (a NaiveAnalyzer) Info() (name string, version *semver.Version) {
 	v, _ := semver.NewVersion("v0.0.1")
 	return "example-analyzer", v
diff --git a/vendor/github.com/sdboyer/gps/glide.lock b/vendor/github.com/sdboyer/gps/glide.lock
index ea36f4b..fa41844 100644
--- a/vendor/github.com/sdboyer/gps/glide.lock
+++ b/vendor/github.com/sdboyer/gps/glide.lock
@@ -11,7 +11,7 @@
   version: 0a2c9fc0eee2c4cbb9526877c4a54da047fdcadd
   vcs: git
 - name: github.com/Masterminds/vcs
-  version: 7a21de0acff824ccf45f633cc844a19625149c2f
+  version: fbe9fb6ad5b5f35b3e82a7c21123cfc526cbf895
   vcs: git
 - name: github.com/termie/go-shutil
   version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
diff --git a/vendor/github.com/sdboyer/gps/hash.go b/vendor/github.com/sdboyer/gps/hash.go
index acede5c..d3be411 100644
--- a/vendor/github.com/sdboyer/gps/hash.go
+++ b/vendor/github.com/sdboyer/gps/hash.go
@@ -1,6 +1,7 @@
 package gps
 
 import (
+	"bytes"
 	"crypto/sha256"
 	"sort"
 )
@@ -16,44 +17,43 @@
 //
 // (Basically, this is for memoization.)
 func (s *solver) HashInputs() []byte {
-	c, tc := s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()
 	// Apply overrides to the constraints from the root. Otherwise, the hash
 	// would be computed on the basis of a constraint from root that doesn't
 	// actually affect solving.
-	p := s.ovr.overrideAll(pcSliceToMap(c, tc).asSortedSlice())
+	p := s.ovr.overrideAll(s.rm.DependencyConstraints().merge(s.rm.TestDependencyConstraints()))
 
-	// We have everything we need; now, compute the hash.
-	h := sha256.New()
+	// Build up a buffer of all the inputs.
+	buf := new(bytes.Buffer)
 	for _, pd := range p {
-		h.Write([]byte(pd.Ident.ProjectRoot))
-		h.Write([]byte(pd.Ident.NetworkName))
+		buf.WriteString(string(pd.Ident.ProjectRoot))
+		buf.WriteString(pd.Ident.NetworkName)
 		// FIXME Constraint.String() is a surjective-only transformation - tags
 		// and branches with the same name are written out as the same string.
 		// This could, albeit rarely, result in input collisions when a real
 		// change has occurred.
-		h.Write([]byte(pd.Constraint.String()))
+		buf.WriteString(pd.Constraint.String())
 	}
 
 	// The stdlib and old appengine packages play the same functional role in
 	// solving as ignores. Because they change, albeit quite infrequently, we
 	// have to include them in the hash.
-	h.Write([]byte(stdlibPkgs))
-	h.Write([]byte(appenginePkgs))
+	buf.WriteString(stdlibPkgs)
+	buf.WriteString(appenginePkgs)
 
 	// Write each of the packages, or the errors that were found for a
 	// particular subpath, into the hash.
 	for _, perr := range s.rpt.Packages {
 		if perr.Err != nil {
-			h.Write([]byte(perr.Err.Error()))
+			buf.WriteString(perr.Err.Error())
 		} else {
-			h.Write([]byte(perr.P.Name))
-			h.Write([]byte(perr.P.CommentPath))
-			h.Write([]byte(perr.P.ImportPath))
+			buf.WriteString(perr.P.Name)
+			buf.WriteString(perr.P.CommentPath)
+			buf.WriteString(perr.P.ImportPath)
 			for _, imp := range perr.P.Imports {
-				h.Write([]byte(imp))
+				buf.WriteString(imp)
 			}
 			for _, imp := range perr.P.TestImports {
-				h.Write([]byte(imp))
+				buf.WriteString(imp)
 			}
 		}
 	}
@@ -70,23 +70,24 @@
 		sort.Strings(ig)
 
 		for _, igp := range ig {
-			h.Write([]byte(igp))
+			buf.WriteString(igp)
 		}
 	}
 
 	for _, pc := range s.ovr.asSortedSlice() {
-		h.Write([]byte(pc.Ident.ProjectRoot))
+		buf.WriteString(string(pc.Ident.ProjectRoot))
 		if pc.Ident.NetworkName != "" {
-			h.Write([]byte(pc.Ident.NetworkName))
+			buf.WriteString(pc.Ident.NetworkName)
 		}
 		if pc.Constraint != nil {
-			h.Write([]byte(pc.Constraint.String()))
+			buf.WriteString(pc.Constraint.String())
 		}
 	}
 
 	an, av := s.b.AnalyzerInfo()
-	h.Write([]byte(an))
-	h.Write([]byte(av.String()))
+	buf.WriteString(an)
+	buf.WriteString(av.String())
 
-	return h.Sum(nil)
+	hd := sha256.Sum256(buf.Bytes())
+	return hd[:]
 }
diff --git a/vendor/github.com/sdboyer/gps/lock.go b/vendor/github.com/sdboyer/gps/lock.go
index 729d501..fea5319 100644
--- a/vendor/github.com/sdboyer/gps/lock.go
+++ b/vendor/github.com/sdboyer/gps/lock.go
@@ -49,8 +49,9 @@
 	return l
 }
 
-// NewLockedProject creates a new LockedProject struct with a given name,
-// version, and upstream repository URL.
+// NewLockedProject creates a new LockedProject struct with a given
+// ProjectIdentifier (name and optional upstream source URL), version, and list
+// of packages required from the project.
 //
 // Note that passing a nil version will cause a panic. This is a correctness
 // measure to ensure that the solver is never exposed to a version-less lock
@@ -106,20 +107,17 @@
 	return lp.v.Is(lp.r)
 }
 
-func (lp LockedProject) toAtom() atom {
-	pa := atom{
-		id: lp.Ident(),
-	}
-
-	if lp.v == nil {
-		pa.v = lp.r
-	} else if lp.r != "" {
-		pa.v = lp.v.Is(lp.r)
-	} else {
-		pa.v = lp.v
-	}
-
-	return pa
+// Packages returns the list of packages from within the LockedProject that are
+// actually used in the import graph. Some caveats:
+//
+//  * The names given are relative to the root import path for the project. If
+//    the root package itself is imported, it's represented as ".".
+//  * Just because a package path isn't included in this list doesn't mean it's
+//    safe to remove - it could contain C files, or other assets, that can't be
+//    safely removed.
+//  * The slice is not a copy. If you need to modify it, copy it first.
+func (lp LockedProject) Packages() []string {
+	return lp.pkgs
 }
 
 type safeLock struct {
@@ -143,7 +141,10 @@
 func prepLock(l Lock) Lock {
 	pl := l.Projects()
 
-	rl := safeLock{h: l.InputHash()}
+	rl := safeLock{
+		h: l.InputHash(),
+		p: make([]LockedProject, len(pl)),
+	}
 	copy(rl.p, pl)
 
 	return rl
diff --git a/vendor/github.com/sdboyer/gps/manager_test.go b/vendor/github.com/sdboyer/gps/manager_test.go
index 0daaef9..0970c59 100644
--- a/vendor/github.com/sdboyer/gps/manager_test.go
+++ b/vendor/github.com/sdboyer/gps/manager_test.go
@@ -136,20 +136,23 @@
 		}
 	}()
 
-	id := mkPI("github.com/Masterminds/VCSTestRepo").normalize()
+	id := mkPI("github.com/sdboyer/gpkt").normalize()
 	v, err := sm.ListVersions(id)
 	if err != nil {
 		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
 	}
 
-	if len(v) != 3 {
-		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	if len(v) != 7 {
+		t.Errorf("Expected seven version results from the test repo, got %v", len(v))
 	} else {
-		rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
 		expected := []Version{
-			NewVersion("1.0.0").Is(rev),
-			NewBranch("master").Is(rev),
-			NewBranch("test").Is(rev),
+			NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
+			NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")),
+			NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			newDefaultBranch("master").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")),
+			NewBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")),
+			NewBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
 		}
 
 		// SourceManager itself doesn't guarantee ordering; sort them here so we
@@ -161,13 +164,6 @@
 				t.Errorf("Expected version %s in position %v but got %s", e, k, v[k])
 			}
 		}
-
-		if !v[1].(versionPair).v.(branchVersion).isDefault {
-			t.Error("Expected master branch version to have isDefault flag, but it did not")
-		}
-		if v[2].(versionPair).v.(branchVersion).isDefault {
-			t.Error("Expected test branch version not to have isDefault flag, but it did")
-		}
 	}
 
 	// Two birds, one stone - make sure the internal ProjectManager vlist cache
@@ -176,7 +172,7 @@
 	smc := &bridge{
 		sm:     sm,
 		vlists: make(map[ProjectIdentifier][]Version),
-		s:      &solver{},
+		s:      &solver{mtr: newMetrics()},
 	}
 
 	v, err = smc.ListVersions(id)
@@ -184,14 +180,17 @@
 		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
 	}
 
-	rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
-	if len(v) != 3 {
-		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	if len(v) != 7 {
+		t.Errorf("Expected seven version results from the test repo, got %v", len(v))
 	} else {
 		expected := []Version{
-			NewVersion("1.0.0").Is(rev),
-			NewBranch("master").Is(rev),
-			NewBranch("test").Is(rev),
+			NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
+			NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")),
+			NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			newDefaultBranch("master").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")),
+			NewBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")),
+			NewBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
 		}
 
 		for k, e := range expected {
@@ -200,15 +199,21 @@
 			}
 		}
 
-		if !v[1].(versionPair).v.(branchVersion).isDefault {
+		if !v[3].(versionPair).v.(branchVersion).isDefault {
 			t.Error("Expected master branch version to have isDefault flag, but it did not")
 		}
-		if v[2].(versionPair).v.(branchVersion).isDefault {
-			t.Error("Expected test branch version not to have isDefault flag, but it did")
+		if v[4].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected v1 branch version not to have isDefault flag, but it did")
+		}
+		if v[5].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected v1.1 branch version not to have isDefault flag, but it did")
+		}
+		if v[6].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected v3 branch version not to have isDefault flag, but it did")
 		}
 	}
 
-	present, err := smc.RevisionPresentIn(id, rev)
+	present, err := smc.RevisionPresentIn(id, Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e"))
 	if err != nil {
 		t.Errorf("Should have found revision in source, but got err: %s", err)
 	} else if !present {
@@ -222,12 +227,12 @@
 	}
 
 	// Ensure that the appropriate cache dirs and files exist
-	_, err = os.Stat(filepath.Join(cpath, "sources", "https---github.com-Masterminds-VCSTestRepo", ".git"))
+	_, err = os.Stat(filepath.Join(cpath, "sources", "https---github.com-sdboyer-gpkt", ".git"))
 	if err != nil {
 		t.Error("Cache repo does not exist in expected location")
 	}
 
-	_, err = os.Stat(filepath.Join(cpath, "metadata", "github.com", "Masterminds", "VCSTestRepo", "cache.json"))
+	_, err = os.Stat(filepath.Join(cpath, "metadata", "github.com", "sdboyer", "gpkt", "cache.json"))
 	if err != nil {
 		// TODO(sdboyer) disabled until we get caching working
 		//t.Error("Metadata cache json file does not exist in expected location")
@@ -396,9 +401,9 @@
 
 	// setup done, now do the test
 
-	id := mkPI("github.com/Masterminds/VCSTestRepo").normalize()
+	id := mkPI("github.com/sdboyer/gpkt").normalize()
 
-	_, _, err := sm.GetManifestAndLock(id, NewVersion("1.0.0"))
+	_, _, err := sm.GetManifestAndLock(id, NewVersion("v1.0.0"))
 	if err != nil {
 		t.Errorf("Unexpected error from GetInfoAt %s", err)
 	}
@@ -408,8 +413,8 @@
 		t.Errorf("Unexpected error from ListVersions %s", err)
 	}
 
-	if len(v) != 3 {
-		t.Errorf("Expected three results from ListVersions, got %v", len(v))
+	if len(v) != 7 {
+		t.Errorf("Expected seven results from ListVersions, got %v", len(v))
 	}
 }
 
@@ -489,17 +494,18 @@
 	}
 }
 
-// Test that the future returned from SourceMgr.deducePathAndProcess() is safe
-// to call concurrently.
+// Test that the deduction performed in SourceMgr.deducePathAndProcess() is safe
+// for parallel execution - in particular, that parallel calls to the same
+// resource fold in together as expected.
 //
-// Obviously, this is just a heuristic; passage does not guarantee correctness
-// (though failure does guarantee incorrectness)
+// Obviously, this is just a heuristic; while failure means something's
+// definitely broken, success does not guarantee correctness.
 func TestMultiDeduceThreadsafe(t *testing.T) {
 	sm, clean := mkNaiveSM(t)
 	defer clean()
 
 	in := "github.com/sdboyer/gps"
-	rootf, srcf, err := sm.deducePathAndProcess(in)
+	ft, err := sm.deducePathAndProcess(in)
 	if err != nil {
 		t.Errorf("Known-good path %q had unexpected basic deduction error: %s", in, err)
 		t.FailNow()
@@ -518,7 +524,7 @@
 			}
 		}()
 		<-c
-		_, err := rootf()
+		_, err := ft.rootf()
 		if err != nil {
 			t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err)
 		}
@@ -546,7 +552,7 @@
 			}
 		}()
 		<-c
-		_, _, err := srcf()
+		_, _, err := ft.srcf()
 		if err != nil {
 			t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err)
 		}
@@ -563,3 +569,77 @@
 		t.Errorf("Sources map should have just two elements, but has %v", len(sm.srcs))
 	}
 }
+
+func TestMultiFetchThreadsafe(t *testing.T) {
+	// This test is quite slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping slow test in short mode")
+	}
+
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	projects := []ProjectIdentifier{
+		mkPI("github.com/sdboyer/gps"),
+		mkPI("github.com/sdboyer/gpkt"),
+		mkPI("github.com/sdboyer/gogl"),
+		mkPI("github.com/sdboyer/gliph"),
+		mkPI("github.com/sdboyer/frozone"),
+		mkPI("gopkg.in/sdboyer/gpkt.v1"),
+		mkPI("gopkg.in/sdboyer/gpkt.v2"),
+		mkPI("github.com/Masterminds/VCSTestRepo"),
+		mkPI("github.com/go-yaml/yaml"),
+		mkPI("github.com/Sirupsen/logrus"),
+		mkPI("github.com/Masterminds/semver"),
+		mkPI("github.com/Masterminds/vcs"),
+		//mkPI("bitbucket.org/sdboyer/withbm"),
+		//mkPI("bitbucket.org/sdboyer/nobm"),
+	}
+
+	// 40 gives us ten calls per op, per project, which is decently likely to
+	// reveal any underlying parallelism problems
+	cnum := len(projects) * 40
+	wg := &sync.WaitGroup{}
+
+	for i := 0; i < cnum; i++ {
+		wg.Add(1)
+
+		go func(id ProjectIdentifier, pass int) {
+			switch pass {
+			case 0:
+				t.Logf("Deducing root for %s", id.errString())
+				_, err := sm.DeduceProjectRoot(string(id.ProjectRoot))
+				if err != nil {
+					t.Errorf("err on deducing project root for %s: %s", id.errString(), err.Error())
+				}
+			case 1:
+				t.Logf("syncing %s", id)
+				err := sm.SyncSourceFor(id)
+				if err != nil {
+					t.Errorf("syncing failed for %s with err %s", id.errString(), err.Error())
+				}
+			case 2:
+				t.Logf("listing versions for %s", id)
+				_, err := sm.ListVersions(id)
+				if err != nil {
+					t.Errorf("listing versions failed for %s with err %s", id.errString(), err.Error())
+				}
+			case 3:
+				t.Logf("Checking source existence for %s", id)
+				y, err := sm.SourceExists(id)
+				if err != nil {
+					t.Errorf("err on checking source existence for %s: %s", id.errString(), err.Error())
+				}
+				if !y {
+					t.Errorf("claims %s source does not exist", id.errString())
+				}
+			default:
+				panic(fmt.Sprintf("wtf, %s %v", id, pass))
+			}
+			wg.Done()
+		}(projects[i%len(projects)], (i/len(projects))%4)
+
+		runtime.Gosched()
+	}
+	wg.Wait()
+}
diff --git a/vendor/github.com/sdboyer/gps/manifest.go b/vendor/github.com/sdboyer/gps/manifest.go
index ff23ec0..a95c666 100644
--- a/vendor/github.com/sdboyer/gps/manifest.go
+++ b/vendor/github.com/sdboyer/gps/manifest.go
@@ -15,13 +15,13 @@
 // See the gps docs for more information: https://github.com/sdboyer/gps/wiki
 type Manifest interface {
 	// Returns a list of project-level constraints.
-	DependencyConstraints() []ProjectConstraint
+	DependencyConstraints() ProjectConstraints
 
 	// Returns a list of constraints applicable to test imports.
 	//
 	// These are applied only when tests are incorporated. Typically, that
 	// will only be for root manifests.
-	TestDependencyConstraints() []ProjectConstraint
+	TestDependencyConstraints() ProjectConstraints
 }
 
 // RootManifest extends Manifest to add special controls over solving that are
@@ -51,19 +51,18 @@
 // the fly for projects with no manifest metadata, or metadata through a foreign
 // tool's idioms.
 type SimpleManifest struct {
-	Deps     []ProjectConstraint
-	TestDeps []ProjectConstraint
+	Deps, TestDeps ProjectConstraints
 }
 
 var _ Manifest = SimpleManifest{}
 
 // DependencyConstraints returns the project's dependencies.
-func (m SimpleManifest) DependencyConstraints() []ProjectConstraint {
+func (m SimpleManifest) DependencyConstraints() ProjectConstraints {
 	return m.Deps
 }
 
 // TestDependencyConstraints returns the project's test dependencies.
-func (m SimpleManifest) TestDependencyConstraints() []ProjectConstraint {
+func (m SimpleManifest) TestDependencyConstraints() ProjectConstraints {
 	return m.TestDeps
 }
 
@@ -72,16 +71,14 @@
 //
 // Also, for tests.
 type simpleRootManifest struct {
-	c   []ProjectConstraint
-	tc  []ProjectConstraint
-	ovr ProjectConstraints
-	ig  map[string]bool
+	c, tc, ovr ProjectConstraints
+	ig         map[string]bool
 }
 
-func (m simpleRootManifest) DependencyConstraints() []ProjectConstraint {
+func (m simpleRootManifest) DependencyConstraints() ProjectConstraints {
 	return m.c
 }
-func (m simpleRootManifest) TestDependencyConstraints() []ProjectConstraint {
+func (m simpleRootManifest) TestDependencyConstraints() ProjectConstraints {
 	return m.tc
 }
 func (m simpleRootManifest) Overrides() ProjectConstraints {
@@ -92,15 +89,18 @@
 }
 func (m simpleRootManifest) dup() simpleRootManifest {
 	m2 := simpleRootManifest{
-		c:   make([]ProjectConstraint, len(m.c)),
-		tc:  make([]ProjectConstraint, len(m.tc)),
-		ovr: ProjectConstraints{},
-		ig:  map[string]bool{},
+		c:   make(ProjectConstraints, len(m.c)),
+		tc:  make(ProjectConstraints, len(m.tc)),
+		ovr: make(ProjectConstraints, len(m.ovr)),
+		ig:  make(map[string]bool, len(m.ig)),
 	}
 
-	copy(m2.c, m.c)
-	copy(m2.tc, m.tc)
-
+	for k, v := range m.c {
+		m2.c[k] = v
+	}
+	for k, v := range m.tc {
+		m2.tc[k] = v
+	}
 	for k, v := range m.ovr {
 		m2.ovr[k] = v
 	}
@@ -125,8 +125,8 @@
 	ddeps := m.TestDependencyConstraints()
 
 	rm := SimpleManifest{
-		Deps:     make([]ProjectConstraint, len(deps)),
-		TestDeps: make([]ProjectConstraint, len(ddeps)),
+		Deps:     make(ProjectConstraints, len(deps)),
+		TestDeps: make(ProjectConstraints, len(ddeps)),
 	}
 
 	for k, d := range deps {
diff --git a/vendor/github.com/sdboyer/gps/maybe_source.go b/vendor/github.com/sdboyer/gps/maybe_source.go
index 34fd5d5..08629e1 100644
--- a/vendor/github.com/sdboyer/gps/maybe_source.go
+++ b/vendor/github.com/sdboyer/gps/maybe_source.go
@@ -9,6 +9,13 @@
 	"github.com/Masterminds/vcs"
 )
 
+// A maybeSource represents a set of information that, given some
+// typically-expensive network effort, could be transformed into a proper source.
+//
+// Wrapping these up as their own type kills two birds with one stone:
+//
+// * Allows control over when deduction logic triggers network activity
+// * Makes it easy to attempt multiple URLs for a given import path
 type maybeSource interface {
 	try(cachedir string, an ProjectAnalyzer) (source, string, error)
 }
@@ -84,6 +91,53 @@
 	return src, ustr, nil
 }
 
+type maybeGopkginSource struct {
+	// the original gopkg.in import path. this is used to create the on-disk
+	// location to avoid duplicate resource management - e.g., if instances of
+	// a gopkg.in project are accessed via different schemes, or if the
+	// underlying github repository is accessed directly.
+	opath string
+	// the actual upstream URL - always github
+	url *url.URL
+	// the major version to apply for filtering
+	major int64
+}
+
+func (m maybeGopkginSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
+	// We don't actually need a fully consistent transform into the on-disk path
+	// - just something that's unique to the particular gopkg.in domain context.
+	// So, it's OK to just dumb-join the scheme with the path.
+	path := filepath.Join(cachedir, "sources", sanitizer.Replace(m.url.Scheme+"/"+m.opath))
+	ustr := m.url.String()
+	r, err := vcs.NewGitRepo(ustr, path)
+	if err != nil {
+		return nil, "", err
+	}
+
+	src := &gopkginSource{
+		gitSource: gitSource{
+			baseVCSSource: baseVCSSource{
+				an: an,
+				dc: newMetaCache(),
+				crepo: &repo{
+					r:     r,
+					rpath: path,
+				},
+			},
+		},
+		major: m.major,
+	}
+
+	src.baseVCSSource.lvfunc = src.listVersions
+
+	_, err = src.listVersions()
+	if err != nil {
+		return nil, "", err
+	}
+
+	return src, ustr, nil
+}
+
 type maybeBzrSource struct {
 	url *url.URL
 }
diff --git a/vendor/github.com/sdboyer/gps/metrics.go b/vendor/github.com/sdboyer/gps/metrics.go
new file mode 100644
index 0000000..bd5629e
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/metrics.go
@@ -0,0 +1,81 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"sort"
+	"text/tabwriter"
+	"time"
+)
+
+type metrics struct {
+	stack []string
+	times map[string]time.Duration
+	last  time.Time
+}
+
+func newMetrics() *metrics {
+	return &metrics{
+		stack: []string{"other"},
+		times: map[string]time.Duration{
+			"other": 0,
+		},
+		last: time.Now(),
+	}
+}
+
+func (m *metrics) push(name string) {
+	cn := m.stack[len(m.stack)-1]
+	m.times[cn] = m.times[cn] + time.Since(m.last)
+
+	m.stack = append(m.stack, name)
+	m.last = time.Now()
+}
+
+func (m *metrics) pop() {
+	on := m.stack[len(m.stack)-1]
+	m.times[on] = m.times[on] + time.Since(m.last)
+
+	m.stack = m.stack[:len(m.stack)-1]
+	m.last = time.Now()
+}
+
+func (m *metrics) dump(l *log.Logger) {
+	s := make(ndpairs, len(m.times))
+	k := 0
+	for n, d := range m.times {
+		s[k] = ndpair{
+			n: n,
+			d: d,
+		}
+		k++
+	}
+
+	sort.Sort(sort.Reverse(s))
+
+	var tot time.Duration
+	var buf bytes.Buffer
+	w := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', tabwriter.AlignRight)
+	for _, nd := range s {
+		tot += nd.d
+		fmt.Fprintf(w, "\t%s:\t%v\t\n", nd.n, nd.d)
+	}
+	fmt.Fprintf(w, "\n\tTOTAL:\t%v\t\n", tot)
+
+	l.Println("\nSolver wall times by segment:")
+	w.Flush()
+	fmt.Println((&buf).String())
+
+}
+
+type ndpair struct {
+	n string
+	d time.Duration
+}
+
+type ndpairs []ndpair
+
+func (s ndpairs) Less(i, j int) bool { return s[i].d < s[j].d }
+func (s ndpairs) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s ndpairs) Len() int           { return len(s) }
diff --git a/vendor/github.com/sdboyer/gps/satisfy.go b/vendor/github.com/sdboyer/gps/satisfy.go
index 78cffa0..d3a76b1 100644
--- a/vendor/github.com/sdboyer/gps/satisfy.go
+++ b/vendor/github.com/sdboyer/gps/satisfy.go
@@ -7,6 +7,7 @@
 // The goal is to determine whether selecting the atom would result in a state
 // where all the solver requirements are still satisfied.
 func (s *solver) check(a atomWithPackages, pkgonly bool) error {
+	s.mtr.push("satisfy")
 	pa := a.a
 	if nilpa == pa {
 		// This shouldn't be able to happen, but if it does, it unequivocally
@@ -19,12 +20,14 @@
 	if !pkgonly {
 		if err := s.checkAtomAllowable(pa); err != nil {
 			s.traceInfo(err)
+			s.mtr.pop()
 			return err
 		}
 	}
 
 	if err := s.checkRequiredPackagesExist(a); err != nil {
 		s.traceInfo(err)
+		s.mtr.pop()
 		return err
 	}
 
@@ -32,6 +35,7 @@
 	if err != nil {
 		// An err here would be from the package fetcher; pass it straight back
 		// TODO(sdboyer) can we traceInfo this?
+		s.mtr.pop()
 		return err
 	}
 
@@ -42,14 +46,17 @@
 	for _, dep := range deps {
 		if err := s.checkIdentMatches(a, dep); err != nil {
 			s.traceInfo(err)
+			s.mtr.pop()
 			return err
 		}
 		if err := s.checkDepsConstraintsAllowable(a, dep); err != nil {
 			s.traceInfo(err)
+			s.mtr.pop()
 			return err
 		}
 		if err := s.checkDepsDisallowsSelected(a, dep); err != nil {
 			s.traceInfo(err)
+			s.mtr.pop()
 			return err
 		}
 		// TODO(sdboyer) decide how to refactor in order to re-enable this. Checking for
@@ -60,12 +67,14 @@
 		//}
 		if err := s.checkPackageImportsFromDepExist(a, dep); err != nil {
 			s.traceInfo(err)
+			s.mtr.pop()
 			return err
 		}
 
 		// TODO(sdboyer) add check that fails if adding this atom would create a loop
 	}
 
+	s.mtr.pop()
 	return nil
 }
 
diff --git a/vendor/github.com/sdboyer/gps/solve_basic_test.go b/vendor/github.com/sdboyer/gps/solve_basic_test.go
index 9fe9780..022820a 100644
--- a/vendor/github.com/sdboyer/gps/solve_basic_test.go
+++ b/vendor/github.com/sdboyer/gps/solve_basic_test.go
@@ -294,17 +294,40 @@
 	return l
 }
 
-// mksolution makes a result set
-func mksolution(pairs ...string) map[ProjectIdentifier]Version {
-	m := make(map[ProjectIdentifier]Version)
-	for _, pair := range pairs {
-		a := mkAtom(pair)
-		m[a.id] = a.v
+// mksolution creates a map of project identifiers to their LockedProject
+// result, which is sufficient to act as a solution fixture for the purposes of
+// most tests.
+//
+// Either strings or LockedProjects can be provided. If a string is provided, it
+// is assumed that we're in the default, "basic" case where there is exactly one
+// package in a project, and it is the root of the project - meaning that only
+// the "." package should be listed. If a LockedProject is provided (e.g. as
+// returned from mklp()), then it's incorporated directly.
+//
+// If any other type is provided, the func will panic.
+func mksolution(inputs ...interface{}) map[ProjectIdentifier]LockedProject {
+	m := make(map[ProjectIdentifier]LockedProject)
+	for _, in := range inputs {
+		switch t := in.(type) {
+		case string:
+			a := mkAtom(t)
+			m[a.id] = NewLockedProject(a.id, a.v, []string{"."})
+		case LockedProject:
+			m[t.pi] = t
+		default:
+			panic(fmt.Sprintf("unexpected input to mksolution: %T %s", in, in))
+		}
 	}
 
 	return m
 }
 
+// mklp creates a LockedProject from string inputs
+func mklp(pair string, pkgs ...string) LockedProject {
+	a := mkAtom(pair)
+	return NewLockedProject(a.id, a.v, pkgs)
+}
+
 // computeBasicReachMap takes a depspec and computes a reach map which is
 // identical to the explicit depgraph.
 //
@@ -351,7 +374,7 @@
 	rootTree() PackageTree
 	specs() []depspec
 	maxTries() int
-	solution() map[ProjectIdentifier]Version
+	solution() map[ProjectIdentifier]LockedProject
 	failure() error
 }
 
@@ -374,8 +397,8 @@
 	n string
 	// depspecs. always treat first as root
 	ds []depspec
-	// results; map of name/version pairs
-	r map[ProjectIdentifier]Version
+	// results; map of name/atom pairs
+	r map[ProjectIdentifier]LockedProject
 	// max attempts the solver should need to find solution. 0 means no limit
 	maxAttempts int
 	// Use downgrade instead of default upgrade sorter
@@ -388,6 +411,8 @@
 	ovr ProjectConstraints
 	// request up/downgrade to all projects
 	changeall bool
+	// individual projects to change
+	changelist []ProjectRoot
 }
 
 func (f basicFixture) name() string {
@@ -402,14 +427,14 @@
 	return f.maxAttempts
 }
 
-func (f basicFixture) solution() map[ProjectIdentifier]Version {
+func (f basicFixture) solution() map[ProjectIdentifier]LockedProject {
 	return f.r
 }
 
 func (f basicFixture) rootmanifest() RootManifest {
 	return simpleRootManifest{
-		c:   f.ds[0].deps,
-		tc:  f.ds[0].devdeps,
+		c:   pcSliceToMap(f.ds[0].deps),
+		tc:  pcSliceToMap(f.ds[0].devdeps),
 		ovr: f.ovr,
 	}
 }
@@ -599,6 +624,111 @@
 		changeall: true,
 		downgrade: true,
 	},
+	"update one with only one": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 1.0.1"),
+			mkDepspec("foo 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+		),
+		changelist: []ProjectRoot{"foo"},
+	},
+	"update one of multi": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 1.0.1"),
+			mkDepspec("foo 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+			"bar 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.1",
+		),
+		changelist: []ProjectRoot{"foo"},
+	},
+	"update both of multi": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 1.0.1"),
+			mkDepspec("foo 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+			"bar 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+		changelist: []ProjectRoot{"foo", "bar"},
+	},
+	"update two of more": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *", "baz *"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 1.0.1"),
+			mkDepspec("foo 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+			mkDepspec("baz 1.0.0"),
+			mkDepspec("baz 1.0.1"),
+			mkDepspec("baz 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+			"bar 1.0.1",
+			"baz 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+			"baz 1.0.1",
+		),
+		changelist: []ProjectRoot{"foo", "bar"},
+	},
+	"break other lock with targeted update": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "baz *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+			mkDepspec("baz 1.0.0"),
+			mkDepspec("baz 1.0.1"),
+			mkDepspec("baz 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+			"bar 1.0.1",
+			"baz 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+			"baz 1.0.1",
+		),
+		changelist: []ProjectRoot{"foo", "bar"},
+	},
 	"with incompatible locked dependency": {
 		ds: []depspec{
 			mkDepspec("root 0.0.0", "foo >1.0.1"),
@@ -664,6 +794,24 @@
 		),
 		maxAttempts: 4,
 	},
+	"break lock when only the deps necessitate it": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("foo 1.0.0 foorev", "bar <2.0.0"),
+			mkDepspec("foo 2.0.0", "bar <3.0.0"),
+			mkDepspec("bar 2.0.0", "baz <3.0.0"),
+			mkDepspec("baz 2.0.0", "foo >1.0.0"),
+		},
+		l: mklock(
+			"foo 1.0.0 foorev",
+		),
+		r: mksolution(
+			"foo 2.0.0",
+			"bar 2.0.0",
+			"baz 2.0.0",
+		),
+		maxAttempts: 4,
+	},
 	"locked atoms are matched on both local and net name": {
 		ds: []depspec{
 			mkDepspec("root 0.0.0", "foo *"),
@@ -1420,13 +1568,13 @@
 var _ Lock = fixLock{}
 
 // impl Spec interface
-func (ds depspec) DependencyConstraints() []ProjectConstraint {
-	return ds.deps
+func (ds depspec) DependencyConstraints() ProjectConstraints {
+	return pcSliceToMap(ds.deps)
 }
 
 // impl Spec interface
-func (ds depspec) TestDependencyConstraints() []ProjectConstraint {
-	return ds.devdeps
+func (ds depspec) TestDependencyConstraints() ProjectConstraints {
+	return pcSliceToMap(ds.devdeps)
 }
 
 type fixLock []LockedProject
diff --git a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
index f430ad9..cbd5957 100644
--- a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
+++ b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
@@ -104,7 +104,7 @@
 				pkg("a/foo")),
 		},
 		r: mksolution(
-			"a 1.0.0",
+			mklp("a 1.0.0", "foo"),
 		),
 	},
 	// Import jump is in a dep, and points to a transitive dep
@@ -285,7 +285,7 @@
 			),
 		},
 		r: mksolution(
-			"a 1.0.0",
+			mklp("a 1.0.0", ".", "second"),
 			"b 2.0.0",
 			"c 1.2.0",
 			"d 1.0.0",
@@ -742,7 +742,7 @@
 	// bimodal project. first is always treated as root project
 	ds []depspec
 	// results; map of name/version pairs
-	r map[ProjectIdentifier]Version
+	r map[ProjectIdentifier]LockedProject
 	// max attempts the solver should need to find solution. 0 means no limit
 	maxAttempts int
 	// Use downgrade instead of default upgrade sorter
@@ -774,14 +774,14 @@
 	return f.maxAttempts
 }
 
-func (f bimodalFixture) solution() map[ProjectIdentifier]Version {
+func (f bimodalFixture) solution() map[ProjectIdentifier]LockedProject {
 	return f.r
 }
 
 func (f bimodalFixture) rootmanifest() RootManifest {
 	m := simpleRootManifest{
-		c:   f.ds[0].deps,
-		tc:  f.ds[0].devdeps,
+		c:   pcSliceToMap(f.ds[0].deps),
+		tc:  pcSliceToMap(f.ds[0].devdeps),
 		ovr: f.ovr,
 		ig:  make(map[string]bool),
 	}
diff --git a/vendor/github.com/sdboyer/gps/solve_test.go b/vendor/github.com/sdboyer/gps/solve_test.go
index 425dd50..f6a0b7a 100644
--- a/vendor/github.com/sdboyer/gps/solve_test.go
+++ b/vendor/github.com/sdboyer/gps/solve_test.go
@@ -20,6 +20,7 @@
 // TODO(sdboyer) regression test ensuring that locks with only revs for projects don't cause errors
 func init() {
 	flag.StringVar(&fixtorun, "gps.fix", "", "A single fixture to run in TestBasicSolves or TestBimodalSolves")
+	mkBridge(nil, nil)
 	overrideMkBridge()
 }
 
@@ -93,6 +94,7 @@
 		Lock:            dummyLock{},
 		Downgrade:       fix.downgrade,
 		ChangeAll:       fix.changeall,
+		ToChange:        fix.changelist,
 	}
 
 	if fix.l != nil {
@@ -193,10 +195,9 @@
 		}
 
 		// Dump result projects into a map for easier interrogation
-		rp := make(map[ProjectIdentifier]Version)
-		for _, p := range r.p {
-			pa := p.toAtom()
-			rp[pa.id] = pa.v
+		rp := make(map[ProjectIdentifier]LockedProject)
+		for _, lp := range r.p {
+			rp[lp.pi] = lp
 		}
 
 		fixlen, rlen := len(fix.solution()), len(rp)
@@ -207,24 +208,26 @@
 
 		// Whether or not len is same, still have to verify that results agree
 		// Walk through fixture/expected results first
-		for p, v := range fix.solution() {
-			if av, exists := rp[p]; !exists {
-				t.Errorf("(fixture: %q) Project %q expected but missing from results", fix.name(), ppi(p))
+		for id, flp := range fix.solution() {
+			if lp, exists := rp[id]; !exists {
+				t.Errorf("(fixture: %q) Project %q expected but missing from results", fix.name(), ppi(id))
 			} else {
 				// delete result from map so we skip it on the reverse pass
-				delete(rp, p)
-				if v != av {
-					t.Errorf("(fixture: %q) Expected version %q of project %q, but actual version was %q", fix.name(), pv(v), ppi(p), pv(av))
+				delete(rp, id)
+				if flp.Version() != lp.Version() {
+					t.Errorf("(fixture: %q) Expected version %q of project %q, but actual version was %q", fix.name(), pv(flp.Version()), ppi(id), pv(lp.Version()))
+				}
+
+				if !reflect.DeepEqual(lp.pkgs, flp.pkgs) {
+					t.Errorf("(fixture: %q) Package list was not as expected for project %s@%s:\n\t(GOT) %s\n\t(WNT) %s", fix.name(), ppi(id), pv(lp.Version()), lp.pkgs, flp.pkgs)
 				}
 			}
 		}
 
 		// Now walk through remaining actual results
-		for p, v := range rp {
-			if fv, exists := fix.solution()[p]; !exists {
-				t.Errorf("(fixture: %q) Unexpected project %q present in results", fix.name(), ppi(p))
-			} else if v != fv {
-				t.Errorf("(fixture: %q) Got version %q of project %q, but expected version was %q", fix.name(), pv(v), ppi(p), pv(fv))
+		for id, lp := range rp {
+			if _, exists := fix.solution()[id]; !exists {
+				t.Errorf("(fixture: %q) Unexpected project %s@%s present in results, with pkgs:\n\t%s", fix.name(), ppi(id), pv(lp.Version()), lp.pkgs)
 			}
 		}
 	}
@@ -353,6 +356,27 @@
 	}
 	params.Manifest = nil
 
+	params.ToChange = []ProjectRoot{"foo"}
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on non-empty ToChange without a lock provided")
+	} else if !strings.Contains(err.Error(), "update specifically requested for") {
+		t.Error("Prepare should have given error on ToChange without Lock, but gave:", err)
+	}
+
+	params.Lock = safeLock{
+		p: []LockedProject{
+			NewLockedProject(mkPI("bar"), Revision("makebelieve"), nil),
+		},
+	}
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on ToChange containing project not in lock")
+	} else if !strings.Contains(err.Error(), "cannot update foo as it is not in the lock") {
+		t.Error("Prepare should have given error on ToChange with item not present in Lock, but gave:", err)
+	}
+
+	params.Lock, params.ToChange = nil, nil
 	_, err = Prepare(params, sm)
 	if err != nil {
 		t.Error("Basic conditions satisfied, prepare should have completed successfully, err as:", err)
diff --git a/vendor/github.com/sdboyer/gps/solver.go b/vendor/github.com/sdboyer/gps/solver.go
index 5556589..923ede2 100644
--- a/vendor/github.com/sdboyer/gps/solver.go
+++ b/vendor/github.com/sdboyer/gps/solver.go
@@ -4,7 +4,6 @@
 	"container/heap"
 	"fmt"
 	"log"
-	"os"
 	"sort"
 	"strings"
 
@@ -159,6 +158,9 @@
 
 	// A defensively-copied instance of params.RootPackageTree
 	rpt PackageTree
+
+	// metrics for the current solve run.
+	mtr *metrics
 }
 
 // A Solver is the main workhorse of gps: given a set of project inputs, it
@@ -203,6 +205,9 @@
 	if params.Trace && params.TraceLogger == nil {
 		return nil, badOptsFailure("trace requested, but no logger provided")
 	}
+	if params.Lock == nil && len(params.ToChange) != 0 {
+		return nil, badOptsFailure(fmt.Sprintf("update specifically requested for %s, but no lock was provided to upgrade from", params.ToChange))
+	}
 
 	if params.Manifest == nil {
 		params.Manifest = simpleRootManifest{}
@@ -256,10 +261,6 @@
 	s.chng = make(map[ProjectRoot]struct{})
 	s.rlm = make(map[ProjectRoot]LockedProject)
 
-	for _, v := range s.params.ToChange {
-		s.chng[v] = struct{}{}
-	}
-
 	// Initialize stacks and queues
 	s.sel = &selection{
 		deps: make(map[ProjectRoot][]dependency),
@@ -282,6 +283,13 @@
 		s.rl = prepLock(s.params.Lock)
 	}
 
+	for _, p := range s.params.ToChange {
+		if _, exists := s.rlm[p]; !exists {
+			return nil, badOptsFailure(fmt.Sprintf("cannot update %s as it is not in the lock", p))
+		}
+		s.chng[p] = struct{}{}
+	}
+
 	return s, nil
 }
 
@@ -290,6 +298,9 @@
 //
 // This is the entry point to the main gps workhorse.
 func (s *solver) Solve() (Solution, error) {
+	// Set up a metrics object
+	s.mtr = newMetrics()
+
 	// Prime the queues with the root project
 	err := s.selectRoot()
 	if err != nil {
@@ -298,6 +309,7 @@
 
 	all, err := s.solve()
 
+	s.mtr.pop()
 	var soln solution
 	if err == nil {
 		soln = solution{
@@ -316,6 +328,9 @@
 	}
 
 	s.traceFinish(soln, err)
+	if s.params.Trace {
+		s.mtr.dump(s.params.TraceLogger)
+	}
 	return soln, err
 }
 
@@ -338,13 +353,14 @@
 		// guarantee the bmi will contain at least one package from this project
 		// that has yet to be selected.)
 		if awp, is := s.sel.selected(bmi.id); !is {
+			s.mtr.push("new-atom")
 			// Analysis path for when we haven't selected the project yet - need
 			// to create a version queue.
 			queue, err := s.createVersionQueue(bmi)
 			if err != nil {
 				// Err means a failure somewhere down the line; try backtracking.
 				s.traceStartBacktrack(bmi, err, false)
-				//s.traceBacktrack(bmi, false)
+				s.mtr.pop()
 				if s.backtrack() {
 					// backtracking succeeded, move to the next unselected id
 					continue
@@ -365,7 +381,9 @@
 			}
 			s.selectAtom(awp, false)
 			s.vqs = append(s.vqs, queue)
+			s.mtr.pop()
 		} else {
+			s.mtr.push("add-atom")
 			// We're just trying to add packages to an already-selected project.
 			// That means it's not OK to burn through the version queue for that
 			// project as we do when first selecting a project, as doing so
@@ -394,12 +412,14 @@
 					// backtracking succeeded, move to the next unselected id
 					continue
 				}
+				s.mtr.pop()
 				return nil, err
 			}
 			s.selectAtom(nawp, true)
 			// We don't add anything to the stack of version queues because the
 			// backtracker knows not to pop the vqstack if it backtracks
 			// across a pure-package addition.
+			s.mtr.pop()
 		}
 	}
 
@@ -426,6 +446,7 @@
 // selectRoot is a specialized selectAtom, used solely to initially
 // populate the queues at the beginning of a solve run.
 func (s *solver) selectRoot() error {
+	s.mtr.push("select-root")
 	pa := atom{
 		id: ProjectIdentifier{
 			ProjectRoot: ProjectRoot(s.rpt.ImportRoot),
@@ -459,8 +480,7 @@
 
 	// If we're looking for root's deps, get it from opts and local root
 	// analysis, rather than having the sm do it
-	c, tc := s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()
-	mdeps := s.ovr.overrideAll(pcSliceToMap(c, tc).asSortedSlice())
+	mdeps := s.ovr.overrideAll(s.rm.DependencyConstraints().merge(s.rm.TestDependencyConstraints()))
 
 	// Err is not possible at this point, as it could only come from
 	// listPackages(), which if we're here already succeeded for root
@@ -474,7 +494,7 @@
 
 	for _, dep := range deps {
 		// If we have no lock, or if this dep isn't in the lock, then prefetch
-		// it. See longer explanation in selectRoot() for how we benefit from
+		// it. See longer explanation in selectAtom() for how we benefit from
 		// parallelism here.
 		if _, has := s.rlm[dep.Ident.ProjectRoot]; !has {
 			go s.b.SyncSourceFor(dep.Ident)
@@ -486,6 +506,7 @@
 	}
 
 	s.traceSelectRoot(s.rpt, deps)
+	s.mtr.pop()
 	return nil
 }
 
@@ -597,12 +618,7 @@
 		}
 
 		// Make a new completeDep with an open constraint, respecting overrides
-		pd := s.ovr.override(ProjectConstraint{
-			Ident: ProjectIdentifier{
-				ProjectRoot: root,
-			},
-			Constraint: Any(),
-		})
+		pd := s.ovr.override(root, ProjectProperties{Constraint: Any()})
 
 		// Insert the pd into the trie so that further deps from this
 		// project get caught by the prefix search
@@ -876,6 +892,7 @@
 		return false
 	}
 
+	s.mtr.push("backtrack")
 	for {
 		for {
 			if len(s.vqs) == 0 {
@@ -935,6 +952,7 @@
 		s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil
 	}
 
+	s.mtr.pop()
 	// Backtracking was successful if loop ended before running out of versions
 	if len(s.vqs) == 0 {
 		return false
@@ -1044,6 +1062,7 @@
 //
 // Behavior is slightly diffferent if pkgonly is true.
 func (s *solver) selectAtom(a atomWithPackages, pkgonly bool) {
+	s.mtr.push("select-atom")
 	s.unsel.remove(bimodalIdentifier{
 		id: a.a.id,
 		pl: a.pl,
@@ -1117,9 +1136,11 @@
 	}
 
 	s.traceSelect(a, pkgonly)
+	s.mtr.pop()
 }
 
 func (s *solver) unselectLast() (atomWithPackages, bool) {
+	s.mtr.push("unselect")
 	awp, first := s.sel.popSelection()
 	heap.Push(s.unsel, bimodalIdentifier{id: awp.a.id, pl: awp.pl})
 
@@ -1139,6 +1160,7 @@
 		}
 	}
 
+	s.mtr.pop()
 	return awp, first
 }
 
@@ -1160,8 +1182,18 @@
 		panic("unreachable")
 	}
 
+	lp.pkgs = make([]string, len(pkgs))
+	k := 0
+
+	pr := string(pa.id.ProjectRoot)
+	trim := pr + "/"
 	for pkg := range pkgs {
-		lp.pkgs = append(lp.pkgs, strings.TrimPrefix(pkg, string(pa.id.ProjectRoot)+string(os.PathSeparator)))
+		if pkg == string(pa.id.ProjectRoot) {
+			lp.pkgs[k] = "."
+		} else {
+			lp.pkgs[k] = strings.TrimPrefix(pkg, trim)
+		}
+		k++
 	}
 	sort.Strings(lp.pkgs)
 
diff --git a/vendor/github.com/sdboyer/gps/source.go b/vendor/github.com/sdboyer/gps/source.go
index 81cb3be..01bb8c0 100644
--- a/vendor/github.com/sdboyer/gps/source.go
+++ b/vendor/github.com/sdboyer/gps/source.go
@@ -96,15 +96,13 @@
 		return pi.Manifest, pi.Lock, nil
 	}
 
-	bs.crepo.mut.Lock()
-	if !bs.crepo.synced {
-		err = bs.crepo.r.Update()
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed fetching latest updates with err: %s", err.Error())
-		}
-		bs.crepo.synced = true
+	// Cache didn't help; ensure our local is fully up to date.
+	err = bs.syncLocal()
+	if err != nil {
+		return nil, nil, err
 	}
 
+	bs.crepo.mut.Lock()
 	// Always prefer a rev, if it's available
 	if pv, ok := v.(PairedVersion); ok {
 		err = bs.crepo.r.UpdateVersion(pv.Underlying().String())
@@ -115,7 +113,7 @@
 
 	if err != nil {
 		// TODO(sdboyer) More-er proper-er error
-		panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", bs.crepo.r.LocalPath(), v.String(), err))
+		panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", bs.crepo.r.LocalPath(), v.String(), unwrapVcsErr(err)))
 	}
 
 	bs.crepo.mut.RLock()
@@ -139,7 +137,7 @@
 		return pi.Manifest, pi.Lock, nil
 	}
 
-	return nil, nil, err
+	return nil, nil, unwrapVcsErr(err)
 }
 
 // toRevision turns a Version into a Revision, if doing so is possible based on
@@ -212,15 +210,27 @@
 	if !bs.checkExistence(existsInCache) {
 		if bs.checkExistence(existsUpstream) {
 			bs.crepo.mut.Lock()
+			if bs.crepo.synced {
+				// A second ensure call coming in while the first is completing
+				// isn't terribly unlikely, especially for a large repo. In that
+				// event, the synced flag will have flipped on by the time we
+				// acquire the lock. If it has, there's no need to do this work
+				// twice.
+				bs.crepo.mut.Unlock()
+				return nil
+			}
+
 			err := bs.crepo.r.Get()
-			bs.crepo.mut.Unlock()
 
 			if err != nil {
-				return fmt.Errorf("failed to create repository cache for %s with err:\n%s", bs.crepo.r.Remote(), err)
+				bs.crepo.mut.Unlock()
+				return fmt.Errorf("failed to create repository cache for %s with err:\n%s", bs.crepo.r.Remote(), unwrapVcsErr(err))
 			}
+
 			bs.crepo.synced = true
 			bs.ex.s |= existsInCache
 			bs.ex.f |= existsInCache
+			bs.crepo.mut.Unlock()
 		} else {
 			return fmt.Errorf("project %s does not exist upstream", bs.crepo.r.Remote())
 		}
@@ -290,11 +300,15 @@
 	// This case is really just for git repos, where the lvfunc doesn't
 	// guarantee that the local repo is synced
 	if !bs.crepo.synced {
-		bs.syncerr = bs.crepo.r.Update()
-		if bs.syncerr != nil {
+		bs.crepo.mut.Lock()
+		err := bs.crepo.r.Update()
+		if err != nil {
+			bs.syncerr = fmt.Errorf("failed fetching latest updates with err: %s", unwrapVcsErr(err))
+			bs.crepo.mut.Unlock()
 			return bs.syncerr
 		}
 		bs.crepo.synced = true
+		bs.crepo.mut.Unlock()
 	}
 
 	return nil
@@ -328,20 +342,24 @@
 		if !bs.crepo.synced {
 			err = bs.crepo.r.Update()
 			if err != nil {
-				return PackageTree{}, fmt.Errorf("could not fetch latest updates into repository: %s", err)
+				err = fmt.Errorf("could not fetch latest updates into repository: %s", unwrapVcsErr(err))
+				return
 			}
 			bs.crepo.synced = true
 		}
 		err = bs.crepo.r.UpdateVersion(v.String())
 	}
 
-	ptree, err = ListPackages(bs.crepo.r.LocalPath(), string(pr))
-	bs.crepo.mut.Unlock()
-
-	// TODO(sdboyer) cache errs?
-	if err != nil {
-		bs.dc.ptrees[r] = ptree
+	if err == nil {
+		ptree, err = ListPackages(bs.crepo.r.LocalPath(), string(pr))
+		// TODO(sdboyer) cache errs?
+		if err == nil {
+			bs.dc.ptrees[r] = ptree
+		}
+	} else {
+		err = unwrapVcsErr(err)
 	}
+	bs.crepo.mut.Unlock()
 
 	return
 }
diff --git a/vendor/github.com/sdboyer/gps/source_errors.go b/vendor/github.com/sdboyer/gps/source_errors.go
new file mode 100644
index 0000000..522616b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source_errors.go
@@ -0,0 +1,21 @@
+package gps
+
+import (
+	"fmt"
+
+	"github.com/Masterminds/vcs"
+)
+
+// unwrapVcsErr will extract actual command output from a vcs err, if possible
+//
+// TODO this is really dumb, lossy, and needs proper handling
+func unwrapVcsErr(err error) error {
+	switch verr := err.(type) {
+	case *vcs.LocalError:
+		return fmt.Errorf("%s: %s", verr.Error(), verr.Out())
+	case *vcs.RemoteError:
+		return fmt.Errorf("%s: %s", verr.Error(), verr.Out())
+	default:
+		return err
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/source_manager.go b/vendor/github.com/sdboyer/gps/source_manager.go
index f59ae62..7b4bcb5 100644
--- a/vendor/github.com/sdboyer/gps/source_manager.go
+++ b/vendor/github.com/sdboyer/gps/source_manager.go
@@ -6,7 +6,6 @@
 	"path/filepath"
 	"strings"
 	"sync"
-	"sync/atomic"
 
 	"github.com/Masterminds/semver"
 )
@@ -86,11 +85,19 @@
 	lf       *os.File
 	srcs     map[string]source
 	srcmut   sync.RWMutex
+	srcfuts  map[string]*unifiedFuture
+	srcfmut  sync.RWMutex
 	an       ProjectAnalyzer
 	dxt      deducerTrie
 	rootxt   prTrie
 }
 
+type unifiedFuture struct {
+	rc, sc chan struct{}
+	rootf  stringFuture
+	srcf   sourceFuture
+}
+
 var _ SourceManager = &SourceMgr{}
 
 // NewSourceManager produces an instance of gps's built-in SourceManager. It
@@ -138,6 +145,7 @@
 		cachedir: cachedir,
 		lf:       fi,
 		srcs:     make(map[string]source),
+		srcfuts:  make(map[string]*unifiedFuture),
 		an:       an,
 		dxt:      pathDeducerTrie(),
 		rootxt:   newProjectRootTrie(),
@@ -284,16 +292,17 @@
 		return root, nil
 	}
 
-	rootf, _, err := sm.deducePathAndProcess(ip)
+	ft, err := sm.deducePathAndProcess(ip)
 	if err != nil {
 		return "", err
 	}
 
-	r, err := rootf()
+	r, err := ft.rootf()
 	return ProjectRoot(r), err
 }
 
 func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
+	//pretty.Println(id.ProjectRoot)
 	nn := id.netName()
 
 	sm.srcmut.RLock()
@@ -303,130 +312,164 @@
 		return src, nil
 	}
 
-	_, srcf, err := sm.deducePathAndProcess(nn)
+	ft, err := sm.deducePathAndProcess(nn)
 	if err != nil {
 		return nil, err
 	}
 
 	// we don't care about the ident here, and the future produced by
 	// deducePathAndProcess will dedupe with what's in the sm.srcs map
-	src, _, err = srcf()
+	src, _, err = ft.srcf()
 	return src, err
 }
 
-func (sm *SourceMgr) deducePathAndProcess(path string) (stringFuture, sourceFuture, error) {
+func (sm *SourceMgr) deducePathAndProcess(path string) (*unifiedFuture, error) {
+	// Check for an already-existing future in the map first
+	sm.srcfmut.RLock()
+	ft, exists := sm.srcfuts[path]
+	sm.srcfmut.RUnlock()
+
+	if exists {
+		return ft, nil
+	}
+
+	// Don't have one - set one up.
 	df, err := sm.deduceFromPath(path)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
-	var rstart, sstart int32
-	rc, sc := make(chan struct{}, 1), make(chan struct{}, 1)
+	sm.srcfmut.Lock()
+	defer sm.srcfmut.Unlock()
+	// A bad interleaving could allow two goroutines to make it here for the
+	// same path, so we have to re-check existence.
+	if ft, exists = sm.srcfuts[path]; exists {
+		return ft, nil
+	}
 
-	// Rewrap in a deferred future, so the caller can decide when to trigger it
-	rootf := func() (pr string, err error) {
-		// CAS because a bad interleaving here would panic on double-closing rc
-		if atomic.CompareAndSwapInt32(&rstart, 0, 1) {
-			go func() {
-				defer close(rc)
-				pr, err = df.root()
-				if err != nil {
-					// Don't cache errs. This doesn't really hurt the solver, and is
-					// beneficial for other use cases because it means we don't have to
-					// expose any kind of controls for clearing caches.
-					return
-				}
+	ft = &unifiedFuture{
+		rc: make(chan struct{}, 1),
+		sc: make(chan struct{}, 1),
+	}
 
-				tpr := ProjectRoot(pr)
-				sm.rootxt.Insert(pr, tpr)
-				// It's not harmful if the netname was a URL rather than an
-				// import path
-				if pr != path {
-					// Insert the result into the rootxt twice - once at the
-					// root itself, so as to catch siblings/relatives, and again
-					// at the exact provided import path (assuming they were
-					// different), so that on subsequent calls, exact matches
-					// can skip the regex above.
-					sm.rootxt.Insert(path, tpr)
-				}
-			}()
+	// Rewrap the rootfinding func in another future
+	var pr string
+	var rooterr error
+
+	// Kick off the func to get root and register it into the rootxt.
+	rootf := func() {
+		defer close(ft.rc)
+		pr, rooterr = df.root()
+		if rooterr != nil {
+			// Don't cache errs. This doesn't really hurt the solver, and is
+			// beneficial for other use cases because it means we don't have to
+			// expose any kind of controls for clearing caches.
+			return
 		}
 
-		<-rc
-		return pr, err
+		tpr := ProjectRoot(pr)
+		sm.rootxt.Insert(pr, tpr)
+		// It's not harmful if the netname was a URL rather than an
+		// import path
+		if pr != path {
+			// Insert the result into the rootxt twice - once at the
+			// root itself, so as to catch siblings/relatives, and again
+			// at the exact provided import path (assuming they were
+			// different), so that on subsequent calls, exact matches
+			// can skip the regex above.
+			sm.rootxt.Insert(path, tpr)
+		}
 	}
 
-	// Now, handle the source
+	// If deduction tells us this is slow, do it async in its own goroutine;
+	// otherwise, we can do it here and give the scheduler a bit of a break.
+	if df.rslow {
+		go rootf()
+	} else {
+		rootf()
+	}
+
+	// Store a closure bound to the future result on the futTracker.
+	ft.rootf = func() (string, error) {
+		<-ft.rc
+		return pr, rooterr
+	}
+
+	// Root future is handled, now build up the source future.
+	//
+	// First, complete the partialSourceFuture with information the sm has about
+	// our cachedir and analyzer
 	fut := df.psf(sm.cachedir, sm.an)
 
-	// Rewrap in a deferred future, so the caller can decide when to trigger it
-	srcf := func() (src source, ident string, err error) {
-		// CAS because a bad interleaving here would panic on double-closing sc
-		if atomic.CompareAndSwapInt32(&sstart, 0, 1) {
-			go func() {
-				defer close(sc)
-				src, ident, err = fut()
-				if err != nil {
-					// Don't cache errs. This doesn't really hurt the solver, and is
-					// beneficial for other use cases because it means we don't have
-					// to expose any kind of controls for clearing caches.
-					return
-				}
-
-				sm.srcmut.Lock()
-				defer sm.srcmut.Unlock()
-
-				// Check to make sure a source hasn't shown up in the meantime, or that
-				// there wasn't already one at the ident.
-				var hasi, hasp bool
-				var srci, srcp source
-				if ident != "" {
-					srci, hasi = sm.srcs[ident]
-				}
-				srcp, hasp = sm.srcs[path]
-
-				// if neither the ident nor the input path have an entry for this src,
-				// we're in the simple case - write them both in and we're done
-				if !hasi && !hasp {
-					sm.srcs[path] = src
-					if ident != path && ident != "" {
-						sm.srcs[ident] = src
-					}
-					return
-				}
-
-				// Now, the xors.
-				//
-				// If already present for ident but not for path, copy ident's src
-				// to path. This covers cases like a gopkg.in path referring back
-				// onto a github repository, where something else already explicitly
-				// looked up that same gh repo.
-				if hasi && !hasp {
-					sm.srcs[path] = srci
-					src = srci
-				}
-				// If already present for path but not for ident, do NOT copy path's
-				// src to ident, but use the returned one instead. Really, this case
-				// shouldn't occur at all...? But the crucial thing is that the
-				// path-based one has already discovered what actual ident of source
-				// they want to use, and changing that arbitrarily would have
-				// undefined effects.
-				if hasp && !hasi && ident != "" {
-					sm.srcs[ident] = src
-				}
-
-				// If both are present, then assume we're good, and use the path one
-				if hasp && hasi {
-					// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
-					// same object, panic
-					src = srcp
-				}
-			}()
+	// The maybeSource-trying process is always slow, so keep it async here.
+	var src source
+	var ident string
+	var srcerr error
+	go func() {
+		defer close(ft.sc)
+		src, ident, srcerr = fut()
+		if srcerr != nil {
+			// Don't cache errs. This doesn't really hurt the solver, and is
+			// beneficial for other use cases because it means we don't have
+			// to expose any kind of controls for clearing caches.
+			return
 		}
 
-		<-sc
-		return
+		sm.srcmut.Lock()
+		defer sm.srcmut.Unlock()
+
+		// Check to make sure a source hasn't shown up in the meantime, or that
+		// there wasn't already one at the ident.
+		var hasi, hasp bool
+		var srci, srcp source
+		if ident != "" {
+			srci, hasi = sm.srcs[ident]
+		}
+		srcp, hasp = sm.srcs[path]
+
+		// if neither the ident nor the input path have an entry for this src,
+		// we're in the simple case - write them both in and we're done
+		if !hasi && !hasp {
+			sm.srcs[path] = src
+			if ident != path && ident != "" {
+				sm.srcs[ident] = src
+			}
+			return
+		}
+
+		// Now, the xors.
+		//
+		// If already present for ident but not for path, copy ident's src
+		// to path. This covers cases like a gopkg.in path referring back
+		// onto a github repository, where something else already explicitly
+		// looked up that same gh repo.
+		if hasi && !hasp {
+			sm.srcs[path] = srci
+			src = srci
+		}
+		// If already present for path but not for ident, do NOT copy path's
+		// src to ident, but use the returned one instead. Really, this case
+		// shouldn't occur at all...? But the crucial thing is that the
+		// path-based one has already discovered what actual ident of source
+		// they want to use, and changing that arbitrarily would have
+		// undefined effects.
+		if hasp && !hasi && ident != "" {
+			sm.srcs[ident] = src
+		}
+
+		// If both are present, then assume we're good, and use the path one
+		if hasp && hasi {
+			// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
+			// same object, panic
+			src = srcp
+		}
+	}()
+
+	ft.srcf = func() (source, string, error) {
+		<-ft.sc
+		return src, ident, srcerr
 	}
 
-	return rootf, srcf, nil
+	sm.srcfuts[path] = ft
+	return ft, nil
 }
diff --git a/vendor/github.com/sdboyer/gps/source_test.go b/vendor/github.com/sdboyer/gps/source_test.go
index 787e573..284df82 100644
--- a/vendor/github.com/sdboyer/gps/source_test.go
+++ b/vendor/github.com/sdboyer/gps/source_test.go
@@ -4,6 +4,7 @@
 	"io/ioutil"
 	"net/url"
 	"reflect"
+	"sync"
 	"testing"
 )
 
@@ -24,7 +25,7 @@
 		}
 	}
 
-	n := "github.com/Masterminds/VCSTestRepo"
+	n := "github.com/sdboyer/gpkt"
 	un := "https://" + n
 	u, err := url.Parse(un)
 	if err != nil {
@@ -73,21 +74,25 @@
 	}
 
 	// check that an expected rev is present
-	is, err := src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
+	is, err := src.revisionPresentIn(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e"))
 	if err != nil {
 		t.Errorf("Unexpected error while checking revision presence: %s", err)
 	} else if !is {
 		t.Errorf("Revision that should exist was not present")
 	}
 
-	if len(vlist) != 3 {
-		t.Errorf("git test repo should've produced three versions, got %v: vlist was %s", len(vlist), vlist)
+	if len(vlist) != 7 {
+		t.Errorf("git test repo should've produced seven versions, got %v: vlist was %s", len(vlist), vlist)
 	} else {
 		SortForUpgrade(vlist)
 		evl := []Version{
-			NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
-			newDefaultBranch("master").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
-			NewBranch("test").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
+			NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
+			NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")),
+			NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			newDefaultBranch("master").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")),
+			NewBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")),
+			NewBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
 		}
 		if !reflect.DeepEqual(vlist, evl) {
 			t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
@@ -103,6 +108,145 @@
 	}
 }
 
+func TestGopkginSourceInteractions(t *testing.T) {
+	// This test is slowish, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping gopkg.in source version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	rf := func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+
+	tfunc := func(opath, n string, major int64, evl []Version) {
+		un := "https://" + n
+		u, err := url.Parse(un)
+		if err != nil {
+			t.Errorf("URL was bad, lolwut? errtext: %s", err)
+			return
+		}
+		mb := maybeGopkginSource{
+			opath: opath,
+			url:   u,
+			major: major,
+		}
+
+		isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
+		if err != nil {
+			t.Errorf("Unexpected error while setting up gopkginSource for test repo: %s", err)
+			return
+		}
+		src, ok := isrc.(*gopkginSource)
+		if !ok {
+			t.Errorf("Expected a gopkginSource, got a %T", isrc)
+			return
+		}
+		if ident != un {
+			t.Errorf("Expected %s as source ident, got %s", un, ident)
+		}
+		if src.major != major {
+			t.Errorf("Expected %v as major version filter on gopkginSource, got %v", major, src.major)
+		}
+
+		// check that an expected rev is present
+		rev := evl[0].(PairedVersion).Underlying()
+		is, err := src.revisionPresentIn(rev)
+		if err != nil {
+			t.Errorf("Unexpected error while checking revision presence: %s", err)
+		} else if !is {
+			t.Errorf("Revision %s that should exist was not present", rev)
+		}
+
+		vlist, err := src.listVersions()
+		if err != nil {
+			t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
+		}
+
+		if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for search")
+		}
+		if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for found")
+		}
+
+		if len(vlist) != len(evl) {
+			t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist))
+		} else {
+			SortForUpgrade(vlist)
+			if !reflect.DeepEqual(vlist, evl) {
+				t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+			}
+		}
+
+		// Run again, this time to ensure cache outputs correctly
+		vlist, err = src.listVersions()
+		if err != nil {
+			t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
+		}
+
+		if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for search")
+		}
+		if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for found")
+		}
+
+		if len(vlist) != len(evl) {
+			t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist))
+		} else {
+			SortForUpgrade(vlist)
+			if !reflect.DeepEqual(vlist, evl) {
+				t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+			}
+		}
+
+		// recheck that rev is present, this time interacting with cache differently
+		is, err = src.revisionPresentIn(rev)
+		if err != nil {
+			t.Errorf("Unexpected error while re-checking revision presence: %s", err)
+		} else if !is {
+			t.Errorf("Revision that should exist was not present on re-check")
+		}
+	}
+
+	// simultaneously run for v1, v2, and v3 filters of the target repo
+	wg := &sync.WaitGroup{}
+	wg.Add(3)
+	go func() {
+		tfunc("gopkg.in/sdboyer/gpkt.v1", "github.com/sdboyer/gpkt", 1, []Version{
+			NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")),
+			NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")),
+			newDefaultBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")),
+			NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")),
+		})
+		wg.Done()
+	}()
+
+	go func() {
+		tfunc("gopkg.in/sdboyer/gpkt.v2", "github.com/sdboyer/gpkt", 2, []Version{
+			NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
+		})
+		wg.Done()
+	}()
+
+	go func() {
+		tfunc("gopkg.in/sdboyer/gpkt.v3", "github.com/sdboyer/gpkt", 3, []Version{
+			newDefaultBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")),
+		})
+		wg.Done()
+	}()
+
+	wg.Wait()
+	rf()
+}
+
 func TestBzrSourceInteractions(t *testing.T) {
 	// This test is quite slow (ugh bzr), so skip it on -short
 	if testing.Short() {
diff --git a/vendor/github.com/sdboyer/gps/vcs_source.go b/vendor/github.com/sdboyer/gps/vcs_source.go
index 91089ca..19887e5 100644
--- a/vendor/github.com/sdboyer/gps/vcs_source.go
+++ b/vendor/github.com/sdboyer/gps/vcs_source.go
@@ -9,19 +9,23 @@
 	"strings"
 	"sync"
 
+	"github.com/Masterminds/semver"
 	"github.com/Masterminds/vcs"
 	"github.com/termie/go-shutil"
 )
 
-type vcsSource interface {
-	syncLocal() error
-	ensureLocal() error
-	listLocalVersionPairs() ([]PairedVersion, sourceExistence, error)
-	listUpstreamVersionPairs() ([]PairedVersion, sourceExistence, error)
-	hasRevision(Revision) (bool, error)
-	checkout(Version) error
-	exportVersionTo(Version, string) error
-}
+// Kept here as a reference in case it does become important to implement a
+// vcsSource interface. Remove if/when it becomes clear we're never going to do
+// this.
+//type vcsSource interface {
+//syncLocal() error
+//ensureLocal() error
+//listLocalVersionPairs() ([]PairedVersion, sourceExistence, error)
+//listUpstreamVersionPairs() ([]PairedVersion, sourceExistence, error)
+//hasRevision(Revision) (bool, error)
+//checkout(Version) error
+//exportVersionTo(Version, string) error
+//}
 
 // gitSource is a generic git repository implementation that should work with
 // all standard git remotes.
@@ -106,6 +110,29 @@
 		return
 	}
 
+	vlist, err = s.doListVersions()
+	if err != nil {
+		return nil, err
+	}
+
+	// Process the version data into the cache
+	//
+	// Reset rMap and vMap, as the loop below fully repopulates them
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
+
+	for _, v := range vlist {
+		pv := v.(PairedVersion)
+		u, r := pv.Unpair(), pv.Underlying()
+		s.dc.vMap[u] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], u)
+	}
+	// Mark the cache as being in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+func (s *gitSource) doListVersions() (vlist []Version, err error) {
 	r := s.crepo.r
 	var out []byte
 	c := exec.Command("git", "ls-remote", r.Remote())
@@ -154,7 +181,7 @@
 	s.ex.s |= existsUpstream
 	s.ex.f |= existsUpstream
 
-	// pull out the HEAD rev (it's always first) so we know what branches to
+	// Pull out the HEAD rev (it's always first) so we know what branches to
 	// mark as default. This is, perhaps, not the best way to glean this, but it
 	// was good enough for git itself until 1.8.5. Also, the alternative is
 	// sniffing data out of the pack protocol, which is a separate request, and
@@ -174,12 +201,12 @@
 	// * Multiple branches match the HEAD rev
 	// * None of them are master
 	// * The solver makes it into the branch list in the version queue
-	// * The user has provided no constraint, or DefaultBranch
+	// * The user/tool has provided no constraint (so, anyConstraint)
 	// * A branch that is not actually the default, but happens to share the
-	// rev, is lexicographically earlier than the true default branch
+	//   rev, is lexicographically less than the true default branch
 	//
-	// Then the user could end up with an erroneous non-default branch in their
-	// lock file.
+	// If all of those conditions are met, then the user would end up with an
+	// erroneous non-default branch in their lock file.
 	headrev := Revision(all[0][:40])
 	var onedef, multidef, defmaster bool
 
@@ -247,10 +274,88 @@
 		}
 	}
 
-	// Process the version data into the cache
+	return
+}
+
+// gopkginSource is a specialized git source that performs additional filtering
+// according to the input URL.
+type gopkginSource struct {
+	gitSource
+	major int64
+}
+
+func (s *gopkginSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	ovlist, err := s.doListVersions()
+	if err != nil {
+		return nil, err
+	}
+
+	// Apply gopkg.in's filtering rules
+	vlist = make([]Version, len(ovlist))
+	k := 0
+	var dbranch int // index of branch to be marked default
+	var bsv *semver.Version
+	for _, v := range ovlist {
+		// All git versions are paired with a revision, so this type assertion is safe
+		pv := v.(versionPair)
+		switch tv := pv.v.(type) {
+		case semVersion:
+			if tv.sv.Major() == s.major {
+				vlist[k] = v
+				k++
+			}
+		case branchVersion:
+			// The semver lib isn't exactly the same as gopkg.in's logic, but
+			// it's close enough that it's probably fine to use. We can be more
+			// exact if real problems crop up. The most obvious vector for
+			// problems is that we totally ignore the "unstable" designation
+			// right now.
+			sv, err := semver.NewVersion(tv.name)
+			if err != nil || sv.Major() != s.major {
+				// not a semver-shaped branch name at all, or not the same major
+				// version as specified in the import path constraint
+				continue
+			}
+
+			// Turn off the default branch marker unconditionally; we can't know
+			// which one to mark as default until we've seen them all.
+			tv.isDefault = false
+			// Figure out if this is the current leader for default branch
+			if bsv == nil || bsv.LessThan(sv) {
+				bsv = sv
+				dbranch = k
+			}
+			pv.v = tv
+			vlist[k] = pv
+			k++
+		}
+		// The switch skips plainVersions because they cannot possibly meet
+		// gopkg.in's requirements
+	}
+
+	vlist = vlist[:k]
+	if bsv != nil {
+		dbv := vlist[dbranch].(versionPair)
+		vlist[dbranch] = branchVersion{
+			name:      dbv.v.(branchVersion).name,
+			isDefault: true,
+		}.Is(dbv.r)
+	}
+
+	// Process the filtered version data into the cache
 	//
 	// reset the rmap and vmap, as they'll be fully repopulated by this
-	// TODO(sdboyer) detect out-of-sync pairings as we do this?
 	s.dc.vMap = make(map[UnpairedVersion]Revision)
 	s.dc.rMap = make(map[Revision][]UnpairedVersion)
 
@@ -379,7 +484,7 @@
 	// didn't create it
 	if !s.crepo.synced {
 		s.crepo.mut.Lock()
-		err = r.Update()
+		err = unwrapVcsErr(r.Update())
 		s.crepo.mut.Unlock()
 		if err != nil {
 			return
@@ -524,7 +629,7 @@
 	if !r.synced {
 		err := r.r.Update()
 		if err != nil {
-			return fmt.Errorf("err on attempting to update repo: %s", err.Error())
+			return fmt.Errorf("err on attempting to update repo: %s", unwrapVcsErr(err))
 		}
 	}