Chase vsolver v0.6.0
diff --git a/glide.lock b/glide.lock
index 592bbf0..da12611 100644
--- a/glide.lock
+++ b/glide.lock
@@ -10,7 +10,7 @@
- name: github.com/Masterminds/vcs
version: 7af28b64c5ec41b1558f5514fd938379822c237c
- name: github.com/sdboyer/vsolver
- version: 41ed3fc78392de9c882444546e038bf6724ca416
+ version: 4a1c3dd00ed484b3e87b4668b357e531b36baaa8
- name: github.com/termie/go-shutil
version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
- name: gopkg.in/yaml.v2
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/locals.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/locals.go
new file mode 100644
index 0000000..3f73943
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/locals.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+ "varied/otherpath"
+ "varied/namemismatch"
+ "varied/simple"
+)
+
+var (
+ _ = simple.S
+ _ = nm.V
+ _ = otherpath.O
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/m1p/a.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/m1p/a.go
new file mode 100644
index 0000000..181620f
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+ "sort"
+
+ "github.com/sdboyer/vsolver"
+)
+
+var (
+ M = sort.Strings
+ _ = vsolver.Solve
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/m1p/b.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+ "os"
+ "sort"
+)
+
+var (
+ _ = sort.Strings
+ _ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/main.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/main.go
new file mode 100644
index 0000000..92c3dc1
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/main.go
@@ -0,0 +1,9 @@
+package main
+
+import (
+ "net/http"
+)
+
+var (
+ _ = http.Client
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/namemismatch/nm.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/namemismatch/nm.go
new file mode 100644
index 0000000..44a0abb
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/namemismatch/nm.go
@@ -0,0 +1,12 @@
+package nm
+
+import (
+ "os"
+
+ "github.com/Masterminds/semver"
+)
+
+var (
+ V = os.FileInfo
+ _ = semver.Constraint
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/otherpath/otherpath_test.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/otherpath/otherpath_test.go
new file mode 100644
index 0000000..73891e6
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/otherpath/otherpath_test.go
@@ -0,0 +1,5 @@
+package otherpath
+
+import "varied/m1p"
+
+var O = m1p.M
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/another.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/another.go
new file mode 100644
index 0000000..85368da
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/another.go
@@ -0,0 +1,7 @@
+package another
+
+import "hash"
+
+var (
+ H = hash.Hash
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/another_test.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/another_test.go
new file mode 100644
index 0000000..72a89ad
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/another_test.go
@@ -0,0 +1,7 @@
+package another
+
+import "encoding/binary"
+
+var (
+ _ = binary.PutVarint
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/locals.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/locals.go
new file mode 100644
index 0000000..d8d0316
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/another/locals.go
@@ -0,0 +1,5 @@
+package another
+
+import "varied/m1p"
+
+var _ = m1p.M
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/locals.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/locals.go
new file mode 100644
index 0000000..7717e80
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/locals.go
@@ -0,0 +1,7 @@
+package simple
+
+import "varied/simple/another"
+
+var (
+ _ = another.H
+)
diff --git a/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/simple.go b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/simple.go
new file mode 100644
index 0000000..ed4a9c0
--- /dev/null
+++ b/vendor/github.com/sdboyer/vsolver/_testdata/src/varied/simple/simple.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+ "go/parser"
+
+ "github.com/sdboyer/vsolver"
+)
+
+var (
+ _ = parser.ParseFile
+ S = vsolver.Prepare
+)
diff --git a/vendor/github.com/sdboyer/vsolver/analysis.go b/vendor/github.com/sdboyer/vsolver/analysis.go
index 4a463a8..b91d2a5 100644
--- a/vendor/github.com/sdboyer/vsolver/analysis.go
+++ b/vendor/github.com/sdboyer/vsolver/analysis.go
@@ -34,8 +34,7 @@
}
}
-// listPackages lists info for all packages at or below the provided fileRoot,
-// optionally folding in data from test files as well.
+// listPackages lists info for all packages at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
@@ -44,13 +43,13 @@
// the import path for each package. The obvious case is for something typical,
// like:
//
-// fileRoot = /home/user/go/src/github.com/foo/bar
-// importRoot = github.com/foo/bar
+// fileRoot = "/home/user/go/src/github.com/foo/bar"
+// importRoot = "github.com/foo/bar"
//
-// Where the fileRoot and importRoot align. However, if you provide:
+// where the fileRoot and importRoot align. However, if you provide:
//
-// fileRoot = /home/user/workspace/path/to/repo
-// importRoot = github.com/foo/bar
+// fileRoot = "/home/user/workspace/path/to/repo"
+// importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and its subpackage "baz" will be
@@ -58,7 +57,7 @@
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
-// Package, or an error describing why the package is not valid.
+// Package, or an error describing why the directory is not a valid package.
func listPackages(fileRoot, importRoot string) (PackageTree, error) {
// Set up a build.ctx for parsing
ctx := build.Default
@@ -302,6 +301,11 @@
rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
for pkg, w := range workmap {
+ if len(w.ex) == 0 {
+ rm[strings.TrimPrefix(pkg, rt)] = nil
+ continue
+ }
+
edeps := make([]string, len(w.ex))
k := 0
for opkg := range w.ex {
@@ -309,6 +313,7 @@
k++
}
+ sort.Strings(edeps)
rm[strings.TrimPrefix(pkg, rt)] = edeps
}
@@ -544,19 +549,25 @@
return
}
+// A PackageTree represents the results of recursively parsing a tree of
+// packages, starting at the ImportRoot. The results of parsing the files in the
+// directory identified by each import path - a Package or an error - are stored
+// in the Packages map, keyed by that import path.
type PackageTree struct {
ImportRoot string
Packages map[string]PackageOrErr
}
+// PackageOrErr stores the results of attempting to parse a single directory for
+// Go source code.
type PackageOrErr struct {
P Package
Err error
}
// ExternalReach looks through a PackageTree and computes the list of external
-// dependencies (not under the tree at its designated import root) that are
-// imported by packages in the tree.
+// packages (not logical children of PackageTree.ImportRoot) that are
+// transitively imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. main packages should generally be excluded when analyzing the
@@ -564,9 +575,37 @@
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
-func (t PackageTree) ExternalReach(main, tests bool) (map[string][]string, error) {
+//
+// ignore is a map of import paths that, if encountered, should be excluded from
+// analysis. This exclusion applies to both internal and external packages. If
+// an external import path is ignored, it is simply omitted from the results.
+//
+// If an internal path is ignored, then it is excluded from all transitive
+// dependency chains and does not appear as a key in the final map. That is, if
+// you ignore A/foo, then the external package list for all internal packages
+// that import A/foo will not include external packages that were only reachable
+// through A/foo.
+//
+// Visually, this means that, given a PackageTree with root A and packages at A,
+// A/foo, and A/bar, and the following import chain:
+//
+// A -> A/foo -> A/bar -> B/baz
+//
+// If you ignore A/foo, then the returned map would be:
+//
+// map[string][]string{
+// "A": []string{},
+// "A/bar": []string{"B/baz"},
+// }
+//
+// It is safe to pass a nil map if there are no packages to ignore.
+func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) (map[string][]string, error) {
var someerrs bool
+ if ignore == nil {
+ ignore = make(map[string]bool)
+ }
+
// world's simplest adjacency list
workmap := make(map[string]wm)
@@ -581,6 +620,10 @@
if p.Name == "main" && !main {
continue
}
+ // Skip ignored packages
+ if ignore[ip] {
+ continue
+ }
imps = imps[:0]
imps = p.Imports
@@ -594,6 +637,10 @@
}
for _, imp := range imps {
+ if ignore[imp] {
+ continue
+ }
+
if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
w.ex[imp] = struct{}{}
} else {
@@ -616,7 +663,7 @@
if len(workmap) == 0 {
if someerrs {
// TODO proper errs
- return nil, fmt.Errorf("No packages without errors in %s", t.ImportRoot)
+ return nil, fmt.Errorf("no packages without errors in %s", t.ImportRoot)
}
return nil, nil
}
@@ -625,12 +672,37 @@
return wmToReach(workmap, "") // TODO this passes tests, but doesn't seem right
}
-func (t PackageTree) ListExternalImports(main, tests bool) ([]string, error) {
+// ListExternalImports computes a sorted, deduplicated list of all the external
+// packages that are imported by all packages in the PackageTree.
+//
+// "External" is defined as anything not prefixed, after path cleaning, by the
+// PackageTree.ImportRoot. This includes stdlib.
+//
+// If an internal path is ignored, all of the external packages that it uniquely
+// imports are omitted. Note, however, that no internal transitivity checks are
+// made here - every non-ignored package in the tree is considered
+// independently. That means, given a PackageTree with root A and packages at A,
+// A/foo, and A/bar, and the following import chain:
+//
+// A -> A/foo -> A/bar -> B/baz
+//
+// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
+// returned, because this method visits ALL packages in the tree, not only
+// those reachable from the root (or any other) package. If your use case
+// requires interrogating external imports with respect to only specific
+// package entry points, use ExternalReach() instead.
+//
+// It is safe to pass a nil map if there are no packages to ignore.
+func (t PackageTree) ListExternalImports(main, tests bool, ignore map[string]bool) ([]string, error) {
var someerrs bool
exm := make(map[string]struct{})
+ if ignore == nil {
+ ignore = make(map[string]bool)
+ }
+
var imps []string
- for _, perr := range t.Packages {
+ for ip, perr := range t.Packages {
if perr.Err != nil {
someerrs = true
continue
@@ -641,6 +713,10 @@
if p.Name == "main" && !main {
continue
}
+ // Skip ignored packages
+ if ignore[ip] {
+ continue
+ }
imps = imps[:0]
imps = p.Imports
@@ -649,7 +725,7 @@
}
for _, imp := range imps {
- if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
+ if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) && !ignore[imp] {
exm[imp] = struct{}{}
}
}
@@ -670,6 +746,7 @@
k++
}
+ sort.Strings(ex)
return ex, nil
}
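For orientation, a minimal sketch (not part of this commit) of how a caller might exercise the new ignore parameter on the two PackageTree methods changed above. The project root "example.com/proj" and the ignored subpackage are hypothetical; the method signatures are the ones introduced in this diff, and a real tree would come from the package listing rather than an empty literal.

package main

import (
	"fmt"

	"github.com/sdboyer/vsolver"
)

// reportImports prints the external import list and the per-package reach map
// for an already-parsed tree, skipping one (hypothetical) internal subpackage.
func reportImports(ptree vsolver.PackageTree) error {
	ignore := map[string]bool{
		"example.com/proj/internal/gen": true, // hypothetical ignored path
	}

	// Sorted, deduplicated list of everything imported from outside the tree
	// (stdlib included), with main packages and test imports folded in.
	ext, err := ptree.ListExternalImports(true, true, ignore)
	if err != nil {
		return err
	}
	fmt.Println("external imports:", ext)

	// Per-package reach; ignored internal packages drop out of the map, and
	// externals reachable only through them are omitted.
	reach, err := ptree.ExternalReach(true, true, ignore)
	if err != nil {
		return err
	}
	for ip, pkgs := range reach {
		fmt.Printf("%s -> %v\n", ip, pkgs)
	}
	return nil
}

func main() {
	// An empty tree keeps the sketch self-contained and runnable.
	_ = reportImports(vsolver.PackageTree{ImportRoot: "example.com/proj"})
}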
diff --git a/vendor/github.com/sdboyer/vsolver/analysis_test.go b/vendor/github.com/sdboyer/vsolver/analysis_test.go
index 5431df4..4abb537 100644
--- a/vendor/github.com/sdboyer/vsolver/analysis_test.go
+++ b/vendor/github.com/sdboyer/vsolver/analysis_test.go
@@ -5,6 +5,7 @@
"os"
"path/filepath"
"reflect"
+ "strings"
"testing"
)
@@ -34,7 +35,7 @@
},
},
out: map[string][]string{
- "foo": {},
+ "foo": nil,
},
},
"no external": {
@@ -49,8 +50,8 @@
},
},
out: map[string][]string{
- "foo": {},
- "foo/bar": {},
+ "foo": nil,
+ "foo/bar": nil,
},
},
"no external with subpkg": {
@@ -67,8 +68,8 @@
},
},
out: map[string][]string{
- "foo": {},
- "foo/bar": {},
+ "foo": nil,
+ "foo/bar": nil,
},
},
"simple base transitive": {
@@ -452,6 +453,90 @@
},
},
},
+ // This case mostly exists for the PackageTree methods, but it does
+ // cover a fair range of cases
+ "varied": {
+ fileRoot: j("varied"),
+ importRoot: "varied",
+ out: PackageTree{
+ ImportRoot: "varied",
+ Packages: map[string]PackageOrErr{
+ "varied": PackageOrErr{
+ P: Package{
+ ImportPath: "varied",
+ CommentPath: "",
+ Name: "main",
+ Imports: []string{
+ "net/http",
+ "varied/namemismatch",
+ "varied/otherpath",
+ "varied/simple",
+ },
+ },
+ },
+ "varied/otherpath": PackageOrErr{
+ P: Package{
+ ImportPath: "varied/otherpath",
+ CommentPath: "",
+ Name: "otherpath",
+ Imports: []string{},
+ TestImports: []string{
+ "varied/m1p",
+ },
+ },
+ },
+ "varied/simple": PackageOrErr{
+ P: Package{
+ ImportPath: "varied/simple",
+ CommentPath: "",
+ Name: "simple",
+ Imports: []string{
+ "github.com/sdboyer/vsolver",
+ "go/parser",
+ "varied/simple/another",
+ },
+ },
+ },
+ "varied/simple/another": PackageOrErr{
+ P: Package{
+ ImportPath: "varied/simple/another",
+ CommentPath: "",
+ Name: "another",
+ Imports: []string{
+ "hash",
+ "varied/m1p",
+ },
+ TestImports: []string{
+ "encoding/binary",
+ },
+ },
+ },
+ "varied/namemismatch": PackageOrErr{
+ P: Package{
+ ImportPath: "varied/namemismatch",
+ CommentPath: "",
+ Name: "nm",
+ Imports: []string{
+ "github.com/Masterminds/semver",
+ "os",
+ },
+ },
+ },
+ "varied/m1p": PackageOrErr{
+ P: Package{
+ ImportPath: "varied/m1p",
+ CommentPath: "",
+ Name: "m1p",
+ Imports: []string{
+ "github.com/sdboyer/vsolver",
+ "os",
+ "sort",
+ },
+ },
+ },
+ },
+ },
+ },
}
for name, fix := range table {
@@ -474,12 +559,405 @@
if fix.out.ImportRoot != "" && fix.out.Packages != nil {
if !reflect.DeepEqual(out, fix.out) {
- t.Errorf("listPackages(%q): Did not receive expected package:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
+ if fix.out.ImportRoot != out.ImportRoot {
+ t.Errorf("listPackages(%q): Expected ImportRoot %s, got %s", name, fix.out.ImportRoot, out.ImportRoot)
+ }
+
+ // overwrite the out one to see if we still have a real problem
+ out.ImportRoot = fix.out.ImportRoot
+
+ if !reflect.DeepEqual(out, fix.out) {
+ if len(fix.out.Packages) < 2 {
+ t.Errorf("listPackages(%q): Did not get expected PackageOrErrs:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
+ } else {
+ seen := make(map[string]bool)
+ for path, perr := range fix.out.Packages {
+ seen[path] = true
+ if operr, exists := out.Packages[path]; !exists {
+ t.Errorf("listPackages(%q): Expected PackageOrErr for path %s was missing from output:\n\t%s", path, perr)
+ } else {
+ if !reflect.DeepEqual(perr, operr) {
+ t.Errorf("listPackages(%q): PkgOrErr for path %s was not as expected:\n\t(GOT): %s\n\t(WNT): %s", name, path, operr, perr)
+ }
+ }
+ }
+
+ for path, operr := range out.Packages {
+ if seen[path] {
+ continue
+ }
+
+ t.Errorf("listPackages(%q): Got PackageOrErr for path %s, but none was expected:\n\t%s", path, operr)
+ }
+ }
+ }
}
}
}
}
+func TestListExternalImports(t *testing.T) {
+ // There's enough in the 'varied' test case to test most of what matters
+ vptree, err := listPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
+ if err != nil {
+ t.Fatalf("listPackages failed on varied test case: %s", err)
+ }
+
+ var expect []string
+ var name string
+ var ignore map[string]bool
+ var main, tests bool
+
+ validate := func() {
+ result, err := vptree.ListExternalImports(main, tests, ignore)
+ if err != nil {
+ t.Errorf("%q case returned err: %s", name, err)
+ }
+ if !reflect.DeepEqual(expect, result) {
+ t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
+ }
+ }
+
+ all := []string{
+ "encoding/binary",
+ "github.com/Masterminds/semver",
+ "github.com/sdboyer/vsolver",
+ "go/parser",
+ "hash",
+ "net/http",
+ "os",
+ "sort",
+ }
+
+ // helper to rewrite expect, excepting specific packages
+ //
+ // this makes it easier to see what we're taking out on each test
+ except := func(not ...string) {
+ expect = make([]string, len(all)-len(not))
+
+ drop := make(map[string]bool)
+ for _, npath := range not {
+ drop[npath] = true
+ }
+
+ k := 0
+ for _, path := range all {
+ if !drop[path] {
+ expect[k] = path
+ k++
+ }
+ }
+ }
+
+ // everything on
+ name = "simple"
+ except()
+ main, tests = true, true
+ validate()
+
+ // Now without tests, which should just cut one
+ name = "no tests"
+ tests = false
+ except("encoding/binary")
+ validate()
+
+ // Now skip main, which still just cuts out one
+ name = "no main"
+ main, tests = false, true
+ except("net/http")
+ validate()
+
+ // No test and no main, which should be additive
+ name = "no test, no main"
+ main, tests = false, false
+ except("net/http", "encoding/binary")
+ validate()
+
+ // now, the ignore tests. turn main and tests back on
+ main, tests = true, true
+
+ // start with non-matching
+ name = "non-matching ignore"
+ ignore = map[string]bool{
+ "nomatch": true,
+ }
+ except()
+ validate()
+
+ // should have the same effect as ignoring main
+ name = "ignore the root"
+ ignore = map[string]bool{
+ "varied": true,
+ }
+ except("net/http")
+ validate()
+
+ // now drop a more interesting one
+ name = "ignore simple"
+ ignore = map[string]bool{
+ "varied/simple": true,
+ }
+ // we get github.com/sdboyer/vsolver from m1p, too, so it should still be
+ // there
+ except("go/parser")
+ validate()
+
+ // now drop two
+ name = "ignore simple and namemismatch"
+ ignore = map[string]bool{
+ "varied/simple": true,
+ "varied/namemismatch": true,
+ }
+ except("go/parser", "github.com/Masterminds/semver")
+ validate()
+
+ // make sure tests and main play nice with ignore
+ name = "ignore simple and namemismatch, and no tests"
+ tests = false
+ except("go/parser", "github.com/Masterminds/semver", "encoding/binary")
+ validate()
+ name = "ignore simple and namemismatch, and no main"
+ main, tests = false, true
+ except("go/parser", "github.com/Masterminds/semver", "net/http")
+ validate()
+ name = "ignore simple and namemismatch, and no main or tests"
+ main, tests = false, false
+ except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary")
+ validate()
+
+ main, tests = true, true
+
+ // ignore two that should knock out vsolver
+ name = "ignore both importers"
+ ignore = map[string]bool{
+ "varied/simple": true,
+ "varied/m1p": true,
+ }
+ except("sort", "github.com/sdboyer/vsolver", "go/parser")
+ validate()
+
+ // finally, directly ignore some external packages
+ name = "ignore external"
+ ignore = map[string]bool{
+ "github.com/sdboyer/vsolver": true,
+ "go/parser": true,
+ "sort": true,
+ }
+ except("sort", "github.com/sdboyer/vsolver", "go/parser")
+ validate()
+}
+
+func TestExternalReach(t *testing.T) {
+ // There's enough in the 'varied' test case to test most of what matters
+ vptree, err := listPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
+ if err != nil {
+ t.Fatalf("listPackages failed on varied test case: %s", err)
+ }
+
+ // Set up vars for validate closure
+ var expect map[string][]string
+ var name string
+ var main, tests bool
+ var ignore map[string]bool
+
+ validate := func() {
+ result, err := vptree.ExternalReach(main, tests, ignore)
+ if err != nil {
+ t.Errorf("ver(%q): case returned err: %s", name, err)
+ }
+ if !reflect.DeepEqual(expect, result) {
+ seen := make(map[string]bool)
+ for ip, epkgs := range expect {
+ seen[ip] = true
+ if pkgs, exists := result[ip]; !exists {
+ t.Errorf("ver(%q): expected import path %s was not present in result", name, ip)
+ } else {
+ if !reflect.DeepEqual(pkgs, epkgs) {
+ t.Errorf("ver(%q): did not get expected package set for import path %s:\n\t(GOT): %s\n\t(WNT): %s", name, ip, pkgs, epkgs)
+ }
+ }
+ }
+
+ for ip, pkgs := range result {
+ if seen[ip] {
+ continue
+ }
+ t.Errorf("ver(%q): Got packages for import path %s, but none were expected:\n\t%s", name, ip, pkgs)
+ }
+ }
+ }
+
+ all := map[string][]string{
+ "varied": {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/vsolver", "go/parser", "hash", "net/http", "os", "sort"},
+ "varied/m1p": {"github.com/sdboyer/vsolver", "os", "sort"},
+ "varied/namemismatch": {"github.com/Masterminds/semver", "os"},
+ "varied/otherpath": {"github.com/sdboyer/vsolver", "os", "sort"},
+ "varied/simple": {"encoding/binary", "github.com/sdboyer/vsolver", "go/parser", "hash", "os", "sort"},
+ "varied/simple/another": {"encoding/binary", "github.com/sdboyer/vsolver", "hash", "os", "sort"},
+ }
+ // build a map to validate the exception inputs. do this because there is
+ // enough to keep track of here that it's preferable not to have silent
+ // success if a typo creeps in and we're trying to except an import that
+ // isn't in a pkg in the first place
+ valid := make(map[string]map[string]bool)
+ for ip, expkgs := range all {
+ m := make(map[string]bool)
+ for _, pkg := range expkgs {
+ m[pkg] = true
+ }
+ valid[ip] = m
+ }
+
+ // helper to compose expect, excepting specific packages
+ //
+ // this makes it easier to see what we're taking out on each test
+ except := func(pkgig ...string) {
+ // reinit expect with everything from all
+ expect = make(map[string][]string)
+ for ip, expkgs := range all {
+ sl := make([]string, len(expkgs))
+ copy(sl, expkgs)
+ expect[ip] = sl
+ }
+
+ // now build the dropmap
+ drop := make(map[string]map[string]bool)
+ for _, igstr := range pkgig {
+ // split on space; first elem is import path to pkg, the rest are
+ // the imports to drop.
+ not := strings.Split(igstr, " ")
+ var ip string
+ ip, not = not[0], not[1:]
+ if _, exists := valid[ip]; !exists {
+ t.Fatalf("%s is not a package name we're working with, doofus", ip)
+ }
+
+ // if only a single elem was passed, though, drop the whole thing
+ if len(not) == 0 {
+ delete(expect, ip)
+ continue
+ }
+
+ m := make(map[string]bool)
+ for _, imp := range not {
+ if !valid[ip][imp] {
+ t.Fatalf("%s is not a reachable import of %s, even in the all case", imp, ip)
+ }
+ m[imp] = true
+ }
+
+ drop[ip] = m
+ }
+
+ for ip, pkgs := range expect {
+ var npkgs []string
+ for _, imp := range pkgs {
+ if !drop[ip][imp] {
+ npkgs = append(npkgs, imp)
+ }
+ }
+
+ expect[ip] = npkgs
+ }
+ }
+
+ // first, validate all
+ name = "all"
+ main, tests = true, true
+ except()
+ validate()
+
+ // turn off main pkgs, which necessarily doesn't affect anything else
+ name = "no main"
+ main = false
+ except("varied")
+ validate()
+
+ // ignoring the "varied" pkg has same effect as disabling main pkgs
+ name = "ignore root"
+ ignore = map[string]bool{
+ "varied": true,
+ }
+ main = true
+ validate()
+
+ // when we drop tests, varied/otherpath loses its link to varied/m1p and
+ // varied/simple/another loses its test import, which has a fairly big
+ // cascade
+ name = "no tests"
+ tests = false
+ ignore = nil
+ except(
+ "varied encoding/binary",
+ "varied/simple encoding/binary",
+ "varied/simple/another encoding/binary",
+ "varied/otherpath github.com/sdboyer/vsolver os sort",
+ )
+ validate()
+
+ // almost the same as previous, but varied just goes away completely
+ name = "no main or tests"
+ main = false
+ except(
+ "varied",
+ "varied/simple encoding/binary",
+ "varied/simple/another encoding/binary",
+ "varied/otherpath github.com/sdboyer/vsolver os sort",
+ )
+ validate()
+
+ // focus on ignores now, so reset main and tests
+ main, tests = true, true
+
+ // now, the fun stuff. punch a hole in the middle by cutting out
+ // varied/simple
+ name = "ignore varied/simple"
+ ignore = map[string]bool{
+ "varied/simple": true,
+ }
+ except(
+ // root pkg loses on everything in varied/simple/another
+ "varied hash encoding/binary go/parser",
+ "varied/simple",
+ )
+ validate()
+
+ // widen the hole by excluding otherpath
+ name = "ignore varied/{otherpath,simple}"
+ ignore = map[string]bool{
+ "varied/otherpath": true,
+ "varied/simple": true,
+ }
+ except(
+ // root pkg loses on everything in varied/simple/another and varied/m1p
+ "varied hash encoding/binary go/parser github.com/sdboyer/vsolver sort",
+ "varied/otherpath",
+ "varied/simple",
+ )
+ validate()
+
+ // remove namemismatch, though we're mostly beating a dead horse now
+ name = "ignore varied/{otherpath,simple,namemismatch}"
+ ignore["varied/namemismatch"] = true
+ except(
+ // root pkg loses on everything in varied/simple/another and varied/m1p
+ "varied hash encoding/binary go/parser github.com/sdboyer/vsolver sort os github.com/Masterminds/semver",
+ "varied/otherpath",
+ "varied/simple",
+ "varied/namemismatch",
+ )
+ validate()
+
+}
+
func getwd(t *testing.T) string {
cwd, err := os.Getwd()
if err != nil {
diff --git a/vendor/github.com/sdboyer/vsolver/bridge.go b/vendor/github.com/sdboyer/vsolver/bridge.go
index 7cf67ec..7f57f15 100644
--- a/vendor/github.com/sdboyer/vsolver/bridge.go
+++ b/vendor/github.com/sdboyer/vsolver/bridge.go
@@ -9,7 +9,7 @@
// sourceBridges provide an adapter to SourceManagers that tailor operations
// for a single solve run.
type sourceBridge interface {
- getProjectInfo(pa ProjectAtom) (ProjectInfo, error)
+ getProjectInfo(pa atom) (Manifest, Lock, error)
listVersions(id ProjectIdentifier) ([]Version, error)
pairRevision(id ProjectIdentifier, r Revision) []Version
pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion
@@ -19,21 +19,11 @@
matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool
intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint
listPackages(id ProjectIdentifier, v Version) (PackageTree, error)
- computeRootReach(path string) ([]string, error)
+ computeRootReach() ([]string, error)
verifyRoot(path string) error
deduceRemoteRepo(path string) (*remoteRepo, error)
}
-func newBridge(name ProjectName, root string, sm SourceManager, downgrade bool) sourceBridge {
- return &bridge{
- sm: sm,
- sortdown: downgrade,
- name: name,
- root: root,
- vlists: make(map[ProjectName][]Version),
- }
-}
-
// bridge is an adapter around a proper SourceManager. It provides localized
// caching that's tailored to the requirements of a particular solve run.
//
@@ -69,6 +59,9 @@
err error
}
+ // A map of packages to ignore.
+ ignore map[string]bool
+
// Map of project root name to their available version list. This cache is
// layered on top of the proper SourceManager's cache; the only difference
// is that this keeps the versions sorted in the direction required by the
@@ -76,8 +69,8 @@
vlists map[ProjectName][]Version
}
-func (b *bridge) getProjectInfo(pa ProjectAtom) (ProjectInfo, error) {
- return b.sm.GetProjectInfo(ProjectName(pa.Ident.netName()), pa.Version)
+func (b *bridge) getProjectInfo(pa atom) (Manifest, Lock, error) {
+ return b.sm.GetProjectInfo(ProjectName(pa.id.netName()), pa.v)
}
func (b *bridge) key(id ProjectIdentifier) ProjectName {
@@ -355,13 +348,22 @@
// analysis be in any permanent cache, and we want to read directly from our
// potentially messy root project source location on disk. Together, this means
// that we can't ask the real SourceManager to do it.
-func (b *bridge) computeRootReach(path string) ([]string, error) {
+func (b *bridge) computeRootReach() ([]string, error) {
// TODO i now cannot remember the reasons why i thought being less stringent
- // in the analysis was OK. so, for now, we just compute list of
- // externally-touched packages.
+ // in the analysis was OK. so, for now, we just compute a bog-standard list
+ // of externally-touched packages, including mains and test imports.
+ ptree, err := b.listRootPackages()
+ if err != nil {
+ return nil, err
+ }
+ return ptree.ListExternalImports(true, true, b.ignore)
+}
+
+func (b *bridge) listRootPackages() (PackageTree, error) {
if b.crp == nil {
ptree, err := listPackages(b.root, string(b.name))
+
b.crp = &struct {
ptree PackageTree
err error
@@ -371,10 +373,10 @@
}
}
if b.crp.err != nil {
- return nil, b.crp.err
+ return PackageTree{}, b.crp.err
}
- return b.crp.ptree.ListExternalImports(true, true)
+ return b.crp.ptree, nil
}
// listPackages lists all the packages contained within the given project at a
@@ -383,23 +385,13 @@
// The root project is handled separately, as the source manager isn't
// responsible for that code.
func (b *bridge) listPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
- if id.LocalName != b.name {
- // FIXME if we're aliasing here, the returned PackageTree will have
- // unaliased import paths, which is super not correct
- return b.sm.ListPackages(b.key(id), v)
- }
- if b.crp == nil {
- ptree, err := listPackages(b.root, string(b.name))
- b.crp = &struct {
- ptree PackageTree
- err error
- }{
- ptree: ptree,
- err: err,
- }
+ if id.LocalName == b.name {
+ return b.listRootPackages()
}
- return b.crp.ptree, b.crp.err
+ // FIXME if we're aliasing here, the returned PackageTree will have
+ // unaliased import paths, which is super not correct
+ return b.sm.ListPackages(b.key(id), v)
}
// verifyRoot ensures that the provided path to the project root is in good
@@ -407,9 +399,9 @@
// run.
func (b *bridge) verifyRoot(path string) error {
if fi, err := os.Stat(path); err != nil {
- return BadOptsFailure(fmt.Sprintf("Could not read project root (%s): %s", path, err))
+ return badOptsFailure(fmt.Sprintf("Could not read project root (%s): %s", path, err))
} else if !fi.IsDir() {
- return BadOptsFailure(fmt.Sprintf("Project root (%s) is a file, not a directory.", path))
+ return badOptsFailure(fmt.Sprintf("Project root (%s) is a file, not a directory.", path))
}
return nil
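The bridge refactor above routes all root-tree parsing through listRootPackages, so computeRootReach and listPackages share one cached result, error included. A standalone, simplified sketch of that cache-the-value-and-the-error pattern follows; the names are hypothetical and the string stands in for PackageTree.

package main

import "fmt"

// rootLister mirrors, in simplified form, the bridge's crp field: the first
// call does the expensive parse, and both the result and any error are cached
// so later callers never repeat the work.
type rootLister struct {
	crp *struct {
		tree string // stands in for the real PackageTree
		err  error
	}
}

func (l *rootLister) listRootPackages() (string, error) {
	if l.crp == nil {
		tree, err := expensiveParse()
		l.crp = &struct {
			tree string
			err  error
		}{tree: tree, err: err}
	}
	if l.crp.err != nil {
		return "", l.crp.err
	}
	return l.crp.tree, nil
}

// expensiveParse stands in for listPackages(b.root, string(b.name)).
func expensiveParse() (string, error) {
	fmt.Println("parsing (should print once)")
	return "tree", nil
}

func main() {
	l := &rootLister{}
	l.listRootPackages()
	l.listRootPackages() // second call is served from the cache
}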
diff --git a/vendor/github.com/sdboyer/vsolver/errors.go b/vendor/github.com/sdboyer/vsolver/errors.go
index 58e0952..18f50fb 100644
--- a/vendor/github.com/sdboyer/vsolver/errors.go
+++ b/vendor/github.com/sdboyer/vsolver/errors.go
@@ -17,11 +17,6 @@
cannotResolve
)
-type SolveError interface {
- error
- Children() []error
-}
-
type traceError interface {
traceString() string
}
@@ -77,35 +72,35 @@
}
type disjointConstraintFailure struct {
- goal Dependency
- failsib []Dependency
- nofailsib []Dependency
+ goal dependency
+ failsib []dependency
+ nofailsib []dependency
c Constraint
}
func (e *disjointConstraintFailure) Error() string {
if len(e.failsib) == 1 {
str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which has no overlap with existing constraint %s from %s at %s"
- return fmt.Sprintf(str, e.goal.Depender.Ident.errString(), e.goal.Depender.Version, e.goal.Dep.Ident.errString(), e.goal.Dep.Constraint.String(), e.failsib[0].Dep.Constraint.String(), e.failsib[0].Depender.Ident.errString(), e.failsib[0].Depender.Version)
+ return fmt.Sprintf(str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String(), e.failsib[0].dep.Constraint.String(), e.failsib[0].depender.id.errString(), e.failsib[0].depender.v)
}
var buf bytes.Buffer
- var sibs []Dependency
+ var sibs []dependency
if len(e.failsib) > 1 {
sibs = e.failsib
str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which has no overlap with the following existing constraints:\n"
- fmt.Fprintf(&buf, str, e.goal.Depender.Ident.errString(), e.goal.Depender.Version, e.goal.Dep.Ident.errString(), e.goal.Dep.Constraint.String())
+ fmt.Fprintf(&buf, str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
} else {
sibs = e.nofailsib
str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which does not overlap with the intersection of existing constraints from other currently selected packages:\n"
- fmt.Fprintf(&buf, str, e.goal.Depender.Ident.errString(), e.goal.Depender.Version, e.goal.Dep.Ident.errString(), e.goal.Dep.Constraint.String())
+ fmt.Fprintf(&buf, str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
}
for _, c := range sibs {
- fmt.Fprintf(&buf, "\t%s from %s at %s\n", c.Dep.Constraint.String(), c.Depender.Ident.errString(), c.Depender.Version)
+ fmt.Fprintf(&buf, "\t%s from %s at %s\n", c.dep.Constraint.String(), c.depender.id.errString(), c.depender.v)
}
return buf.String()
@@ -113,12 +108,12 @@
func (e *disjointConstraintFailure) traceString() string {
var buf bytes.Buffer
- fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.Dep.Constraint.String(), e.goal.Dep.Ident.errString())
+ fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.dep.Constraint.String(), e.goal.dep.Ident.errString())
for _, f := range e.failsib {
- fmt.Fprintf(&buf, "%s from %s at %s (no overlap)\n", f.Dep.Constraint.String(), f.Depender.Ident.LocalName, f.Depender.Version)
+ fmt.Fprintf(&buf, "%s from %s at %s (no overlap)\n", f.dep.Constraint.String(), f.depender.id.LocalName, f.depender.v)
}
for _, f := range e.nofailsib {
- fmt.Fprintf(&buf, "%s from %s at %s (some overlap)\n", f.Dep.Constraint.String(), f.Depender.Ident.LocalName, f.Depender.Version)
+ fmt.Fprintf(&buf, "%s from %s at %s (some overlap)\n", f.dep.Constraint.String(), f.depender.id.LocalName, f.depender.v)
}
return buf.String()
@@ -128,39 +123,39 @@
// constraints does not admit the currently-selected version of the target
// project.
type constraintNotAllowedFailure struct {
- goal Dependency
+ goal dependency
v Version
}
func (e *constraintNotAllowedFailure) Error() string {
str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which does not allow the currently selected version of %s"
- return fmt.Sprintf(str, e.goal.Depender.Ident.errString(), e.goal.Depender.Version, e.goal.Dep.Ident.errString(), e.goal.Dep.Constraint, e.v)
+ return fmt.Sprintf(str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint, e.v)
}
func (e *constraintNotAllowedFailure) traceString() string {
str := "%s at %s depends on %s with %s, but that's already selected at %s"
- return fmt.Sprintf(str, e.goal.Depender.Ident.LocalName, e.goal.Depender.Version, e.goal.Dep.Ident.LocalName, e.goal.Dep.Constraint, e.v)
+ return fmt.Sprintf(str, e.goal.depender.id.LocalName, e.goal.depender.v, e.goal.dep.Ident.LocalName, e.goal.dep.Constraint, e.v)
}
type versionNotAllowedFailure struct {
- goal ProjectAtom
- failparent []Dependency
+ goal atom
+ failparent []dependency
c Constraint
}
func (e *versionNotAllowedFailure) Error() string {
if len(e.failparent) == 1 {
str := "Could not introduce %s at %s, as it is not allowed by constraint %s from project %s."
- return fmt.Sprintf(str, e.goal.Ident.errString(), e.goal.Version, e.failparent[0].Dep.Constraint.String(), e.failparent[0].Depender.Ident.errString())
+ return fmt.Sprintf(str, e.goal.id.errString(), e.goal.v, e.failparent[0].dep.Constraint.String(), e.failparent[0].depender.id.errString())
}
var buf bytes.Buffer
str := "Could not introduce %s at %s, as it is not allowed by constraints from the following projects:\n"
- fmt.Fprintf(&buf, str, e.goal.Ident.errString(), e.goal.Version)
+ fmt.Fprintf(&buf, str, e.goal.id.errString(), e.goal.v)
for _, f := range e.failparent {
- fmt.Fprintf(&buf, "\t%s from %s at %s\n", f.Dep.Constraint.String(), f.Depender.Ident.errString(), f.Depender.Version)
+ fmt.Fprintf(&buf, "\t%s from %s at %s\n", f.dep.Constraint.String(), f.depender.id.errString(), f.depender.v)
}
return buf.String()
@@ -169,9 +164,9 @@
func (e *versionNotAllowedFailure) traceString() string {
var buf bytes.Buffer
- fmt.Fprintf(&buf, "%s at %s not allowed by constraint %s:\n", e.goal.Ident.LocalName, e.goal.Version, e.c.String())
+ fmt.Fprintf(&buf, "%s at %s not allowed by constraint %s:\n", e.goal.id.LocalName, e.goal.v, e.c.String())
for _, f := range e.failparent {
- fmt.Fprintf(&buf, " %s from %s at %s\n", f.Dep.Constraint.String(), f.Depender.Ident.LocalName, f.Depender.Version)
+ fmt.Fprintf(&buf, " %s from %s at %s\n", f.dep.Constraint.String(), f.depender.id.LocalName, f.depender.v)
}
return buf.String()
@@ -186,36 +181,36 @@
return fmt.Sprintf(e.prob, e.goal)
}
-type BadOptsFailure string
+type badOptsFailure string
-func (e BadOptsFailure) Error() string {
+func (e badOptsFailure) Error() string {
return string(e)
}
type sourceMismatchFailure struct {
shared ProjectName
- sel []Dependency
+ sel []dependency
current, mismatch string
- prob ProjectAtom
+ prob atom
}
func (e *sourceMismatchFailure) Error() string {
var cur []string
for _, c := range e.sel {
- cur = append(cur, string(c.Depender.Ident.LocalName))
+ cur = append(cur, string(c.depender.id.LocalName))
}
str := "Could not introduce %s at %s, as it depends on %s from %s, but %s is already marked as coming from %s by %s"
- return fmt.Sprintf(str, e.prob.Ident.errString(), e.prob.Version, e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
+ return fmt.Sprintf(str, e.prob.id.errString(), e.prob.v, e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
}
func (e *sourceMismatchFailure) traceString() string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "disagreement on network addr for %s:\n", e.shared)
- fmt.Fprintf(&buf, " %s from %s\n", e.mismatch, e.prob.Ident.errString())
+ fmt.Fprintf(&buf, " %s from %s\n", e.mismatch, e.prob.id.errString())
for _, dep := range e.sel {
- fmt.Fprintf(&buf, " %s from %s\n", e.current, dep.Depender.Ident.errString())
+ fmt.Fprintf(&buf, " %s from %s\n", e.current, dep.depender.id.errString())
}
return buf.String()
@@ -223,10 +218,10 @@
type errDeppers struct {
err error
- deppers []ProjectAtom
+ deppers []atom
}
type checkeeHasProblemPackagesFailure struct {
- goal ProjectAtom
+ goal atom
failpkg map[string]errDeppers
}
@@ -238,8 +233,8 @@
indent = "\t"
fmt.Fprintf(
&buf, "Could not introduce %s at %s due to multiple problematic subpackages:\n",
- e.goal.Ident.errString(),
- e.goal.Version,
+ e.goal.id.errString(),
+ e.goal.v,
)
}
@@ -254,8 +249,8 @@
if len(e.failpkg) == 1 {
fmt.Fprintf(
&buf, "Could not introduce %s at %s, as its subpackage %s %s.",
- e.goal.Ident.errString(),
- e.goal.Version,
+ e.goal.id.errString(),
+ e.goal.v,
pkg,
cause,
)
@@ -266,13 +261,13 @@
if len(errdep.deppers) == 1 {
fmt.Fprintf(
&buf, " (Package is required by %s at %s.)",
- errdep.deppers[0].Ident.errString(),
- errdep.deppers[0].Version,
+ errdep.deppers[0].id.errString(),
+ errdep.deppers[0].v,
)
} else {
fmt.Fprintf(&buf, " Package is required by:")
for _, pa := range errdep.deppers {
- fmt.Fprintf(&buf, "\n%s\t%s at %s", indent, pa.Ident.errString(), pa.Version)
+ fmt.Fprintf(&buf, "\n%s\t%s at %s", indent, pa.id.errString(), pa.v)
}
}
}
@@ -283,7 +278,7 @@
func (e *checkeeHasProblemPackagesFailure) traceString() string {
var buf bytes.Buffer
- fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.Ident.LocalName, e.goal.Version)
+ fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.id.LocalName, e.goal.v)
for pkg, errdep := range e.failpkg {
if errdep.err == nil {
fmt.Fprintf(&buf, "\t%s is missing; ", pkg)
@@ -294,13 +289,13 @@
if len(errdep.deppers) == 1 {
fmt.Fprintf(
&buf, "required by %s at %s.",
- errdep.deppers[0].Ident.errString(),
- errdep.deppers[0].Version,
+ errdep.deppers[0].id.errString(),
+ errdep.deppers[0].v,
)
} else {
fmt.Fprintf(&buf, " required by:")
for _, pa := range errdep.deppers {
- fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.Ident.errString(), pa.Version)
+ fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.id.errString(), pa.v)
}
}
}
@@ -309,7 +304,7 @@
}
type depHasProblemPackagesFailure struct {
- goal Dependency
+ goal dependency
v Version
pl []string
prob map[string]error
@@ -329,10 +324,10 @@
if len(e.pl) == 1 {
return fmt.Sprintf(
"Could not introduce %s at %s, as it requires package %s from %s, but in version %s that package %s",
- e.goal.Depender.Ident.errString(),
- e.goal.Depender.Version,
+ e.goal.depender.id.errString(),
+ e.goal.depender.v,
e.pl[0],
- e.goal.Dep.Ident.errString(),
+ e.goal.dep.Ident.errString(),
e.v,
fcause(e.pl[0]),
)
@@ -341,9 +336,9 @@
var buf bytes.Buffer
fmt.Fprintf(
&buf, "Could not introduce %s at %s, as it requires problematic packages from %s (current version %s):",
- e.goal.Depender.Ident.errString(),
- e.goal.Depender.Version,
- e.goal.Dep.Ident.errString(),
+ e.goal.depender.id.errString(),
+ e.goal.depender.v,
+ e.goal.dep.Ident.errString(),
e.v,
)
@@ -368,9 +363,9 @@
fmt.Fprintf(
&buf, "%s at %s depping on %s at %s has problem subpkg(s):",
- e.goal.Depender.Ident.errString(),
- e.goal.Depender.Version,
- e.goal.Dep.Ident.errString(),
+ e.goal.depender.id.errString(),
+ e.goal.depender.v,
+ e.goal.dep.Ident.errString(),
e.v,
)
diff --git a/vendor/github.com/sdboyer/vsolver/flags.go b/vendor/github.com/sdboyer/vsolver/flags.go
index 1e9cc5e..8a7880f 100644
--- a/vendor/github.com/sdboyer/vsolver/flags.go
+++ b/vendor/github.com/sdboyer/vsolver/flags.go
@@ -1,20 +1,9 @@
package vsolver
-// ProjectExistence values represent the extent to which a project "exists."
-type ProjectExistence uint8
+// projectExistence values represent the extent to which a project "exists."
+type projectExistence uint8
const (
- // ExistsInLock indicates that a project exists (i.e., is mentioned in) a
- // lock file.
- // TODO not sure if it makes sense to have this IF it's just the source
- // manager's responsibility for putting this together - the implication is
- // that this is the root lock file, right?
- ExistsInLock = 1 << iota
-
- // ExistsInManifest indicates that a project exists (i.e., is mentioned in)
- // a manifest.
- ExistsInManifest
-
// ExistsInVendorRoot indicates that a project exists in a vendor directory
// at the predictable location based on import path. It does NOT imply, much
// less guarantee, any of the following:
@@ -30,7 +19,7 @@
//
// In short, the information encoded in this flag should not be construed as
// exhaustive.
- ExistsInVendorRoot
+ existsInVendorRoot projectExistence = 1 << iota
// ExistsInCache indicates that a project exists on-disk in the local cache.
// It does not guarantee that an upstream exists, thus it cannot imply
@@ -40,16 +29,9 @@
// Additionally, this refers only to the existence of the local repository
// itself; it says nothing about the existence or completeness of the
// separate metadata cache.
- ExistsInCache
+ existsInCache
// ExistsUpstream indicates that a project repository was locatable at the
// path provided by a project's URI (a base import path).
- ExistsUpstream
-)
-
-const (
- // Bitmask for existence levels that are managed by the ProjectManager
- pmexLvls ProjectExistence = ExistsInVendorRoot | ExistsInCache | ExistsUpstream
- // Bitmask for existence levels that are managed by the SourceManager
- smexLvls ProjectExistence = ExistsInLock | ExistsInManifest
+ existsUpstream
)
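The renamed constants above pack existence levels into a bitmask, and the projectManager keeps two masks: which levels it has searched and which it actually found. A self-contained sketch of that bookkeeping, with hypothetical names:

package main

import "fmt"

type existenceLevel uint8

const (
	inVendorRoot existenceLevel = 1 << iota
	inCache
	upstream
)

// existence tracks which levels have been checked (searched) and which of
// those checks succeeded (found), mirroring the s/f fields on the existence
// struct in project_manager.go.
type existence struct {
	searched, found existenceLevel
}

func (e *existence) check(lvl existenceLevel, present bool) {
	e.searched |= lvl
	if present {
		e.found |= lvl
	}
}

func main() {
	var e existence
	e.check(inCache, true)
	e.check(upstream, false)

	query := inCache | upstream
	fmt.Println(e.searched&query == query) // true: both levels were checked
	fmt.Println(e.found&query == query)    // false: upstream wasn't found
}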
diff --git a/vendor/github.com/sdboyer/vsolver/hash.go b/vendor/github.com/sdboyer/vsolver/hash.go
index 570c943..5fe87aa 100644
--- a/vendor/github.com/sdboyer/vsolver/hash.go
+++ b/vendor/github.com/sdboyer/vsolver/hash.go
@@ -25,12 +25,12 @@
}
// Pass in magic root values, and the bridge will analyze the right thing
- ptree, err := s.b.listPackages(ProjectIdentifier{LocalName: s.args.N}, nil)
+ ptree, err := s.b.listPackages(ProjectIdentifier{LocalName: s.args.Name}, nil)
if err != nil {
- return nil, BadOptsFailure(fmt.Sprintf("Error while parsing imports under %s: %s", s.args.Root, err.Error()))
+ return nil, badOptsFailure(fmt.Sprintf("Error while parsing imports under %s: %s", s.args.Root, err.Error()))
}
- d, dd := s.args.M.GetDependencies(), s.args.M.GetDevDependencies()
+ d, dd := s.args.Manifest.DependencyConstraints(), s.args.Manifest.TestDependencyConstraints()
p := make(sortedDeps, len(d))
copy(p, d)
p = append(p, dd...)
@@ -72,9 +72,24 @@
}
}
+ // Add the package ignores, if any.
+ if len(s.ig) > 0 {
+ // Dump and sort the ignores
+ ig := make([]string, len(s.ig))
+ k := 0
+ for pkg := range s.ig {
+ ig[k] = pkg
+ k++
+ }
+ sort.Strings(ig)
+
+ for _, igp := range ig {
+ h.Write([]byte(igp))
+ }
+ }
+
// TODO overrides
// TODO aliases
- // TODO ignores
return h.Sum(nil), nil
}
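The ignore handling above copies the set into a slice and sorts it before writing to the hasher, so the input digest cannot depend on Go's randomized map iteration order. A standalone sketch of that property; hashIgnores is a hypothetical helper, not part of the package:

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// hashIgnores hashes the ignore set in sorted order so two equal sets always
// produce the same digest, regardless of map iteration order.
func hashIgnores(ig map[string]bool) [sha256.Size]byte {
	keys := make([]string, 0, len(ig))
	for pkg := range ig {
		keys = append(keys, pkg)
	}
	sort.Strings(keys)

	h := sha256.New()
	for _, pkg := range keys {
		h.Write([]byte(pkg))
	}
	var sum [sha256.Size]byte
	copy(sum[:], h.Sum(nil))
	return sum
}

func main() {
	a := hashIgnores(map[string]bool{"foo": true, "bar": true})
	b := hashIgnores(map[string]bool{"bar": true, "foo": true})
	fmt.Println(a == b) // true: sorting removes any dependence on map order
}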
diff --git a/vendor/github.com/sdboyer/vsolver/hash_test.go b/vendor/github.com/sdboyer/vsolver/hash_test.go
index b0f49bb..4bbb7d2 100644
--- a/vendor/github.com/sdboyer/vsolver/hash_test.go
+++ b/vendor/github.com/sdboyer/vsolver/hash_test.go
@@ -10,13 +10,14 @@
fix := basicFixtures[2]
args := SolveArgs{
- Root: string(fix.ds[0].Name()),
- N: fix.ds[0].Name(),
- M: fix.ds[0],
+ Root: string(fix.ds[0].Name()),
+ Name: fix.ds[0].Name(),
+ Manifest: fix.ds[0],
+ Ignore: []string{"foo", "bar"},
}
// prep a fixture-overridden solver
- si, err := Prepare(args, SolveOpts{}, newdepspecSM(fix.ds))
+ si, err := Prepare(args, SolveOpts{}, newdepspecSM(fix.ds, nil))
s := si.(*solver)
if err != nil {
t.Fatalf("Could not prepare solver due to err: %s", err)
@@ -33,7 +34,7 @@
}
h := sha256.New()
- for _, v := range []string{"a", "a", "1.0.0", "b", "b", "1.0.0", stdlibPkgs, "root", "", "root", "a", "b"} {
+ for _, v := range []string{"a", "a", "1.0.0", "b", "b", "1.0.0", stdlibPkgs, "root", "", "root", "a", "b", "bar", "foo"} {
h.Write([]byte(v))
}
correct := h.Sum(nil)
diff --git a/vendor/github.com/sdboyer/vsolver/lock.go b/vendor/github.com/sdboyer/vsolver/lock.go
index b906981..19a75d3 100644
--- a/vendor/github.com/sdboyer/vsolver/lock.go
+++ b/vendor/github.com/sdboyer/vsolver/lock.go
@@ -116,17 +116,17 @@
return lp.path
}
-func (lp LockedProject) toAtom() ProjectAtom {
- pa := ProjectAtom{
- Ident: lp.Ident(),
+func (lp LockedProject) toAtom() atom {
+ pa := atom{
+ id: lp.Ident(),
}
if lp.v == nil {
- pa.Version = lp.r
+ pa.v = lp.r
} else if lp.r != "" {
- pa.Version = lp.v.Is(lp.r)
+ pa.v = lp.v.Is(lp.r)
} else {
- pa.Version = lp.v
+ pa.v = lp.v
}
return pa
diff --git a/vendor/github.com/sdboyer/vsolver/manager_test.go b/vendor/github.com/sdboyer/vsolver/manager_test.go
index 38bc0d7..98e0e38 100644
--- a/vendor/github.com/sdboyer/vsolver/manager_test.go
+++ b/vendor/github.com/sdboyer/vsolver/manager_test.go
@@ -40,7 +40,7 @@
if err != nil {
t.Errorf("Failed to create temp dir: %s", err)
}
- _, err = NewSourceManager(cpath, bd, false, dummyAnalyzer{})
+ _, err = NewSourceManager(dummyAnalyzer{}, cpath, bd, false)
if err != nil {
t.Errorf("Unexpected error on SourceManager creation: %s", err)
@@ -52,12 +52,12 @@
}
}()
- _, err = NewSourceManager(cpath, bd, false, dummyAnalyzer{})
+ _, err = NewSourceManager(dummyAnalyzer{}, cpath, bd, false)
if err == nil {
t.Errorf("Creating second SourceManager should have failed due to file lock contention")
}
- sm, err := NewSourceManager(cpath, bd, true, dummyAnalyzer{})
+ sm, err := NewSourceManager(dummyAnalyzer{}, cpath, bd, true)
defer sm.Release()
if err != nil {
t.Errorf("Creating second SourceManager should have succeeded when force flag was passed, but failed with err %s", err)
@@ -78,7 +78,7 @@
if err != nil {
t.Errorf("Failed to create temp dir: %s", err)
}
- sm, err := NewSourceManager(cpath, bd, false, dummyAnalyzer{})
+ sm, err := NewSourceManager(dummyAnalyzer{}, cpath, bd, false)
if err != nil {
t.Errorf("Unexpected error on SourceManager creation: %s", err)
@@ -186,7 +186,7 @@
}
// Check upstream existence flag
- if !pms.pm.CheckExistence(ExistsUpstream) {
+ if !pms.pm.CheckExistence(existsUpstream) {
t.Errorf("ExistsUpstream flag not being correctly set the project")
}
}
@@ -202,7 +202,7 @@
t.Errorf("Failed to create temp dir: %s", err)
}
- smi, err := NewSourceManager(cpath, bd, false, dummyAnalyzer{})
+ smi, err := NewSourceManager(dummyAnalyzer{}, cpath, bd, false)
if err != nil {
t.Errorf("Unexpected error on SourceManager creation: %s", err)
t.FailNow()
@@ -224,7 +224,7 @@
t.Errorf("Unexpected error on ProjectManager creation: %s", err)
t.FailNow()
}
- pms[k] = pmi.pm.(*projectManager)
+ pms[k] = pmi.pm
}
defer func() {
@@ -240,7 +240,7 @@
if err != nil {
t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
}
- if exbits != ExistsUpstream {
+ if exbits != existsUpstream {
t.Errorf("git pair fetch should only set upstream existence bits, but got %v", exbits)
}
if len(vlist) != 3 {
@@ -267,7 +267,7 @@
if err != nil {
t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
}
- if exbits != ExistsUpstream|ExistsInCache {
+ if exbits != existsUpstream|existsInCache {
t.Errorf("hg pair fetch should set upstream and cache existence bits, but got %v", exbits)
}
if len(vlist) != 2 {
@@ -289,7 +289,7 @@
if err != nil {
t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
}
- if exbits != ExistsUpstream|ExistsInCache {
+ if exbits != existsUpstream|existsInCache {
t.Errorf("bzr pair fetch should set upstream and cache existence bits, but got %v", exbits)
}
if len(vlist) != 1 {
@@ -314,7 +314,7 @@
if err != nil {
t.Errorf("Failed to create temp dir: %s", err)
}
- sm, err := NewSourceManager(cpath, bd, false, dummyAnalyzer{})
+ sm, err := NewSourceManager(dummyAnalyzer{}, cpath, bd, false)
if err != nil {
t.Errorf("Unexpected error on SourceManager creation: %s", err)
@@ -332,7 +332,7 @@
pn := ProjectName("github.com/Masterminds/VCSTestRepo")
- _, err = sm.GetProjectInfo(pn, NewVersion("1.0.0"))
+ _, _, err = sm.GetProjectInfo(pn, NewVersion("1.0.0"))
if err != nil {
t.Errorf("Unexpected error from GetInfoAt %s", err)
}
diff --git a/vendor/github.com/sdboyer/vsolver/manifest.go b/vendor/github.com/sdboyer/vsolver/manifest.go
index 89dd0da..51dac26 100644
--- a/vendor/github.com/sdboyer/vsolver/manifest.go
+++ b/vendor/github.com/sdboyer/vsolver/manifest.go
@@ -2,11 +2,8 @@
// Manifest represents the data from a manifest file (or however the
// implementing tool chooses to store it) at a particular version that is
-// relevant to the satisfiability solving process:
-//
-// - A list of dependencies: project name, and a constraint
-// - A list of development-time dependencies (e.g. for testing - only
-// the root project's are incorporated)
+// relevant to the satisfiability solving process. That means constraints on
+// dependencies, both for normal dependencies and for tests.
//
// Finding a solution that satisfies the constraints expressed by all of these
// dependencies (and those from all other projects, transitively), is what the
@@ -18,8 +15,8 @@
// from consideration in the solving algorithm.
type Manifest interface {
Name() ProjectName
- GetDependencies() []ProjectDep
- GetDevDependencies() []ProjectDep
+ DependencyConstraints() []ProjectDep
+ TestDependencyConstraints() []ProjectDep
}
// SimpleManifest is a helper for tools to enumerate manifest data. It's
@@ -27,9 +24,9 @@
// the fly for projects with no manifest metadata, or metadata through a foreign
// tool's idioms.
type SimpleManifest struct {
- N ProjectName
- P []ProjectDep
- DP []ProjectDep
+ N ProjectName
+ Deps []ProjectDep
+ TestDeps []ProjectDep
}
var _ Manifest = SimpleManifest{}
@@ -40,13 +37,13 @@
}
-// GetDependencies returns the project's dependencies.
+// DependencyConstraints returns the project's dependencies.
-func (m SimpleManifest) GetDependencies() []ProjectDep {
- return m.P
+func (m SimpleManifest) DependencyConstraints() []ProjectDep {
+ return m.Deps
}
-// GetDependencies returns the project's test dependencies.
+// TestDependencyConstraints returns the project's test dependencies.
-func (m SimpleManifest) GetDevDependencies() []ProjectDep {
- return m.DP
+func (m SimpleManifest) TestDependencyConstraints() []ProjectDep {
+ return m.TestDeps
}
// prepManifest ensures a manifest is prepared and safe for use by the solver.
@@ -67,22 +64,22 @@
}
}
- deps := m.GetDependencies()
- ddeps := m.GetDevDependencies()
+ deps := m.DependencyConstraints()
+ ddeps := m.TestDependencyConstraints()
rm := SimpleManifest{
- N: m.Name(),
- P: make([]ProjectDep, len(deps)),
- DP: make([]ProjectDep, len(ddeps)),
+ N: m.Name(),
+ Deps: make([]ProjectDep, len(deps)),
+ TestDeps: make([]ProjectDep, len(ddeps)),
}
for k, d := range deps {
d.Ident = d.Ident.normalize()
- rm.P[k] = d
+ rm.Deps[k] = d
}
for k, d := range ddeps {
d.Ident = d.Ident.normalize()
- rm.DP[k] = d
+ rm.TestDeps[k] = d
}
return rm
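With GetDependencies and GetDevDependencies renamed, a tool-side Manifest implementation is now written against DependencyConstraints and TestDependencyConstraints. A short sketch; staticManifest and the project name are hypothetical, while the interface methods and types come from this diff:

package main

import (
	"fmt"

	"github.com/sdboyer/vsolver"
)

// staticManifest is a hypothetical fixed-data Manifest, similar in spirit to
// SimpleManifest, implementing the renamed interface methods.
type staticManifest struct {
	name     vsolver.ProjectName
	deps     []vsolver.ProjectDep
	testDeps []vsolver.ProjectDep
}

func (m staticManifest) Name() vsolver.ProjectName { return m.name }

func (m staticManifest) DependencyConstraints() []vsolver.ProjectDep { return m.deps }

func (m staticManifest) TestDependencyConstraints() []vsolver.ProjectDep { return m.testDeps }

func main() {
	var m vsolver.Manifest = staticManifest{name: "example.com/proj"}
	fmt.Println(m.Name(), len(m.DependencyConstraints()), len(m.TestDependencyConstraints()))
}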
diff --git a/vendor/github.com/sdboyer/vsolver/project_manager.go b/vendor/github.com/sdboyer/vsolver/project_manager.go
index bd92a7f..dd10e6a 100644
--- a/vendor/github.com/sdboyer/vsolver/project_manager.go
+++ b/vendor/github.com/sdboyer/vsolver/project_manager.go
@@ -15,18 +15,6 @@
"github.com/termie/go-shutil"
)
-type ProjectManager interface {
- GetInfoAt(Version) (ProjectInfo, error)
- ListVersions() ([]Version, error)
- CheckExistence(ProjectExistence) bool
- ExportVersionTo(Version, string) error
- ListPackages(Version) (PackageTree, error)
-}
-
-type ProjectAnalyzer interface {
- GetInfo(build.Context, ProjectName) (Manifest, Lock, error)
-}
-
type projectManager struct {
// The identifier of the project. At this level, corresponds to the
// '$GOPATH/src'-relative path, *and* the network name.
@@ -59,20 +47,26 @@
type existence struct {
// The existence levels for which a search/check has been performed
- s ProjectExistence
+ s projectExistence
// The existence levels verified to be present through searching
- f ProjectExistence
+ f projectExistence
}
// TODO figure out shape of versions, then implement marshaling/unmarshaling
type projectDataCache struct {
Version string `json:"version"` // TODO use this
- Infos map[Revision]ProjectInfo `json:"infos"`
+ Infos map[Revision]projectInfo `json:"infos"`
VMap map[Version]Revision `json:"vmap"`
RMap map[Revision][]Version `json:"rmap"`
}
+// projectInfo holds a project's Manifest and Lock.
+type projectInfo struct {
+ Manifest
+ Lock
+}
+
type repo struct {
// Path to the root of the default working copy (NOT the repo itself)
rpath string
@@ -87,14 +81,14 @@
synced bool
}
-func (pm *projectManager) GetInfoAt(v Version) (ProjectInfo, error) {
+func (pm *projectManager) GetInfoAt(v Version) (Manifest, Lock, error) {
if err := pm.ensureCacheExistence(); err != nil {
- return ProjectInfo{}, err
+ return nil, nil, err
}
if r, exists := pm.dc.VMap[v]; exists {
if pi, exists := pm.dc.Infos[r]; exists {
- return pi, nil
+ return pi.Manifest, pi.Lock, nil
}
}
@@ -103,7 +97,7 @@
if !pm.crepo.synced {
err = pm.crepo.r.Update()
if err != nil {
- return ProjectInfo{}, fmt.Errorf("Could not fetch latest updates into repository")
+ return nil, nil, fmt.Errorf("Could not fetch latest updates into repository")
}
pm.crepo.synced = true
}
@@ -131,17 +125,19 @@
}
// If m is nil, prepManifest will provide an empty one.
- return ProjectInfo{
- // TODO disagreement between the manifest's name and N is still
- // scary
- V: v,
- N: pm.n,
+ pi := projectInfo{
Manifest: prepManifest(m, pm.n),
Lock: l,
- }, nil
+ }
+
+ if r, exists := pm.dc.VMap[v]; exists {
+ pm.dc.Infos[r] = pi
+ }
+
+ return pi.Manifest, pi.Lock, nil
}
- return ProjectInfo{}, err
+ return nil, nil, err
}
func (pm *projectManager) ListPackages(v Version) (PackageTree, error) {
@@ -180,14 +176,14 @@
// would allow weird state inconsistencies (cache exists, but no repo...how
// does that even happen?) that it'd be better to just not allow so that we
// don't have to think about it elsewhere
- if !pm.CheckExistence(ExistsInCache) {
- if pm.CheckExistence(ExistsUpstream) {
+ if !pm.CheckExistence(existsInCache) {
+ if pm.CheckExistence(existsUpstream) {
err := pm.crepo.r.Get()
if err != nil {
return fmt.Errorf("Failed to create repository cache for %s", pm.n)
}
- pm.ex.s |= ExistsInCache
- pm.ex.f |= ExistsInCache
+ pm.ex.s |= existsInCache
+ pm.ex.f |= existsInCache
} else {
return fmt.Errorf("Project repository cache for %s does not exist", pm.n)
}
@@ -199,7 +195,7 @@
func (pm *projectManager) ListVersions() (vlist []Version, err error) {
if !pm.cvsync {
// This check only guarantees that the upstream exists, not the cache
- pm.ex.s |= ExistsUpstream
+ pm.ex.s |= existsUpstream
vpairs, exbits, err := pm.crepo.getCurrentVersionPairs()
// But it *may* also check the local existence
pm.ex.s |= exbits
@@ -213,7 +209,7 @@
vlist = make([]Version, len(vpairs))
// mark our cache as synced if we got ExistsUpstream back
- if exbits&ExistsUpstream == ExistsUpstream {
+ if exbits&existsUpstream == existsUpstream {
pm.cvsync = true
}
@@ -245,29 +241,29 @@
// Note that this may perform read-ish operations on the cache repo, and it
// takes a lock accordingly. Deadlock may result from calling it during a
// segment where the cache repo mutex is already write-locked.
-func (pm *projectManager) CheckExistence(ex ProjectExistence) bool {
+func (pm *projectManager) CheckExistence(ex projectExistence) bool {
if pm.ex.s&ex != ex {
- if ex&ExistsInVendorRoot != 0 && pm.ex.s&ExistsInVendorRoot == 0 {
- pm.ex.s |= ExistsInVendorRoot
+ if ex&existsInVendorRoot != 0 && pm.ex.s&existsInVendorRoot == 0 {
+ pm.ex.s |= existsInVendorRoot
fi, err := os.Stat(path.Join(pm.vendordir, string(pm.n)))
if err == nil && fi.IsDir() {
- pm.ex.f |= ExistsInVendorRoot
+ pm.ex.f |= existsInVendorRoot
}
}
- if ex&ExistsInCache != 0 && pm.ex.s&ExistsInCache == 0 {
+ if ex&existsInCache != 0 && pm.ex.s&existsInCache == 0 {
pm.crepo.mut.RLock()
- pm.ex.s |= ExistsInCache
+ pm.ex.s |= existsInCache
if pm.crepo.r.CheckLocal() {
- pm.ex.f |= ExistsInCache
+ pm.ex.f |= existsInCache
}
pm.crepo.mut.RUnlock()
}
- if ex&ExistsUpstream != 0 && pm.ex.s&ExistsUpstream == 0 {
+ if ex&existsUpstream != 0 && pm.ex.s&existsUpstream == 0 {
pm.crepo.mut.RLock()
- pm.ex.s |= ExistsUpstream
+ pm.ex.s |= existsUpstream
if pm.crepo.r.Ping() {
- pm.ex.f |= ExistsUpstream
+ pm.ex.f |= existsUpstream
}
pm.crepo.mut.RUnlock()
}
@@ -280,7 +276,7 @@
return pm.crepo.exportVersionTo(v, to)
}
-func (r *repo) getCurrentVersionPairs() (vlist []PairedVersion, exbits ProjectExistence, err error) {
+func (r *repo) getCurrentVersionPairs() (vlist []PairedVersion, exbits projectExistence, err error) {
r.mut.Lock()
defer r.mut.Unlock()
@@ -307,7 +303,7 @@
}
// Upstream and cache must exist, so add that to exbits
- exbits |= ExistsUpstream | ExistsInCache
+ exbits |= existsUpstream | existsInCache
// Also, local is definitely now synced
r.synced = true
@@ -319,7 +315,7 @@
all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
}
// Local cache may not actually exist here, but upstream definitely does
- exbits |= ExistsUpstream
+ exbits |= existsUpstream
tmap := make(map[string]PairedVersion)
for _, pair := range all {
@@ -358,7 +354,7 @@
return
}
// Upstream and cache must exist, so add that to exbits
- exbits |= ExistsUpstream | ExistsInCache
+ exbits |= existsUpstream | existsInCache
// Also, local is definitely now synced
r.synced = true
@@ -383,7 +379,7 @@
}
// Upstream and cache must exist, so add that to exbits
- exbits |= ExistsUpstream | ExistsInCache
+ exbits |= existsUpstream | existsInCache
// Also, local is definitely now synced
r.synced = true
diff --git a/vendor/github.com/sdboyer/vsolver/remote.go b/vendor/github.com/sdboyer/vsolver/remote.go
index 37d95e4..b04b9ce 100644
--- a/vendor/github.com/sdboyer/vsolver/remote.go
+++ b/vendor/github.com/sdboyer/vsolver/remote.go
@@ -2,6 +2,8 @@
import (
"fmt"
+ "io"
+ "net/http"
"net/url"
"regexp"
"strings"
@@ -218,6 +220,87 @@
}
}
- // TODO use HTTP metadata to resolve vanity imports
- return nil, fmt.Errorf("unable to deduce repository and source type for: %q", path)
+ // No luck so far. Maybe it's one of them vanity imports?
+ importroot, vcs, reporoot, err := parseMetadata(path)
+ if err != nil {
+ return nil, fmt.Errorf("unable to deduce repository and source type for: %q", path)
+ }
+
+ // If we got something back at all, then it supersedes the actual input for
+ // the real URL to hit
+ rr.CloneURL, err = url.Parse(reporoot)
+ if err != nil {
+ return nil, fmt.Errorf("server returned bad URL when searching for vanity import: %q", reporoot)
+ }
+
+ // We have a real URL. Set the other values and return.
+ rr.Base = importroot
+ rr.RelPkg = strings.TrimPrefix(path[len(importroot):], "/")
+
+ rr.VCS = []string{vcs}
+ if rr.CloneURL.Scheme != "" {
+ rr.Schemes = []string{rr.CloneURL.Scheme}
+ }
+
+ return rr, nil
+}
+
+// fetchMetadata fetches the remote metadata for path.
+func fetchMetadata(path string) (rc io.ReadCloser, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("unable to determine remote metadata protocol: %s", err)
+ }
+ }()
+
+ // try https first
+ rc, err = doFetchMetadata("https", path)
+ if err == nil {
+ return
+ }
+
+ rc, err = doFetchMetadata("http", path)
+ return
+}
+
+func doFetchMetadata(scheme, path string) (io.ReadCloser, error) {
+ url := fmt.Sprintf("%s://%s?go-get=1", scheme, path)
+ switch scheme {
+ case "https", "http":
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, fmt.Errorf("failed to access url %q", url)
+ }
+ return resp.Body, nil
+ default:
+ return nil, fmt.Errorf("unknown remote protocol scheme: %q", scheme)
+ }
+}
+
+// parseMetadata fetches and decodes remote metadata for path.
+func parseMetadata(path string) (string, string, string, error) {
+ rc, err := fetchMetadata(path)
+ if err != nil {
+ return "", "", "", err
+ }
+ defer rc.Close()
+
+ imports, err := parseMetaGoImports(rc)
+ if err != nil {
+ return "", "", "", err
+ }
+ match := -1
+ for i, im := range imports {
+ if !strings.HasPrefix(path, im.Prefix) {
+ continue
+ }
+ if match != -1 {
+ return "", "", "", fmt.Errorf("multiple meta tags match import path %q", path)
+ }
+ match = i
+ }
+ if match == -1 {
+ return "", "", "", fmt.Errorf("go-import metadata not found")
+ }
+ return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil
}
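
The vanity-import resolution added above relies on the standard go-get discovery convention: request the import path with ?go-get=1 over HTTPS (falling back to HTTP) and read the go-import meta tag, whose content carries the import prefix, the VCS type, and the repository root. A rough standalone sketch of that convention, using a plain regexp instead of vsolver's parseMetaGoImports helper and hitting the real rsc.io/pdf endpoint from the test fixtures below, could look like this:

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"
        "regexp"
        "strings"
    )

    // Simplified matcher for <meta name="go-import" content="prefix vcs repo">.
    // Real pages may order attributes differently; vsolver uses an HTML
    // tokenizer (parseMetaGoImports) rather than a regexp.
    var metaRe = regexp.MustCompile(`<meta\s+name="go-import"\s+content="([^"]+)"`)

    func main() {
        resp, err := http.Get("https://rsc.io/pdf?go-get=1")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        body, err := io.ReadAll(resp.Body)
        if err != nil {
            log.Fatal(err)
        }

        for _, m := range metaRe.FindAllStringSubmatch(string(body), -1) {
            // content is "<import-prefix> <vcs> <repo-root>"
            fields := strings.Fields(m[1])
            if len(fields) == 3 {
                fmt.Printf("prefix=%s vcs=%s repo=%s\n", fields[0], fields[1], fields[2])
            }
        }
    }

For rsc.io/pdf this should report the prefix rsc.io/pdf, vcs git, and repo root https://github.com/rsc/pdf, matching the new test fixture added below.
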
diff --git a/vendor/github.com/sdboyer/vsolver/remote_test.go b/vendor/github.com/sdboyer/vsolver/remote_test.go
index 10537ca..3bac9ae 100644
--- a/vendor/github.com/sdboyer/vsolver/remote_test.go
+++ b/vendor/github.com/sdboyer/vsolver/remote_test.go
@@ -8,6 +8,10 @@
)
func TestDeduceRemotes(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping remote deduction test in short mode")
+ }
+
fixtures := []struct {
path string
want *remoteRepo
@@ -365,7 +369,50 @@
VCS: []string{"git"},
},
},
- // Regression - gh does allow 2-letter usernames
+ // Vanity imports
+ {
+ "golang.org/x/exp",
+ &remoteRepo{
+ Base: "golang.org/x/exp",
+ RelPkg: "",
+ CloneURL: &url.URL{
+ Scheme: "https",
+ Host: "go.googlesource.com",
+ Path: "/exp",
+ },
+ Schemes: []string{"https"},
+ VCS: []string{"git"},
+ },
+ },
+ {
+ "golang.org/x/exp/inotify",
+ &remoteRepo{
+ Base: "golang.org/x/exp",
+ RelPkg: "inotify",
+ CloneURL: &url.URL{
+ Scheme: "https",
+ Host: "go.googlesource.com",
+ Path: "/exp",
+ },
+ Schemes: []string{"https"},
+ VCS: []string{"git"},
+ },
+ },
+ {
+ "rsc.io/pdf",
+ &remoteRepo{
+ Base: "rsc.io/pdf",
+ RelPkg: "",
+ CloneURL: &url.URL{
+ Scheme: "https",
+ Host: "github.com",
+ Path: "/rsc/pdf",
+ },
+ Schemes: []string{"https"},
+ VCS: []string{"git"},
+ },
+ },
+ // Regression - gh does allow two-letter usernames
{
"github.com/kr/pretty",
&remoteRepo{
diff --git a/vendor/github.com/sdboyer/vsolver/result.go b/vendor/github.com/sdboyer/vsolver/result.go
index 426be6a..e6e929e 100644
--- a/vendor/github.com/sdboyer/vsolver/result.go
+++ b/vendor/github.com/sdboyer/vsolver/result.go
@@ -7,6 +7,8 @@
"path/filepath"
)
+// A Result is returned by a solver run. It is mostly just a Lock, with some
+// additional methods that report information about the solve run.
type Result interface {
Lock
Attempts() int
diff --git a/vendor/github.com/sdboyer/vsolver/result_test.go b/vendor/github.com/sdboyer/vsolver/result_test.go
index 605328e..5419d32 100644
--- a/vendor/github.com/sdboyer/vsolver/result_test.go
+++ b/vendor/github.com/sdboyer/vsolver/result_test.go
@@ -8,7 +8,7 @@
)
var basicResult result
-var kub ProjectAtom
+var kub atom
// An analyzer that passes nothing back, but doesn't error. This expressly
// creates a situation that shouldn't be able to happen from a general solver
@@ -29,21 +29,21 @@
basicResult = result{
att: 1,
p: []LockedProject{
- pa2lp(ProjectAtom{
- Ident: pi("github.com/sdboyer/testrepo"),
- Version: NewBranch("master").Is(Revision("4d59fb584b15a94d7401e356d2875c472d76ef45")),
+ pa2lp(atom{
+ id: pi("github.com/sdboyer/testrepo"),
+ v: NewBranch("master").Is(Revision("4d59fb584b15a94d7401e356d2875c472d76ef45")),
}, nil),
- pa2lp(ProjectAtom{
- Ident: pi("github.com/Masterminds/VCSTestRepo"),
- Version: NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
+ pa2lp(atom{
+ id: pi("github.com/Masterminds/VCSTestRepo"),
+ v: NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
}, nil),
},
}
// just in case something needs punishing, kubernetes is happy to oblige
- kub = ProjectAtom{
- Ident: pi("github.com/kubernetes/kubernetes"),
- Version: NewVersion("1.0.0").Is(Revision("528f879e7d3790ea4287687ef0ab3f2a01cc2718")),
+ kub = atom{
+ id: pi("github.com/kubernetes/kubernetes"),
+ v: NewVersion("1.0.0").Is(Revision("528f879e7d3790ea4287687ef0ab3f2a01cc2718")),
}
}
@@ -58,7 +58,7 @@
tmp := path.Join(os.TempDir(), "vsolvtest")
os.RemoveAll(tmp)
- sm, err := NewSourceManager(path.Join(tmp, "cache"), path.Join(tmp, "base"), false, passthruAnalyzer{})
+ sm, err := NewSourceManager(passthruAnalyzer{}, path.Join(tmp, "cache"), path.Join(tmp, "base"), false)
if err != nil {
t.Errorf("NewSourceManager errored unexpectedly: %q", err)
}
@@ -79,7 +79,7 @@
tmp := path.Join(os.TempDir(), "vsolvtest")
clean := true
- sm, err := NewSourceManager(path.Join(tmp, "cache"), path.Join(tmp, "base"), true, passthruAnalyzer{})
+ sm, err := NewSourceManager(passthruAnalyzer{}, path.Join(tmp, "cache"), path.Join(tmp, "base"), true)
if err != nil {
b.Errorf("NewSourceManager errored unexpectedly: %q", err)
clean = false
@@ -87,7 +87,7 @@
// Prefetch the projects before timer starts
for _, lp := range r.p {
- _, err := sm.GetProjectInfo(lp.Ident().LocalName, lp.Version())
+ _, _, err := sm.GetProjectInfo(lp.Ident().LocalName, lp.Version())
if err != nil {
b.Errorf("failed getting project info during prefetch: %s", err)
clean = false
diff --git a/vendor/github.com/sdboyer/vsolver/satisfy.go b/vendor/github.com/sdboyer/vsolver/satisfy.go
index 174d95c..c431cdc 100644
--- a/vendor/github.com/sdboyer/vsolver/satisfy.go
+++ b/vendor/github.com/sdboyer/vsolver/satisfy.go
@@ -4,7 +4,7 @@
// that we want to select. It determines if selecting the atom would result in
// a state where all solver requirements are still satisfied.
func (s *solver) checkProject(a atomWithPackages) error {
- pa := a.atom
+ pa := a.a
if nilpa == pa {
// This shouldn't be able to happen, but if it does, it unequivocally
// indicates a logical bug somewhere, so blowing up is preferable
@@ -49,7 +49,7 @@
// already-selected project. It determines if selecting the packages would
// result in a state where all solver requirements are still satisfied.
func (s *solver) checkPackage(a atomWithPackages) error {
- if nilpa == a.atom {
+ if nilpa == a.a {
// This shouldn't be able to happen, but if it does, it unequivocally
// indicates a logical bug somewhere, so blowing up is preferable
panic("canary - checking version of empty ProjectAtom")
@@ -83,18 +83,18 @@
// checkAtomAllowable ensures that an atom itself is acceptable with respect to
// the constraints established by the current solution.
-func (s *solver) checkAtomAllowable(pa ProjectAtom) error {
- constraint := s.sel.getConstraint(pa.Ident)
- if s.b.matches(pa.Ident, constraint, pa.Version) {
+func (s *solver) checkAtomAllowable(pa atom) error {
+ constraint := s.sel.getConstraint(pa.id)
+ if s.b.matches(pa.id, constraint, pa.v) {
return nil
}
// TODO collect constraint failure reason (wait...aren't we, below?)
- deps := s.sel.getDependenciesOn(pa.Ident)
- var failparent []Dependency
+ deps := s.sel.getDependenciesOn(pa.id)
+ var failparent []dependency
for _, dep := range deps {
- if !s.b.matches(pa.Ident, dep.Dep.Constraint, pa.Version) {
- s.fail(dep.Depender.Ident)
+ if !s.b.matches(pa.id, dep.dep.Constraint, pa.v) {
+ s.fail(dep.depender.id)
failparent = append(failparent, dep)
}
}
@@ -112,28 +112,28 @@
// checkRequiredPackagesExist ensures that all required packages enumerated by
// existing dependencies on this atom are actually present in the atom.
func (s *solver) checkRequiredPackagesExist(a atomWithPackages) error {
- ptree, err := s.b.listPackages(a.atom.Ident, a.atom.Version)
+ ptree, err := s.b.listPackages(a.a.id, a.a.v)
if err != nil {
// TODO handle this more gracefully
return err
}
- deps := s.sel.getDependenciesOn(a.atom.Ident)
+ deps := s.sel.getDependenciesOn(a.a.id)
fp := make(map[string]errDeppers)
// We inspect these in a bit of a roundabout way, in order to incrementally
// build up the failure we'd return if there is, indeed, a missing package.
// TODO rechecking all of these every time is wasteful. Is there a shortcut?
for _, dep := range deps {
- for _, pkg := range dep.Dep.pl {
+ for _, pkg := range dep.dep.pl {
if errdep, seen := fp[pkg]; seen {
- errdep.deppers = append(errdep.deppers, dep.Depender)
+ errdep.deppers = append(errdep.deppers, dep.depender)
fp[pkg] = errdep
} else {
perr, has := ptree.Packages[pkg]
if !has || perr.Err != nil {
fp[pkg] = errDeppers{
err: perr.Err,
- deppers: []ProjectAtom{dep.Depender},
+ deppers: []atom{dep.depender},
}
}
}
@@ -142,7 +142,7 @@
if len(fp) > 0 {
e := &checkeeHasProblemPackagesFailure{
- goal: a.atom,
+ goal: a.a,
failpkg: fp,
}
s.logSolve(e)
@@ -164,11 +164,11 @@
siblings := s.sel.getDependenciesOn(dep.Ident)
// No admissible versions - visit all siblings and identify the disagreement(s)
- var failsib []Dependency
- var nofailsib []Dependency
+ var failsib []dependency
+ var nofailsib []dependency
for _, sibling := range siblings {
- if !s.b.matchesAny(dep.Ident, sibling.Dep.Constraint, dep.Constraint) {
- s.fail(sibling.Depender.Ident)
+ if !s.b.matchesAny(dep.Ident, sibling.dep.Constraint, dep.Constraint) {
+ s.fail(sibling.depender.id)
failsib = append(failsib, sibling)
} else {
nofailsib = append(nofailsib, sibling)
@@ -176,7 +176,7 @@
}
err := &disjointConstraintFailure{
- goal: Dependency{Depender: a.atom, Dep: cdep},
+ goal: dependency{depender: a.a, dep: cdep},
failsib: failsib,
nofailsib: nofailsib,
c: constraint,
@@ -191,12 +191,12 @@
func (s *solver) checkDepsDisallowsSelected(a atomWithPackages, cdep completeDep) error {
dep := cdep.ProjectDep
selected, exists := s.sel.selected(dep.Ident)
- if exists && !s.b.matches(dep.Ident, dep.Constraint, selected.atom.Version) {
+ if exists && !s.b.matches(dep.Ident, dep.Constraint, selected.a.v) {
s.fail(dep.Ident)
err := &constraintNotAllowedFailure{
- goal: Dependency{Depender: a.atom, Dep: cdep},
- v: selected.atom.Version,
+ goal: dependency{depender: a.a, dep: cdep},
+ v: selected.a.v,
}
s.logSolve(err)
return err
@@ -215,11 +215,11 @@
dep := cdep.ProjectDep
if cur, exists := s.names[dep.Ident.LocalName]; exists {
if cur != dep.Ident.netName() {
- deps := s.sel.getDependenciesOn(a.atom.Ident)
+ deps := s.sel.getDependenciesOn(a.a.id)
// Fail all the other deps, as there's no way atom can ever be
// compatible with them
for _, d := range deps {
- s.fail(d.Depender.Ident)
+ s.fail(d.depender.id)
}
err := &sourceMismatchFailure{
@@ -227,7 +227,7 @@
sel: deps,
current: cur,
mismatch: dep.Ident.netName(),
- prob: a.atom,
+ prob: a.a,
}
s.logSolve(err)
return err
@@ -246,18 +246,18 @@
return nil
}
- ptree, err := s.b.listPackages(sel.atom.Ident, sel.atom.Version)
+ ptree, err := s.b.listPackages(sel.a.id, sel.a.v)
if err != nil {
// TODO handle this more gracefully
return err
}
e := &depHasProblemPackagesFailure{
- goal: Dependency{
- Depender: a.atom,
- Dep: cdep,
+ goal: dependency{
+ depender: a.a,
+ dep: cdep,
},
- v: sel.atom.Version,
+ v: sel.a.v,
prob: make(map[string]error),
}
diff --git a/vendor/github.com/sdboyer/vsolver/selection.go b/vendor/github.com/sdboyer/vsolver/selection.go
index cfff305..9aaac4d 100644
--- a/vendor/github.com/sdboyer/vsolver/selection.go
+++ b/vendor/github.com/sdboyer/vsolver/selection.go
@@ -2,7 +2,7 @@
type selection struct {
projects []selected
- deps map[ProjectIdentifier][]Dependency
+ deps map[ProjectIdentifier][]dependency
sm sourceBridge
}
@@ -11,7 +11,7 @@
first bool
}
-func (s *selection) getDependenciesOn(id ProjectIdentifier) []Dependency {
+func (s *selection) getDependenciesOn(id ProjectIdentifier) []dependency {
if deps, exists := s.deps[id]; exists {
return deps
}
@@ -39,11 +39,11 @@
return sel.a, sel.first
}
-func (s *selection) pushDep(dep Dependency) {
- s.deps[dep.Dep.Ident] = append(s.deps[dep.Dep.Ident], dep)
+func (s *selection) pushDep(dep dependency) {
+ s.deps[dep.dep.Ident] = append(s.deps[dep.dep.Ident], dep)
}
-func (s *selection) popDep(id ProjectIdentifier) (dep Dependency) {
+func (s *selection) popDep(id ProjectIdentifier) (dep dependency) {
deps := s.deps[id]
dep, s.deps[id] = deps[len(deps)-1], deps[:len(deps)-1]
return dep
@@ -53,7 +53,7 @@
return len(s.deps[id])
}
-func (s *selection) setDependenciesOn(id ProjectIdentifier, deps []Dependency) {
+func (s *selection) setDependenciesOn(id ProjectIdentifier, deps []dependency) {
s.deps[id] = deps
}
@@ -65,7 +65,7 @@
// structure so that we can pop with zero cost.
uniq := make(map[string]int)
for _, dep := range s.deps[id] {
- for _, pkg := range dep.Dep.pl {
+ for _, pkg := range dep.dep.pl {
if count, has := uniq[pkg]; has {
count++
uniq[pkg] = count
@@ -87,7 +87,7 @@
// structure so that we can pop with zero cost.
uniq := make(map[string]int)
for _, p := range s.projects {
- if p.a.atom.Ident.eq(id) {
+ if p.a.a.id.eq(id) {
for _, pkg := range p.a.pl {
if count, has := uniq[pkg]; has {
count++
@@ -118,7 +118,7 @@
// Start with the open set
var ret Constraint = any
for _, dep := range deps {
- ret = s.sm.intersect(id, ret, dep.Dep.Constraint)
+ ret = s.sm.intersect(id, ret, dep.dep.Constraint)
}
return ret
@@ -133,12 +133,12 @@
// have happened later.
func (s *selection) selected(id ProjectIdentifier) (atomWithPackages, bool) {
for _, p := range s.projects {
- if p.a.atom.Ident.eq(id) {
+ if p.a.a.id.eq(id) {
return p.a, true
}
}
- return atomWithPackages{atom: nilpa}, false
+ return atomWithPackages{a: nilpa}, false
}
// TODO take a ProjectName, but optionally also a preferred version. This will
diff --git a/vendor/github.com/sdboyer/vsolver/solve_basic_test.go b/vendor/github.com/sdboyer/vsolver/solve_basic_test.go
index c4c4cb3..910cd05 100644
--- a/vendor/github.com/sdboyer/vsolver/solve_basic_test.go
+++ b/vendor/github.com/sdboyer/vsolver/solve_basic_test.go
@@ -68,7 +68,7 @@
//
// Splits the input string on a space, and uses the first two elements as the
// project name and constraint body, respectively.
-func mksvpa(info string) ProjectAtom {
+func mksvpa(info string) atom {
id, ver, rev := nsvrSplit(info)
_, err := semver.NewVersion(ver)
@@ -83,9 +83,9 @@
v = v.(UnpairedVersion).Is(rev)
}
- return ProjectAtom{
- Ident: id,
- Version: v,
+ return atom{
+ id: id,
+ v: v,
}
}
@@ -132,13 +132,13 @@
// First string is broken out into the name/semver of the main package.
func dsv(pi string, deps ...string) depspec {
pa := mksvpa(pi)
- if string(pa.Ident.LocalName) != pa.Ident.NetworkName {
+ if string(pa.id.LocalName) != pa.id.NetworkName {
panic("alternate source on self makes no sense")
}
ds := depspec{
- n: pa.Ident.LocalName,
- v: pa.Version,
+ n: pa.id.LocalName,
+ v: pa.v,
}
for _, dep := range deps {
@@ -161,7 +161,7 @@
l := make(fixLock, 0)
for _, s := range pairs {
pa := mksvpa(s)
- l = append(l, NewLockedProject(pa.Ident.LocalName, pa.Version, pa.Ident.netName(), "", nil))
+ l = append(l, NewLockedProject(pa.id.LocalName, pa.v, pa.id.netName(), "", nil))
}
return l
@@ -173,7 +173,7 @@
l := make(fixLock, 0)
for _, s := range pairs {
pa := mksvpa(s)
- l = append(l, NewLockedProject(pa.Ident.LocalName, pa.Version.(PairedVersion).Underlying(), pa.Ident.netName(), "", nil))
+ l = append(l, NewLockedProject(pa.id.LocalName, pa.v.(PairedVersion).Underlying(), pa.id.netName(), "", nil))
}
return l
@@ -863,37 +863,42 @@
type depspecSourceManager struct {
specs []depspec
rm reachMap
+ ig map[string]bool
}
type fixSM interface {
SourceManager
rootSpec() depspec
allSpecs() []depspec
+ ignore() map[string]bool
}
var _ fixSM = &depspecSourceManager{}
-func newdepspecSM(ds []depspec) *depspecSourceManager {
+func newdepspecSM(ds []depspec, ignore []string) *depspecSourceManager {
+ ig := make(map[string]bool)
+ if len(ignore) > 0 {
+ for _, pkg := range ignore {
+ ig[pkg] = true
+ }
+ }
+
return &depspecSourceManager{
specs: ds,
rm: computeBasicReachMap(ds),
+ ig: ig,
}
}
-func (sm *depspecSourceManager) GetProjectInfo(n ProjectName, v Version) (ProjectInfo, error) {
+func (sm *depspecSourceManager) GetProjectInfo(n ProjectName, v Version) (Manifest, Lock, error) {
for _, ds := range sm.specs {
if n == ds.n && v.Matches(ds.v) {
- return ProjectInfo{
- N: ds.n,
- V: ds.v,
- Manifest: ds,
- Lock: dummyLock{},
- }, nil
+ return ds, dummyLock{}, nil
}
}
// TODO proper solver-type errors
- return ProjectInfo{}, fmt.Errorf("Project '%s' at version '%s' could not be found", n, v)
+ return nil, nil, fmt.Errorf("Project '%s' at version '%s' could not be found", n, v)
}
func (sm *depspecSourceManager) ExternalReach(n ProjectName, v Version) (map[string][]string, error) {
@@ -976,25 +981,27 @@
return sm.specs
}
+func (sm *depspecSourceManager) ignore() map[string]bool {
+ return sm.ig
+}
+
type depspecBridge struct {
*bridge
}
// override computeRootReach() on bridge to read directly out of the depspecs
-func (b *depspecBridge) computeRootReach(path string) ([]string, error) {
+func (b *depspecBridge) computeRootReach() ([]string, error) {
// This only gets called for the root project, so grab that one off the test
// source manager
dsm := b.sm.(fixSM)
root := dsm.rootSpec()
- if string(root.n) != path {
- return nil, fmt.Errorf("Expected only root project %q to computeRootReach(), got %q", root.n, path)
- }
ptree, err := dsm.ListPackages(root.n, nil)
if err != nil {
return nil, err
}
- return ptree.ListExternalImports(true, true)
+
+ return ptree.ListExternalImports(true, true, dsm.ignore())
}
// override verifyRoot() on bridge to prevent any filesystem interaction
@@ -1008,7 +1015,7 @@
}
func (b *depspecBridge) listPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
- return b.sm.ListPackages(b.key(id), v)
+ return b.sm.(fixSM).ListPackages(b.key(id), v)
}
// override deduceRemoteRepo on bridge to make all our pkg/project mappings work
@@ -1032,12 +1039,12 @@
var _ Lock = fixLock{}
// impl Spec interface
-func (ds depspec) GetDependencies() []ProjectDep {
+func (ds depspec) DependencyConstraints() []ProjectDep {
return ds.deps
}
// impl Spec interface
-func (ds depspec) GetDevDependencies() []ProjectDep {
+func (ds depspec) TestDependencyConstraints() []ProjectDep {
return ds.devdeps
}
diff --git a/vendor/github.com/sdboyer/vsolver/solve_bimodal_test.go b/vendor/github.com/sdboyer/vsolver/solve_bimodal_test.go
index 119ced0..21df3cb 100644
--- a/vendor/github.com/sdboyer/vsolver/solve_bimodal_test.go
+++ b/vendor/github.com/sdboyer/vsolver/solve_bimodal_test.go
@@ -347,6 +347,46 @@
},
errp: []string{"d", "a", "d"},
},
+ // Check ignores on the root project
+ "ignore in double-subpkg": {
+ ds: []depspec{
+ dsp(dsv("root 0.0.0"),
+ pkg("root", "root/foo"),
+ pkg("root/foo", "root/bar", "b"),
+ pkg("root/bar", "a"),
+ ),
+ dsp(dsv("a 1.0.0"),
+ pkg("a"),
+ ),
+ dsp(dsv("b 1.0.0"),
+ pkg("b"),
+ ),
+ },
+ ignore: []string{"root/bar"},
+ r: mkresults(
+ "b 1.0.0",
+ ),
+ },
+ // Ignores on a dep pkg
+ "ignore through dep pkg": {
+ ds: []depspec{
+ dsp(dsv("root 0.0.0"),
+ pkg("root", "root/foo"),
+ pkg("root/foo", "a"),
+ ),
+ dsp(dsv("a 1.0.0"),
+ pkg("a", "a/bar"),
+ pkg("a/bar", "b"),
+ ),
+ dsp(dsv("b 1.0.0"),
+ pkg("b"),
+ ),
+ },
+ ignore: []string{"a/bar"},
+ r: mkresults(
+ "a 1.0.0",
+ ),
+ },
}
// tpkg is a representation of a single package. It has its own import path, as
@@ -375,6 +415,8 @@
errp []string
// request up/downgrade to all projects
changeall bool
+ // pkgs to ignore
+ ignore []string
}
func (f bimodalFixture) name() string {
@@ -406,9 +448,10 @@
var _ SourceManager = &bmSourceManager{}
-func newbmSM(ds []depspec) *bmSourceManager {
- sm := &bmSourceManager{}
- sm.specs = ds
+func newbmSM(ds []depspec, ignore []string) *bmSourceManager {
+ sm := &bmSourceManager{
+ depspecSourceManager: *newdepspecSM(ds, ignore),
+ }
sm.rm = computeBimodalExternalMap(ds)
return sm
diff --git a/vendor/github.com/sdboyer/vsolver/solve_test.go b/vendor/github.com/sdboyer/vsolver/solve_test.go
index 66dcd91..5c54683 100644
--- a/vendor/github.com/sdboyer/vsolver/solve_test.go
+++ b/vendor/github.com/sdboyer/vsolver/solve_test.go
@@ -6,6 +6,7 @@
"io/ioutil"
"log"
"os"
+ "reflect"
"sort"
"strings"
"testing"
@@ -59,13 +60,13 @@
if testing.Verbose() {
stderrlog.Printf("[[fixture %q]]", fix.n)
}
- sm := newdepspecSM(fix.ds)
+ sm := newdepspecSM(fix.ds, nil)
args := SolveArgs{
- Root: string(fix.ds[0].Name()),
- N: ProjectName(fix.ds[0].Name()),
- M: fix.ds[0],
- L: dummyLock{},
+ Root: string(fix.ds[0].Name()),
+ Name: ProjectName(fix.ds[0].Name()),
+ Manifest: fix.ds[0],
+ Lock: dummyLock{},
}
o := SolveOpts{
@@ -74,7 +75,7 @@
}
if fix.l != nil {
- args.L = fix.l
+ args.Lock = fix.l
}
res, err = fixSolve(args, o, sm)
@@ -112,13 +113,14 @@
if testing.Verbose() {
stderrlog.Printf("[[fixture %q]]", fix.n)
}
- sm := newbmSM(fix.ds)
+ sm := newbmSM(fix.ds, fix.ignore)
args := SolveArgs{
- Root: string(fix.ds[0].Name()),
- N: ProjectName(fix.ds[0].Name()),
- M: fix.ds[0],
- L: dummyLock{},
+ Root: string(fix.ds[0].Name()),
+ Name: ProjectName(fix.ds[0].Name()),
+ Manifest: fix.ds[0],
+ Lock: dummyLock{},
+ Ignore: fix.ignore,
}
o := SolveOpts{
@@ -127,7 +129,7 @@
}
if fix.l != nil {
- args.L = fix.l
+ args.Lock = fix.l
}
res, err = fixSolve(args, o, sm)
@@ -144,7 +146,7 @@
}
switch fail := err.(type) {
- case *BadOptsFailure:
+ case *badOptsFailure:
t.Errorf("(fixture: %q) Unexpected bad opts failure solve error: %s", fix.name(), err)
case *noVersionError:
if errp[0] != string(fail.pn.LocalName) { // TODO identifierify
@@ -191,7 +193,7 @@
t.Errorf("(fixture: %q) Solver succeeded, but expected failure", fix.name())
} else {
r := res.(result)
- if fix.maxTries() > 0 && r.att > fix.maxTries() {
+ if fix.maxTries() > 0 && r.Attempts() > fix.maxTries() {
t.Errorf("(fixture: %q) Solver completed in %v attempts, but expected %v or fewer", fix.name(), r.att, fix.maxTries())
}
@@ -199,7 +201,7 @@
rp := make(map[string]Version)
for _, p := range r.p {
pa := p.toAtom()
- rp[string(pa.Ident.LocalName)] = pa.Version
+ rp[string(pa.id.LocalName)] = pa.v
}
fixlen, rlen := len(fix.result()), len(rp)
@@ -265,17 +267,17 @@
pd.Constraint = Revision("foorev")
fix.ds[0].deps[0] = pd
- sm := newdepspecSM(fix.ds)
+ sm := newdepspecSM(fix.ds, nil)
l2 := make(fixLock, 1)
copy(l2, fix.l)
l2[0].v = nil
args := SolveArgs{
- Root: string(fix.ds[0].Name()),
- N: ProjectName(fix.ds[0].Name()),
- M: fix.ds[0],
- L: l2,
+ Root: string(fix.ds[0].Name()),
+ Name: ProjectName(fix.ds[0].Name()),
+ Manifest: fix.ds[0],
+ Lock: l2,
}
res, err := fixSolve(args, SolveOpts{}, sm)
@@ -289,29 +291,29 @@
projs = append(projs, string(e.pn.LocalName)) // TODO identifierify
case *disjointConstraintFailure:
for _, f := range e.failsib {
- projs = append(projs, string(f.Depender.Ident.LocalName))
+ projs = append(projs, string(f.depender.id.LocalName))
}
case *versionNotAllowedFailure:
for _, f := range e.failparent {
- projs = append(projs, string(f.Depender.Ident.LocalName))
+ projs = append(projs, string(f.depender.id.LocalName))
}
case *constraintNotAllowedFailure:
// No sane way of knowing why the currently selected version is
// selected, so do nothing
case *sourceMismatchFailure:
- projs = append(projs, string(e.prob.Ident.LocalName))
+ projs = append(projs, string(e.prob.id.LocalName))
for _, c := range e.sel {
- projs = append(projs, string(c.Depender.Ident.LocalName))
+ projs = append(projs, string(c.depender.id.LocalName))
}
case *checkeeHasProblemPackagesFailure:
- projs = append(projs, string(e.goal.Ident.LocalName))
+ projs = append(projs, string(e.goal.id.LocalName))
for _, errdep := range e.failpkg {
for _, atom := range errdep.deppers {
- projs = append(projs, string(atom.Ident.LocalName))
+ projs = append(projs, string(atom.id.LocalName))
}
}
case *depHasProblemPackagesFailure:
- projs = append(projs, string(e.goal.Depender.Ident.LocalName), string(e.goal.Dep.Ident.LocalName))
+ projs = append(projs, string(e.goal.depender.id.LocalName), string(e.goal.dep.Ident.LocalName))
default:
panic("unknown failtype")
}
@@ -320,7 +322,7 @@
}
func TestBadSolveOpts(t *testing.T) {
- sm := newdepspecSM(basicFixtures[0].ds)
+ sm := newdepspecSM(basicFixtures[0].ds, nil)
o := SolveOpts{}
args := SolveArgs{}
@@ -329,8 +331,8 @@
t.Errorf("Should have errored on missing manifest")
}
- p, _ := sm.GetProjectInfo(basicFixtures[0].ds[0].n, basicFixtures[0].ds[0].v)
- args.M = p.Manifest
+ m, _, _ := sm.GetProjectInfo(basicFixtures[0].ds[0].n, basicFixtures[0].ds[0].v)
+ args.Manifest = m
_, err = Prepare(args, o, sm)
if err == nil {
t.Errorf("Should have errored on empty root")
@@ -342,7 +344,7 @@
t.Errorf("Should have errored on empty name")
}
- args.N = "root"
+ args.Name = "root"
_, err = Prepare(args, o, sm)
if err != nil {
t.Errorf("Basic conditions satisfied, solve should have gone through, err was %s", err)
@@ -360,3 +362,27 @@
t.Errorf("Basic conditions re-satisfied, solve should have gone through, err was %s", err)
}
}
+
+func TestIgnoreDedupe(t *testing.T) {
+ fix := basicFixtures[0]
+
+ ig := []string{"foo", "foo", "bar"}
+ args := SolveArgs{
+ Root: string(fix.ds[0].Name()),
+ Name: ProjectName(fix.ds[0].Name()),
+ Manifest: fix.ds[0],
+ Ignore: ig,
+ }
+
+ s, _ := Prepare(args, SolveOpts{}, newdepspecSM(basicFixtures[0].ds, nil))
+ ts := s.(*solver)
+
+ expect := map[string]bool{
+ "foo": true,
+ "bar": true,
+ }
+
+ if !reflect.DeepEqual(ts.ig, expect) {
+ t.Errorf("Expected solver's ignore list to be deduplicated map, got %s", ts.ig)
+ }
+}
diff --git a/vendor/github.com/sdboyer/vsolver/solver.go b/vendor/github.com/sdboyer/vsolver/solver.go
index 9b3782a..0ea3dbe 100644
--- a/vendor/github.com/sdboyer/vsolver/solver.go
+++ b/vendor/github.com/sdboyer/vsolver/solver.go
@@ -15,8 +15,8 @@
var (
// With a random revision and no name, collisions are unlikely
- nilpa = ProjectAtom{
- Version: Revision(strconv.FormatInt(rand.Int63(), 36)),
+ nilpa = atom{
+ v: Revision(strconv.FormatInt(rand.Int63(), 36)),
}
)
@@ -27,17 +27,22 @@
// The 'name' of the project. Required. This should (must?) correspond to subpath of
// Root that exists under a GOPATH.
- N ProjectName
+ Name ProjectName
// The root manifest. Required. This contains all the dependencies, constraints, and
// other controls available to the root project.
- M Manifest
+ Manifest Manifest
// The root lock. Optional. Generally, this lock is the output of a previous solve run.
//
// If provided, the solver will attempt to preserve the versions specified
// in the lock, unless ToChange or ChangeAll settings indicate otherwise.
- L Lock
+ Lock Lock
+
+ // A list of packages (import paths) to ignore. These can be in the root
+ // project, or from elsewhere. Ignoring a package means that both it and its
+ // imports will be disregarded by all relevant solver operations.
+ Ignore []string
}
// SolveOpts holds additional options that govern solving behavior.
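
With the single-letter fields renamed and the new Ignore list in place, a caller's setup reads roughly like the sketch below. This is a hedged example: rootManifest, rootLock, sm, and the example paths are assumptions standing in for whatever the calling tool already has on hand.

    package example

    import "github.com/sdboyer/vsolver"

    // solveExample drives a solve with the renamed SolveArgs fields and the
    // new Ignore list. rootManifest, rootLock, and sm are supplied by the
    // caller: the root Manifest, an optional Lock (may be nil), and a
    // prepared SourceManager.
    func solveExample(rootManifest vsolver.Manifest, rootLock vsolver.Lock, sm vsolver.SourceManager) (vsolver.Result, error) {
        args := vsolver.SolveArgs{
            Root:     "/home/me/go/src/example.com/myproj", // assumption: project root on disk
            Name:     vsolver.ProjectName("example.com/myproj"),
            Manifest: rootManifest,
            Lock:     rootLock,
            // Duplicate entries are harmless: Prepare folds Ignore into a
            // map, which deduplicates it (see TestIgnoreDedupe later in
            // this diff).
            Ignore: []string{"example.com/myproj/internal/gen"},
        }

        s, err := vsolver.Prepare(args, vsolver.SolveOpts{}, sm)
        if err != nil {
            return nil, err
        }
        return s.Solve()
    }
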
@@ -115,11 +120,15 @@
// removal.
unsel *unselected
+ // Map of packages to ignore. This is derived by converting SolveArgs.Ignore
+ // into a map during solver prep - which also, nicely, deduplicates it.
+ ig map[string]bool
+
// A list of all the currently active versionQueues in the solver. The set
// of projects represented here corresponds closely to what's in s.sel,
// although s.sel will always contain the root project, and s.versions never
// will.
- versions []*versionQueue // TODO rename to pvq
+ versions []*versionQueue // TODO rename to vq
// A map of the ProjectName (local names) that should be allowed to change
chng map[ProjectName]struct{}
@@ -143,32 +152,49 @@
Solve() (Result, error)
}
-// Prepare reads and validates the provided SolveArgs and SolveOpts.
+// Prepare readies a Solver for use.
//
-// If a problem with the inputs is detected, an error is returned. Otherwise, a
+// This function reads and validates the provided SolveArgs and SolveOpts. If a
+// problem with the inputs is detected, an error is returned. Otherwise, a
// Solver is returned, ready to hash and check inputs or perform a solving run.
-func Prepare(in SolveArgs, opts SolveOpts, sm SourceManager) (Solver, error) {
+func Prepare(args SolveArgs, opts SolveOpts, sm SourceManager) (Solver, error) {
// local overrides would need to be handled first.
// TODO local overrides! heh
- if in.M == nil {
- return nil, BadOptsFailure("Opts must include a manifest.")
+ if args.Manifest == nil {
+ return nil, badOptsFailure("Opts must include a manifest.")
}
- if in.Root == "" {
- return nil, BadOptsFailure("Opts must specify a non-empty string for the project root directory. If cwd is desired, use \".\"")
+ if args.Root == "" {
+ return nil, badOptsFailure("Opts must specify a non-empty string for the project root directory. If cwd is desired, use \".\"")
}
- if in.N == "" {
- return nil, BadOptsFailure("Opts must include a project name. This should be the intended root import path of the project.")
+ if args.Name == "" {
+ return nil, badOptsFailure("Opts must include a project name. This should be the intended root import path of the project.")
}
if opts.Trace && opts.TraceLogger == nil {
- return nil, BadOptsFailure("Trace requested, but no logger provided.")
+ return nil, badOptsFailure("Trace requested, but no logger provided.")
+ }
+
+ // Ensure the ignore map is at least initialized
+ ig := make(map[string]bool)
+ if len(args.Ignore) > 0 {
+ for _, pkg := range args.Ignore {
+ ig[pkg] = true
+ }
}
s := &solver{
- args: in,
+ args: args,
o: opts,
- b: newBridge(in.N, in.Root, sm, opts.Downgrade),
- tl: opts.TraceLogger,
+ ig: ig,
+ b: &bridge{
+ sm: sm,
+ sortdown: opts.Downgrade,
+ name: args.Name,
+ root: args.Root,
+ ignore: ig,
+ vlists: make(map[ProjectName][]Version),
+ },
+ tl: opts.TraceLogger,
}
// Initialize maps
@@ -178,7 +204,7 @@
// Initialize stacks and queues
s.sel = &selection{
- deps: make(map[ProjectIdentifier][]Dependency),
+ deps: make(map[ProjectIdentifier][]dependency),
sm: s.b,
}
s.unsel = &unselected{
@@ -202,10 +228,10 @@
}
// Prep safe, normalized versions of root manifest and lock data
- s.rm = prepManifest(s.args.M, s.args.N)
+ s.rm = prepManifest(s.args.Manifest, s.args.Name)
- if s.args.L != nil {
- for _, lp := range s.args.L.Projects() {
+ if s.args.Lock != nil {
+ for _, lp := range s.args.Lock.Projects() {
s.rlm[lp.Ident().normalize()] = lp
}
}
@@ -250,7 +276,7 @@
}
// solve is the top-level loop for the SAT solving process.
-func (s *solver) solve() (map[ProjectAtom]map[string]struct{}, error) {
+func (s *solver) solve() (map[atom]map[string]struct{}, error) {
// Main solving loop
for {
bmi, has := s.nextUnselected()
@@ -286,9 +312,9 @@
}
s.selectAtomWithPackages(atomWithPackages{
- atom: ProjectAtom{
- Ident: queue.id,
- Version: queue.current(),
+ a: atom{
+ id: queue.id,
+ v: queue.current(),
},
pl: bmi.pl,
})
@@ -307,9 +333,9 @@
// queue and just use the version given in what came back from
// s.sel.selected().
nawp := atomWithPackages{
- atom: ProjectAtom{
- Ident: bmi.id,
- Version: awp.atom.Version,
+ a: atom{
+ id: bmi.id,
+ v: awp.a.v,
},
pl: bmi.pl,
}
@@ -334,15 +360,15 @@
// Getting this far means we successfully found a solution. Combine the
// selected projects and packages.
- projs := make(map[ProjectAtom]map[string]struct{})
+ projs := make(map[atom]map[string]struct{})
// Skip the first project. It's always the root, and that shouldn't be
// included in results.
for _, sel := range s.sel.projects[1:] {
- pm, exists := projs[sel.a.atom]
+ pm, exists := projs[sel.a.a]
if !exists {
pm = make(map[string]struct{})
- projs[sel.a.atom] = pm
+ projs[sel.a.a] = pm
}
for _, path := range sel.a.pl {
@@ -355,18 +381,18 @@
// selectRoot is a specialized selectAtomWithPackages, used solely to initially
// populate the queues at the beginning of a solve run.
func (s *solver) selectRoot() error {
- pa := ProjectAtom{
- Ident: ProjectIdentifier{
- LocalName: s.args.N,
+ pa := atom{
+ id: ProjectIdentifier{
+ LocalName: s.args.Name,
},
// This is a hack so that the root project doesn't have a nil version.
// It's sort of OK because the root never makes it out into the results.
// We may need a more elegant solution if we discover other side
// effects, though.
- Version: Revision(""),
+ v: Revision(""),
}
- ptree, err := s.b.listPackages(pa.Ident, nil)
+ ptree, err := s.b.listPackages(pa.id, nil)
if err != nil {
return err
}
@@ -379,8 +405,8 @@
}
a := atomWithPackages{
- atom: pa,
- pl: list,
+ a: pa,
+ pl: list,
}
// Push the root project onto the queue.
@@ -389,8 +415,8 @@
// If we're looking for root's deps, get it from opts and local root
// analysis, rather than having the sm do it
- mdeps := append(s.rm.GetDependencies(), s.rm.GetDevDependencies()...)
- reach, err := s.b.computeRootReach(s.args.Root)
+ mdeps := append(s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()...)
+ reach, err := s.b.computeRootReach()
if err != nil {
return err
}
@@ -402,7 +428,7 @@
}
for _, dep := range deps {
- s.sel.pushDep(Dependency{Depender: pa, Dep: dep})
+ s.sel.pushDep(dependency{depender: pa, dep: dep})
// Add all to unselected queue
s.names[dep.Ident.LocalName] = dep.Ident.netName()
heap.Push(s.unsel, bimodalIdentifier{id: dep.Ident, pl: dep.pl})
@@ -414,23 +440,23 @@
func (s *solver) getImportsAndConstraintsOf(a atomWithPackages) ([]completeDep, error) {
var err error
- if s.rm.Name() == a.atom.Ident.LocalName {
+ if s.rm.Name() == a.a.id.LocalName {
panic("Should never need to recheck imports/constraints from root during solve")
}
// Work through the source manager to get project info and static analysis
// information.
- info, err := s.b.getProjectInfo(a.atom)
+ m, _, err := s.b.getProjectInfo(a.a)
if err != nil {
return nil, err
}
- ptree, err := s.b.listPackages(a.atom.Ident, a.atom.Version)
+ ptree, err := s.b.listPackages(a.a.id, a.a.v)
if err != nil {
return nil, err
}
- allex, err := ptree.ExternalReach(false, false)
+ allex, err := ptree.ExternalReach(false, false, s.ig)
if err != nil {
return nil, err
}
@@ -441,7 +467,7 @@
// the list
for _, pkg := range a.pl {
if expkgs, exists := allex[pkg]; !exists {
- return nil, fmt.Errorf("Package %s does not exist within project %s", pkg, a.atom.Ident.errString())
+ return nil, fmt.Errorf("package %s does not exist within project %s", pkg, a.a.id.errString())
} else {
for _, ex := range expkgs {
exmap[ex] = struct{}{}
@@ -456,7 +482,7 @@
k++
}
- deps := info.GetDependencies()
+ deps := m.DependencyConstraints()
// TODO add overrides here...if we impl the concept (which we should)
return s.intersectConstraintsWithImports(deps, reach)
@@ -615,9 +641,9 @@
for {
cur := q.current()
err := s.checkProject(atomWithPackages{
- atom: ProjectAtom{
- Ident: q.id,
- Version: cur,
+ a: atom{
+ id: q.id,
+ v: cur,
},
pl: pl,
})
@@ -636,7 +662,7 @@
}
}
- s.fail(s.sel.getDependenciesOn(q.id)[0].Depender.Ident)
+ s.fail(s.sel.getDependenciesOn(q.id)[0].depender.id)
// Return a compound error of all the new errors encountered during this
// attempt to find a new, valid version
@@ -655,7 +681,7 @@
//
// If any of these three conditions are true (or if the id cannot be found in
// the root lock), then no atom will be returned.
-func (s *solver) getLockVersionIfValid(id ProjectIdentifier) (ProjectAtom, error) {
+func (s *solver) getLockVersionIfValid(id ProjectIdentifier) (atom, error) {
// If the project is specifically marked for changes, then don't look for a
// locked version.
if _, explicit := s.chng[id.LocalName]; explicit || s.o.ChangeAll {
@@ -719,9 +745,9 @@
s.logSolve("using root lock's version of %s", id.errString())
- return ProjectAtom{
- Ident: id,
- Version: v,
+ return atom{
+ id: id,
+ v: v,
}, nil
}
@@ -762,7 +788,7 @@
awp, proj = s.unselectLast()
}
- if !q.id.eq(awp.atom.Ident) {
+ if !q.id.eq(awp.a.id) {
panic("canary - version queue stack and selected project stack are out of alignment")
}
@@ -776,9 +802,9 @@
// Found one! Put it back on the selected queue and stop
// backtracking
s.selectAtomWithPackages(atomWithPackages{
- atom: ProjectAtom{
- Ident: q.id,
- Version: q.current(),
+ a: atom{
+ id: q.id,
+ v: q.current(),
},
pl: awp.pl,
})
@@ -912,7 +938,7 @@
// new resultant deps to the unselected queue.
func (s *solver) selectAtomWithPackages(a atomWithPackages) {
s.unsel.remove(bimodalIdentifier{
- id: a.atom.Ident,
+ id: a.a.id,
pl: a.pl,
})
@@ -926,7 +952,7 @@
}
for _, dep := range deps {
- s.sel.pushDep(Dependency{Depender: a.atom, Dep: dep})
+ s.sel.pushDep(dependency{depender: a.a, dep: dep})
// Go through all the packages introduced on this dep, selecting only
// the ones where the only depper on them is what we pushed in. Then,
// put those into the unselected queue.
@@ -956,7 +982,7 @@
// order to enqueue the selection.
func (s *solver) selectPackages(a atomWithPackages) {
s.unsel.remove(bimodalIdentifier{
- id: a.atom.Ident,
+ id: a.a.id,
pl: a.pl,
})
@@ -970,7 +996,7 @@
}
for _, dep := range deps {
- s.sel.pushDep(Dependency{Depender: a.atom, Dep: dep})
+ s.sel.pushDep(dependency{depender: a.a, dep: dep})
// Go through all the packages introduced on this dep, selecting only
// the ones where the only depper on them is what we pushed in. Then,
// put those into the unselected queue.
@@ -994,7 +1020,7 @@
func (s *solver) unselectLast() (atomWithPackages, bool) {
awp, first := s.sel.popSelection()
- heap.Push(s.unsel, bimodalIdentifier{id: awp.atom.Ident, pl: awp.pl})
+ heap.Push(s.unsel, bimodalIdentifier{id: awp.a.id, pl: awp.pl})
deps, err := s.getImportsAndConstraintsOf(awp)
if err != nil {
@@ -1078,15 +1104,15 @@
}
// simple (temporary?) helper just to convert atoms into locked projects
-func pa2lp(pa ProjectAtom, pkgs map[string]struct{}) LockedProject {
+func pa2lp(pa atom, pkgs map[string]struct{}) LockedProject {
lp := LockedProject{
- pi: pa.Ident.normalize(), // shouldn't be necessary, but normalize just in case
+ pi: pa.id.normalize(), // shouldn't be necessary, but normalize just in case
// path is unnecessary duplicate information now, but if we ever allow
// nesting as a conflict resolution mechanism, it will become valuable
- path: string(pa.Ident.LocalName),
+ path: string(pa.id.LocalName),
}
- switch v := pa.Version.(type) {
+ switch v := pa.v.(type) {
case UnpairedVersion:
lp.v = v
case Revision:
@@ -1099,7 +1125,7 @@
}
for pkg := range pkgs {
- lp.pkgs = append(lp.pkgs, strings.TrimPrefix(pkg, string(pa.Ident.LocalName)+string(os.PathSeparator)))
+ lp.pkgs = append(lp.pkgs, strings.TrimPrefix(pkg, string(pa.id.LocalName)+string(os.PathSeparator)))
}
sort.Strings(lp.pkgs)
diff --git a/vendor/github.com/sdboyer/vsolver/source_manager.go b/vendor/github.com/sdboyer/vsolver/source_manager.go
index 6ab9c01..3100b37 100644
--- a/vendor/github.com/sdboyer/vsolver/source_manager.go
+++ b/vendor/github.com/sdboyer/vsolver/source_manager.go
@@ -10,15 +10,48 @@
"github.com/Masterminds/vcs"
)
+// A SourceManager is responsible for retrieving, managing, and interrogating
+// source repositories. Its primary purpose is to serve the needs of a Solver,
+// but it is handy for other purposes, as well.
+//
+// vsolver's built-in SourceManager, accessible via NewSourceManager(), is
+// intended to be generic and sufficient for any purpose. It provides some
+// additional semantics around the methods defined here.
type SourceManager interface {
- GetProjectInfo(ProjectName, Version) (ProjectInfo, error)
- ListVersions(ProjectName) ([]Version, error)
+ // RepoExists checks if a repository exists, either upstream or in the
+ // SourceManager's central repository cache.
RepoExists(ProjectName) (bool, error)
+
+ // VendorCodeExists checks if a code tree exists within the stored vendor
+ // directory for the provided import path name.
VendorCodeExists(ProjectName) (bool, error)
+
+ // ListVersions retrieves a list of the available versions for a given
+ // repository name.
+ ListVersions(ProjectName) ([]Version, error)
+
+ // ListPackages retrieves a tree of the Go packages at or below the provided
+ // import path, at the provided version.
ListPackages(ProjectName, Version) (PackageTree, error)
+
+ // GetProjectInfo returns manifest and lock information for the provided
+ // import path. vsolver currently requires that projects be rooted at their
+ // repository root, which means that this ProjectName must also be a
+ // repository root.
+ GetProjectInfo(ProjectName, Version) (Manifest, Lock, error)
+
+ // ExportProject writes out the tree of the provided import path, at the
+ // provided version, to the provided directory.
ExportProject(ProjectName, Version, string) error
+
+ // Release lets go of any locks held by the SourceManager.
Release()
- // Flush()
+}
+
+// A ProjectAnalyzer is responsible for analyzing a path for Manifest and Lock
+// information. Tools relying on vsolver must implement one.
+type ProjectAnalyzer interface {
+ GetInfo(build.Context, ProjectName) (Manifest, Lock, error)
}
// ExistenceError is a specialized error type that, in addition to the standard
@@ -45,15 +78,33 @@
//pme map[ProjectName]error
}
-// Holds a ProjectManager, caches of the managed project's data, and information
+// Holds a projectManager, caches of the managed project's data, and information
// about the freshness of those caches
type pmState struct {
- pm ProjectManager
+ pm *projectManager
cf *os.File // handle for the cache file
vcur bool // indicates that we've called ListVersions()
}
-func NewSourceManager(cachedir, basedir string, force bool, an ProjectAnalyzer) (SourceManager, error) {
+// NewSourceManager produces an instance of vsolver's built-in SourceManager. It
+// takes a cache directory (where local instances of upstream repositories are
+// stored), a base directory for the project currently being worked on, and a
+// force flag indicating whether to overwrite the global cache lock file (if
+// present).
+//
+// The returned SourceManager aggressively caches information wherever
+// possible. If tools need to do preliminary work involving upstream
+// repository analysis prior to invoking a solve run, it is recommended that
+// they create this SourceManager as early as possible and use it to their
+// ends. That way, the solver can benefit from any caches that may have
+// already been warmed.
+//
+// vsolver's SourceManager is intended to be threadsafe (if it's not, please
+// file a bug!). It should certainly be safe to reuse from one solving run to
+// the next; however, the fact that it takes a basedir as an argument makes it
+// much less useful for simultaneous use by separate solvers operating on
+// different root projects. This architecture may change in the future.
+func NewSourceManager(an ProjectAnalyzer, cachedir, basedir string, force bool) (SourceManager, error) {
if an == nil {
return nil, fmt.Errorf("A ProjectAnalyzer must be provided to the SourceManager.")
}
@@ -84,22 +135,32 @@
ctx: ctx,
an: an,
}, nil
- // recovery in a defer to be really proper, though
}
+// Release lets go of any locks held by the SourceManager.
+//
+// This will also call Flush(), which will write any relevant caches to disk.
func (sm *sourceManager) Release() {
os.Remove(path.Join(sm.cachedir, "sm.lock"))
}
-func (sm *sourceManager) GetProjectInfo(n ProjectName, v Version) (ProjectInfo, error) {
+// GetProjectInfo returns manifest and lock information for the provided import
+// path. vsolver currently requires that projects be rooted at their repository
+// root, which means that this ProjectName must also be a repository root.
+//
+// The work of producing the manifest and lock information is delegated to the
+// injected ProjectAnalyzer.
+func (sm *sourceManager) GetProjectInfo(n ProjectName, v Version) (Manifest, Lock, error) {
pmc, err := sm.getProjectManager(n)
if err != nil {
- return ProjectInfo{}, err
+ return nil, nil, err
}
return pmc.pm.GetInfoAt(v)
}
+// ListPackages retrieves a tree of the Go packages at or below the provided
+// import path, at the provided version.
func (sm *sourceManager) ListPackages(n ProjectName, v Version) (PackageTree, error) {
pmc, err := sm.getProjectManager(n)
if err != nil {
@@ -109,6 +170,17 @@
return pmc.pm.ListPackages(v)
}
+// ListVersions retrieves a list of the available versions for a given
+// repository name.
+//
+// The list is not sorted; while it may be returned in the order that the
+// underlying VCS reports version information, no guarantee is made. It is
+// expected that the caller either not care about order, or sort the result
+// themselves.
+//
+// This list is always retrieved from upstream; if upstream is not accessible
+// (network outage, access issues, or the resource actually went away), an error
+// will be returned.
func (sm *sourceManager) ListVersions(n ProjectName) ([]Version, error) {
pmc, err := sm.getProjectManager(n)
if err != nil {
@@ -119,13 +191,15 @@
return pmc.pm.ListVersions()
}
+// VendorCodeExists checks if a code tree exists within the stored vendor
+// directory for the provided import path name.
func (sm *sourceManager) VendorCodeExists(n ProjectName) (bool, error) {
pms, err := sm.getProjectManager(n)
if err != nil {
return false, err
}
- return pms.pm.CheckExistence(ExistsInVendorRoot), nil
+ return pms.pm.CheckExistence(existsInVendorRoot), nil
}
func (sm *sourceManager) RepoExists(n ProjectName) (bool, error) {
@@ -134,9 +208,11 @@
return false, err
}
- return pms.pm.CheckExistence(ExistsInCache) || pms.pm.CheckExistence(ExistsUpstream), nil
+ return pms.pm.CheckExistence(existsInCache) || pms.pm.CheckExistence(existsUpstream), nil
}
+// ExportProject writes out the tree of the provided import path, at the
+// provided version, to the provided directory.
func (sm *sourceManager) ExportProject(n ProjectName, v Version, to string) error {
pms, err := sm.getProjectManager(n)
if err != nil {
@@ -207,7 +283,7 @@
//}
dc = &projectDataCache{
- Infos: make(map[Revision]ProjectInfo),
+ Infos: make(map[Revision]projectInfo),
VMap: make(map[Version]Revision),
RMap: make(map[Revision][]Version),
}
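
To summarize the interface changes at the call site, here is a hedged end-to-end sketch: a do-nothing analyzer in the spirit of the tests' passthruAnalyzer, the reordered NewSourceManager signature (analyzer first), and the split Manifest/Lock return from GetProjectInfo. The directories and the example project name are illustrative only.

    package main

    import (
        "fmt"
        "go/build"
        "log"
        "path/filepath"

        "github.com/sdboyer/vsolver"
    )

    // nilAnalyzer is a hypothetical ProjectAnalyzer that contributes no
    // manifest or lock data and never errors. A real tool would parse its
    // own manifest/lock file formats here.
    type nilAnalyzer struct{}

    func (nilAnalyzer) GetInfo(ctx build.Context, n vsolver.ProjectName) (vsolver.Manifest, vsolver.Lock, error) {
        return nil, nil, nil
    }

    func main() {
        // Note the reordered signature: the analyzer now comes first.
        sm, err := vsolver.NewSourceManager(nilAnalyzer{}, filepath.Join("/tmp", "vsolver-cache"), "/tmp/myproj", false)
        if err != nil {
            log.Fatal(err)
        }
        defer sm.Release()

        // GetProjectInfo now returns the manifest and lock separately
        // instead of a ProjectInfo struct.
        m, l, err := sm.GetProjectInfo(vsolver.ProjectName("github.com/Masterminds/VCSTestRepo"), vsolver.NewVersion("1.0.0"))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(m, l)
    }

Because the SourceManager caches aggressively, constructing it once and handing the same instance to Prepare lets a later solve run reuse whatever was warmed here.
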
diff --git a/vendor/github.com/sdboyer/vsolver/types.go b/vendor/github.com/sdboyer/vsolver/types.go
index ed15bf0..0cb54e7 100644
--- a/vendor/github.com/sdboyer/vsolver/types.go
+++ b/vendor/github.com/sdboyer/vsolver/types.go
@@ -64,14 +64,14 @@
type ProjectName string
-type ProjectAtom struct {
- Ident ProjectIdentifier
- Version Version
+type atom struct {
+ id ProjectIdentifier
+ v Version
}
type atomWithPackages struct {
- atom ProjectAtom
- pl []string
+ a atom
+ pl []string
}
type ProjectDep struct {
@@ -105,15 +105,7 @@
pl []string
}
-type Dependency struct {
- Depender ProjectAtom
- Dep completeDep
-}
-
-// ProjectInfo holds manifest and lock for a ProjectName at a Version
-type ProjectInfo struct {
- N ProjectName
- V Version
- Manifest
- Lock
+type dependency struct {
+ depender atom
+ dep completeDep
}
diff --git a/vendor/github.com/sdboyer/vsolver/version.go b/vendor/github.com/sdboyer/vsolver/version.go
index 804402f..bb30631 100644
--- a/vendor/github.com/sdboyer/vsolver/version.go
+++ b/vendor/github.com/sdboyer/vsolver/version.go
@@ -10,11 +10,10 @@
//
// Version is an interface, but it contains private methods, which restricts it
// to vsolver's own internal implementations. We do this for the confluence of
-// two reasons:
-// - the implementation of Versions is complete (there is no case in which we'd
-// need other types)
-// - the implementation relies on type magic under the hood, which would
-// be unsafe to do if other dynamic types could be hiding behind the interface.
+// two reasons: the implementation of Versions is complete (there is no case in
+// which we'd need other types), and the implementation relies on type magic
+// under the hood, which would be unsafe to do if other dynamic types could be
+// hiding behind the interface.
type Version interface {
Constraint
// Indicates the type of version - Revision, Branch, Version, or Semver
@@ -36,7 +35,7 @@
// VersionPair by indicating the version's corresponding, underlying Revision.
type UnpairedVersion interface {
Version
- // Is takes the underlying Revision that this (Unpaired)Version corresponds
+ // Is takes the underlying Revision that this UnpairedVersion corresponds
// to and unites them into a PairedVersion.
Is(Revision) PairedVersion
// Ensures it is impossible to be both a PairedVersion and an
diff --git a/vendor/github.com/sdboyer/vsolver/version_queue.go b/vendor/github.com/sdboyer/vsolver/version_queue.go
index 34382fc..22e7b0c 100644
--- a/vendor/github.com/sdboyer/vsolver/version_queue.go
+++ b/vendor/github.com/sdboyer/vsolver/version_queue.go
@@ -19,7 +19,7 @@
hasLock, allLoaded bool
}
-func newVersionQueue(id ProjectIdentifier, lockv ProjectAtom, sm sourceBridge) (*versionQueue, error) {
+func newVersionQueue(id ProjectIdentifier, lockv atom, sm sourceBridge) (*versionQueue, error) {
vq := &versionQueue{
id: id,
sm: sm,
@@ -27,7 +27,7 @@
if lockv != nilpa {
vq.hasLock = true
- vq.pi = append(vq.pi, lockv.Version)
+ vq.pi = append(vq.pi, lockv.v)
} else {
var err error
vq.pi, err = vq.sm.listVersions(vq.id)