Merge pull request #49 from 30x/XAPID-994

Xapid 994
diff --git a/.travis.yml b/.travis.yml
index a902a1c..5be0b7f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,4 +12,4 @@
   - glide install
 
 script:
-  - go test $(glide novendor)
+  - go test
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..5b6fba0
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,8 @@
+test:
+	go test
+
+cover:
+	./cover.sh
+
+dockertest:
+	./dockertests/dockerTest.sh
diff --git a/README.md b/README.md
index 6bccfad..430c15a 100644
--- a/README.md
+++ b/README.md
@@ -109,3 +109,20 @@
       setDB(db)
       log.Debug("Snapshot processed")
     }
+
+### Docker Test:
+Make sure transicator is checked out at $GOPATH/src/github.com/apigee-labs/transicator before you run the docker tests.
+
+    make dockertest
+
+This docker test assumes:
+
+- If you're running it on a MacBook, your docker host is localhost.
+- If you're running it on Goobuntu, your docker IP is 192.168.9.1 (if you followed go/installdocker).
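+
+Under the hood, `make dockertest` just runs `dockertests/dockerTest.sh`, which chains the setup, test and cleanup scripts. A rough sketch of the same steps run by hand from the repository root (this assumes `docker` and `psql` are on your PATH, since dockerSetup.sh uses both):
+
+    source ./dockertests/dockerSetup.sh   # start postgres, snapshot and change servers; exports env vars and writes dockertests/apid_config.yaml
+    go test ./dockertests/*.go -v         # run the docker test suite against the containers
+    ./dockertests/dockerCleanup.sh        # kill and remove the test containers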
+
diff --git a/change_test.go b/change_test.go
index 4a1981b..09ef3dd 100644
--- a/change_test.go
+++ b/change_test.go
@@ -166,7 +166,7 @@
 	return nil
 }
 
-func (t *dummyTokenManager) getToken() *oauthToken {
+func (t *dummyTokenManager) getToken() *OauthToken {
 	return nil
 }
 
diff --git a/cover.sh b/cover.sh
index 422ebb3..c866ea1 100755
--- a/cover.sh
+++ b/cover.sh
@@ -19,11 +19,9 @@
 set -e
 echo "mode: atomic" > coverage.txt
 
-for d in $(go list ./... | grep -v vendor); do
-    go test -coverprofile=profile.out -covermode=atomic $d
-    if [ -f profile.out ]; then
-        tail +2 profile.out >> coverage.txt
-        rm profile.out
-    fi
-done
+go test -coverprofile=profile.out -covermode=atomic github.com/30x/apidApigeeSync
+if [ -f profile.out ]; then
+    tail -n +2 profile.out >> coverage.txt
+    rm profile.out
+fi
 go tool cover -html=coverage.txt -o cover.html
diff --git a/data.go b/data.go
index 8e7cca6..3bd5591 100644
--- a/data.go
+++ b/data.go
@@ -453,7 +453,7 @@
 			// first start - no row, generate a UUID and store it
 			err = nil
 			newInstanceID = true
-			info.InstanceID = generateUUID()
+			info.InstanceID = GenerateUUID()
 
 			log.Debugf("Inserting new apid instance id %s", info.InstanceID)
 			db.Exec("INSERT INTO APID (instance_id, apid_cluster_id, last_snapshot_info) VALUES (?,?,?)",
@@ -463,7 +463,7 @@
 		log.Debug("Detected apid cluster id change in config.  Apid will start clean")
 		err = nil
 		newInstanceID = true
-		info.InstanceID = generateUUID()
+		info.InstanceID = GenerateUUID()
 
 		db.Exec("REPLACE INTO APID (instance_id, apid_cluster_id, last_snapshot_info) VALUES (?,?,?)",
 			info.InstanceID, info.ClusterID, "")
@@ -501,7 +501,7 @@
  */
 
 //TODO: Change to https://tools.ietf.org/html/rfc4122 based implementation such as https://github.com/google/uuid
-func generateUUID() string {
+func GenerateUUID() string {
 
 	buff := make([]byte, 16)
 	numRead, err := rand.Read(buff)
diff --git a/dockertests/const.go b/dockertests/const.go
new file mode 100644
index 0000000..7127971
--- /dev/null
+++ b/dockertests/const.go
@@ -0,0 +1,27 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockertests
+
+const (
+	pluginName               = "apigeeSyncDockerTest"
+	configApidClusterId      = "apigeesync_cluster_id"
+	configProxyServerBaseURI = "apigeesync_proxy_server_base"
+	configLocalStoragePath   = "local_storage_path"
+	configConsumerKey        = "apigeesync_consumer_key"
+	configConsumerSecret     = "apigeesync_consumer_secret"
+	configName               = "apigeesync_instance_name"
+	ApigeeSyncEventSelector  = "ApigeeSync"
+	testInitUser             = "dockerTestInit"
+)
diff --git a/dockertests/create-db.sql b/dockertests/create-db.sql
new file mode 100644
index 0000000..a1f46db
--- /dev/null
+++ b/dockertests/create-db.sql
@@ -0,0 +1,12 @@
+-- noinspection SqlDialectInspectionForFile
+-- noinspection SqlNoDataSourceInspectionForFile
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET row_security = off;
+
+CREATE DATABASE edgex WITH TEMPLATE = template0 ENCODING = 'UTF8';
+ALTER DATABASE edgex OWNER TO postgres;
diff --git a/dockertests/dockerCleanup.sh b/dockertests/dockerCleanup.sh
new file mode 100755
index 0000000..4ed8e0e
--- /dev/null
+++ b/dockertests/dockerCleanup.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+pgname=apidSync_test_pg
+ssname=apidSync_test_ss
+csname=apidSync_test_cs
+docker kill ${pgname} ${csname} ${ssname}
+docker rm -f ${pgname} ${csname} ${ssname}
\ No newline at end of file
diff --git a/dockertests/dockerSetup.sh b/dockertests/dockerSetup.sh
new file mode 100755
index 0000000..8df2b5d
--- /dev/null
+++ b/dockertests/dockerSetup.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+#
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+WORK_DIR=$(pwd)
+TRANSICATOR_DIR="$GOPATH/src/github.com/apigee-labs/transicator"
+DOCKER_IP="192.168.9.1"
+if [ "$(uname)" == Darwin ];
+then
+    DOCKER_IP="localhost"
+fi
+TEST_PG_BASE=postgres://postgres:changeme@$DOCKER_IP:5432
+TEST_PG_URL=postgres://postgres:changeme@$DOCKER_IP:5432/edgex
+echo ${TEST_PG_URL}
+
+export APIGEE_SYNC_DOCKER_PG_URL=${TEST_PG_URL}
+export APIGEE_SYNC_DOCKER_IP=${DOCKER_IP}
+
+pgnum=$(docker images "apigeelabs/transicator-postgres" | wc -l)
+ssnum=$(docker images "apigeelabs/transicator-snapshot" | wc -l)
+csnum=$(docker images "apigeelabs/transicator-changeserver" | wc -l)
+
+
+if (( !(pgnum>1 && ssnum>1 && csnum>1) ))
+then
+    cd ${TRANSICATOR_DIR}
+    make
+    make docker
+    cd ${WORK_DIR}
+fi
+
+echo "Starting Transicator docker"
+pgname=apidSync_test_pg
+ssname=apidSync_test_ss
+csname=apidSync_test_cs
+
+# run PG
+docker run --name ${pgname} -p 5432:5432 -d -e POSTGRES_PASSWORD=changeme apigeelabs/transicator-postgres
+
+# Wait for PG to be up -- it takes a few seconds
+while true
+do
+  sleep 1
+  psql -q -c 'select * from now()' ${TEST_PG_BASE}
+  if [ $? -eq 0 ]
+  then
+    break
+  fi
+done
+
+# init pg
+psql -f ${WORK_DIR}/dockertests/create-db.sql ${TEST_PG_BASE}
+psql -f ${WORK_DIR}/dockertests/master-schema.sql ${TEST_PG_URL}
+psql -f ${WORK_DIR}/dockertests/user-setup.sql ${TEST_PG_URL}
+
+# run SS and CS
+docker run --name ${ssname} -d -p 9001:9001 apigeelabs/transicator-snapshot -p 9001 -u ${TEST_PG_URL}
+docker run --name ${csname} -d -p 9000:9000 apigeelabs/transicator-changeserver -p 9000 -u ${TEST_PG_URL} -s testslot
+
+# Wait for SS to be up
+while true
+do
+  sleep 1
+  response=$(curl -i http://${DOCKER_IP}:9001/snapshots?selector=foo | head -n 1)
+  if [[ $response == *303* ]]
+  then
+    break
+  fi
+done
+
+# Wait for CS to be up
+while true
+do
+  sleep 1
+  response=$(curl -i http://${DOCKER_IP}:9000/changes | head -n 1)
+  if [[ $response == *200* ]]
+  then
+    break
+  fi
+done
+
+apid_config=`cat <<EOF
+apigeesync_instance_name: SQLLITAPID
+apigeesync_snapshot_server_base: http://${DOCKER_IP}:9001/
+apigeesync_change_server_base: http://${DOCKER_IP}:9000/
+apigeesync_snapshot_proto: sqlite
+log_level: Debug
+apigeesync_consumer_key: 33f39JNLosF1mDOXJoCfbauchVzPrGrl
+apigeesync_consumer_secret: LAolGShAx6H3vfNF
+apigeesync_cluster_id: 4c6bb536-0d64-43ca-abae-17c08f1a7e58
+local_storage_path: ${WORK_DIR}/tmp/sqlite
+EOF
+`
+rm -f ${WORK_DIR}/dockertests/apid_config.yaml
+echo "$apid_config" >> ${WORK_DIR}/dockertests/apid_config.yaml
diff --git a/dockertests/dockerTest.sh b/dockertests/dockerTest.sh
new file mode 100755
index 0000000..5bde023
--- /dev/null
+++ b/dockertests/dockerTest.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+#
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source ./dockertests/dockerSetup.sh
+go test ./dockertests/*.go -v
+./dockertests/dockerCleanup.sh
\ No newline at end of file
diff --git a/dockertests/docker_test.go b/dockertests/docker_test.go
new file mode 100644
index 0000000..93192dc
--- /dev/null
+++ b/dockertests/docker_test.go
@@ -0,0 +1,402 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockertests
+
+import (
+	"encoding/json"
+	"github.com/30x/apid-core"
+	"github.com/30x/apid-core/factory"
+	_ "github.com/30x/apidApigeeSync"
+	"github.com/apigee-labs/transicator/common"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"net/http/httptest"
+	"os"
+	"testing"
+	"time"
+)
+
+var (
+	services            apid.Services
+	log                 apid.LogService
+	dataService         apid.DataService
+	config              apid.ConfigService
+	pgUrl               string
+	pgManager           *ManagementPg
+	clusterIdFromConfig string
+)
+
+/*
+ * This test suite acts like a dummy plugin. It listens to events emitted by
+ * apidApigeeSync and runs tests.
+ */
+var _ = BeforeSuite(func() {
+	defer GinkgoRecover()
+	//hostname := "http://" + os.Getenv("APIGEE_SYNC_DOCKER_IP")
+	pgUrl = os.Getenv("APIGEE_SYNC_DOCKER_PG_URL") + "?sslmode=disable"
+	os.Setenv("APID_CONFIG_FILE", "./apid_config.yaml")
+
+	apid.Initialize(factory.DefaultServicesFactory())
+	config = apid.Config()
+
+	localStorage := config.GetString(configLocalStoragePath)
+	err := os.RemoveAll(localStorage)
+	Expect(err).Should(Succeed())
+	err = os.MkdirAll(localStorage, 0700)
+	Expect(err).Should(Succeed())
+
+	// init pg driver and data
+	pgManager, err = InitDb(pgUrl)
+	Expect(err).Should(Succeed())
+	initPgData()
+
+	// Auth Server
+	config.Set(configName, "dockerIT")
+	config.Set(configConsumerKey, "dummyKey")
+	config.Set(configConsumerSecret, "dummySecret")
+	testServer := initDummyAuthServer()
+
+	initDone := make(chan bool)
+	handler := &waitSnapshotHandler{initDone}
+
+	// hang until snapshot received
+	apid.Events().Listen(ApigeeSyncEventSelector, handler)
+
+	config.Set(configProxyServerBaseURI, testServer.URL)
+
+	// init plugin
+	apid.RegisterPlugin(initPlugin)
+	apid.InitializePlugins("dockerTest")
+
+	<-initDone
+}, 5)
+
+var _ = AfterSuite(func() {
+	err := pgManager.CleanupAll()
+	Expect(err).Should(Succeed())
+})
+
+var _ = Describe("dockerIT", func() {
+
+	/*
+	 * Isolation between tests is not perfect.
+	 * If a test listens to any event, make sure it stops listening before it finishes,
+	 * so that the event doesn't interfere with later tests.
+	 */
+	Context("Generic Replication", func() {
+		var _ = BeforeEach(func() {
+
+		})
+
+		var _ = AfterEach(func() {
+			err := pgManager.CleanupTest()
+			Expect(err).Should(Succeed())
+		})
+
+		It("should successfully download new table from pg", func(done Done) {
+			tableName := "docker_test_download"
+			targetTablename := "edgex_" + tableName
+			handler := &newTableHandler{
+				targetTablename: targetTablename,
+				done:            done,
+				verifyFunc:      verifyTestTableExist,
+			}
+
+			apid.Events().Listen(ApigeeSyncEventSelector, handler)
+			createTestTable(tableName)
+
+		}, 1)
+
+		It("should get data according to data scope", func(done Done) {
+			tableName := "docker_test_scope"
+			targetTablename := "edgex_" + tableName
+			handler := &newTableHandler{
+				targetTablename: targetTablename,
+				done:            done,
+				verifyFunc:      verifyTestTableData,
+			}
+
+			apid.Events().Listen(ApigeeSyncEventSelector, handler)
+			createTestTableWithData(tableName)
+
+		}, 1)
+
+		It("should replicate ENUM type of pg correctly", func(done Done) {
+			tableName := "docker_test_enum"
+			targetTablename := "edgex_" + tableName
+			handler := &newTableHandler{
+				targetTablename: targetTablename,
+				done:            done,
+				verifyFunc:      verifyTestTableEnum,
+			}
+
+			apid.Events().Listen(ApigeeSyncEventSelector, handler)
+			createTestTableWithEnum(tableName)
+
+		}, 1)
+	})
+})
+
+func createTestTable(tableName string) {
+	tx, err := pgManager.BeginTransaction()
+	Expect(err).Should(Succeed())
+	defer tx.Rollback()
+	_, err = tx.Exec("CREATE TABLE edgex." + tableName + " (id varchar primary key, val integer, _change_selector varchar);")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("ALTER TABLE edgex." + tableName + " replica identity full;")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('three', 3, '" + clusterIdFromConfig + "');")
+	Expect(err).Should(Succeed())
+	tx.Commit()
+}
+
+func createTestTableWithData(tableName string) {
+	tx, err := pgManager.BeginTransaction()
+	Expect(err).Should(Succeed())
+	defer tx.Rollback()
+	_, err = tx.Exec("CREATE TABLE edgex." + tableName + " (id varchar primary key, val integer, _change_selector varchar);")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("ALTER TABLE edgex." + tableName + " replica identity full;")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('one', 1, 'foo');")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('two', 2, 'bar');")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('three', 3, '" + clusterIdFromConfig + "');")
+	Expect(err).Should(Succeed())
+	tx.Commit()
+}
+
+func createTestTableWithEnum(tableName string) {
+	tx, err := pgManager.BeginTransaction()
+	Expect(err).Should(Succeed())
+	defer tx.Rollback()
+	_, err = tx.Exec("CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("CREATE TABLE edgex." + tableName + " (id varchar primary key, val mood, _change_selector varchar);")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("ALTER TABLE edgex." + tableName + " replica identity full;")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('one', 'sad', 'foo');")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('two', 'ok', 'bar');")
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("INSERT INTO edgex." + tableName + " values ('three', 'happy', '" + clusterIdFromConfig + "');")
+	Expect(err).Should(Succeed())
+	tx.Commit()
+}
+
+func dropTestTable(targetTableName string, sqliteDb apid.DB) {
+	tx, err := pgManager.BeginTransaction()
+	Expect(err).Should(Succeed())
+	_, err = tx.Exec("DROP TABLE IF EXISTS edgex." + targetTableName + ";")
+	Expect(err).Should(Succeed())
+	Expect(tx.Commit()).Should(Succeed())
+}
+
+func initDummyAuthServer() (testServer *httptest.Server) {
+	testRouter := apid.API().Router()
+	testServer = httptest.NewServer(testRouter)
+	mockAuthServer := &MockAuthServer{}
+	mockAuthServer.Start(testRouter)
+	return
+}
+
+func initPlugin(s apid.Services) (apid.PluginData, error) {
+	services = s
+	log = services.Log().ForModule(pluginName)
+	dataService = services.Data()
+
+	var pluginData = apid.PluginData{
+		Name:    pluginName,
+		Version: "0.0.1",
+		ExtraData: map[string]interface{}{
+			"schemaVersion": "0.0.1",
+		},
+	}
+
+	log.Info(pluginName + " initialized.")
+	return pluginData, nil
+}
+
+func initPgData() {
+	clusterIdFromConfig = config.GetString(configApidClusterId) //"4c6bb536-0d64-43ca-abae-17c08f1a7e58"
+	clusterId := clusterIdFromConfig
+	scopeId := "ae418890-2c22-4c6a-b218-69e261034b96"
+	deploymentId := "633af126-ee79-4a53-bef7-7ba30da8aad6"
+	bundleConfigId := "613ce223-6c73-43f4-932c-3c69b0c7c65d"
+	bundleConfigName := "good"
+	bundleUri := "https://gist.github.com/alexkhimich/843cf70ffd6a8b4d44442876ba0487b7/archive/d74360596ff9a4320775d590b3f5a91bdcdf61d2.zip"
+	t := time.Now()
+
+	cluster := &apidClusterRow{
+		id:             clusterId,
+		name:           "apidcA",
+		description:    "desc",
+		appName:        "UOA",
+		created:        t,
+		createdBy:      testInitUser,
+		updated:        t,
+		updatedBy:      testInitUser,
+		changeSelector: clusterId,
+	}
+
+	ds := &dataScopeRow{
+		id:             scopeId,
+		clusterId:      clusterId,
+		scope:          "abc1",
+		org:            "org1",
+		env:            "env1",
+		created:        t,
+		createdBy:      testInitUser,
+		updated:        t,
+		updatedBy:      testInitUser,
+		changeSelector: clusterId,
+	}
+
+	bf := bundleConfigData{
+		Id:        bundleConfigId,
+		Created:   t.Format(time.RFC3339),
+		CreatedBy: testInitUser,
+		Updated:   t.Format(time.RFC3339),
+		UpdatedBy: testInitUser,
+		Name:      bundleConfigName,
+		Uri:       bundleUri,
+	}
+
+	jsonBytes, err := json.Marshal(bf)
+	Expect(err).Should(Succeed())
+
+	bfr := &bundleConfigRow{
+		id:           bf.Id,
+		scopeId:      scopeId,
+		name:         bf.Name,
+		uri:          bf.Uri,
+		checksumType: "",
+		checksum:     "",
+		created:      t,
+		createdBy:    bf.CreatedBy,
+		updated:      t,
+		updatedBy:    bf.UpdatedBy,
+	}
+
+	d := &deploymentRow{
+		id:               deploymentId,
+		configId:         bundleConfigId,
+		clusterId:        clusterId,
+		scopeId:          scopeId,
+		bundleConfigName: bundleConfigName,
+		bundleConfigJson: string(jsonBytes),
+		configJson:       "{}",
+		created:          t,
+		createdBy:        testInitUser,
+		updated:          t,
+		updatedBy:        testInitUser,
+		changeSelector:   clusterId,
+	}
+
+	tx, err := pgManager.BeginTransaction()
+	Expect(err).Should(Succeed())
+	defer tx.Rollback()
+	err = pgManager.InsertApidCluster(tx, cluster)
+	Expect(err).Should(Succeed())
+	err = pgManager.InsertDataScope(tx, ds)
+	Expect(err).Should(Succeed())
+	err = pgManager.InsertBundleConfig(tx, bfr)
+	Expect(err).Should(Succeed())
+	err = pgManager.InsertDeployment(tx, d)
+	Expect(err).Should(Succeed())
+	err = tx.Commit()
+	Expect(err).Should(Succeed())
+}
+
+type waitSnapshotHandler struct {
+	initDone chan bool
+}
+
+func (w *waitSnapshotHandler) Handle(event apid.Event) {
+	if _, ok := event.(*common.Snapshot); ok {
+		apid.Events().StopListening(ApigeeSyncEventSelector, w)
+		w.initDone <- true
+	}
+}
+
+type newTableHandler struct {
+	targetTablename string
+	done            Done
+	verifyFunc      func(string, apid.DB)
+}
+
+func (n *newTableHandler) Handle(event apid.Event) {
+	if s, ok := event.(*common.Snapshot); ok {
+		defer GinkgoRecover()
+		sqliteDb, err := dataService.DBVersion(s.SnapshotInfo)
+		Expect(err).Should(Succeed())
+		n.verifyFunc(n.targetTablename, sqliteDb)
+		apid.Events().StopListening(ApigeeSyncEventSelector, n)
+		close(n.done)
+	}
+}
+
+func verifyTestTableExist(targetTableName string, sqliteDb apid.DB) {
+	rows, err := sqliteDb.Query("SELECT DISTINCT tableName FROM _transicator_tables;")
+	Expect(err).Should(Succeed())
+	defer rows.Close()
+	for rows.Next() {
+		var tableName string
+		err = rows.Scan(&tableName)
+		Expect(err).Should(Succeed())
+
+		if tableName == targetTableName {
+			return
+		}
+	}
+	Fail("Table " + targetTableName + " doesn't exist!")
+}
+
+func verifyTestTableData(targetTableName string, sqliteDb apid.DB) {
+	rows, err := sqliteDb.Query("SELECT id FROM " + targetTableName + ";")
+	Expect(err).Should(Succeed())
+	defer rows.Close()
+	count := 0
+	for rows.Next() {
+		var id string
+		err = rows.Scan(&id)
+		Expect(err).Should(Succeed())
+		Expect(id).To(Equal("three"))
+		count += 1
+	}
+	Expect(count).To(Equal(1))
+}
+
+func verifyTestTableEnum(targetTableName string, sqliteDb apid.DB) {
+	rows, err := sqliteDb.Query("SELECT val FROM " + targetTableName + ";")
+	Expect(err).Should(Succeed())
+	defer rows.Close()
+	count := 0
+	for rows.Next() {
+		var val string
+		err = rows.Scan(&val)
+		Expect(err).Should(Succeed())
+		Expect(val).To(Equal("happy"))
+		count += 1
+	}
+	Expect(count).To(Equal(1))
+}
+
+func TestDockerApigeeSync(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "ApigeeSync Docker Suite")
+}
diff --git a/dockertests/management_pg.go b/dockertests/management_pg.go
new file mode 100644
index 0000000..bbe28e9
--- /dev/null
+++ b/dockertests/management_pg.go
@@ -0,0 +1,261 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockertests
+
+import (
+	"database/sql"
+	_ "github.com/lib/pq"
+)
+
+var (
+	basicTables = map[string]bool{
+		"deployment_history": true,
+		"deployment":         true,
+		"bundle_config":      true,
+		"configuration":      true,
+		"apid_cluster":       true,
+		"data_scope":         true,
+	}
+)
+
+type ManagementPg struct {
+	url string
+	pg  *sql.DB
+}
+
+func InitDb(dbUrl string) (*ManagementPg, error) {
+	db, err := sql.Open("postgres", dbUrl)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ManagementPg{
+		url: dbUrl,
+		pg:  db,
+	}, nil
+}
+
+func (m *ManagementPg) InsertApidCluster(tx *sql.Tx, cluster *apidClusterRow) error {
+	stmt, err := tx.Prepare(`INSERT INTO edgex.apid_cluster(
+			id,
+			name,
+			description,
+			umbrella_org_app_name,
+			created,
+			created_by,
+			updated,
+			updated_by,
+			_change_selector
+			)
+			VALUES($1,$2,$3,$4,$5,$6,$7,$8,$9)`)
+	if err != nil {
+		return err
+	}
+
+	_, err = stmt.Exec(
+		cluster.id,
+		cluster.name,
+		cluster.description,
+		cluster.appName,
+		cluster.created,
+		cluster.createdBy,
+		cluster.updated,
+		cluster.updatedBy,
+		cluster.changeSelector,
+	)
+
+	return err
+}
+
+func (m *ManagementPg) InsertDataScope(tx *sql.Tx, ds *dataScopeRow) error {
+	stmt, err := tx.Prepare(`INSERT INTO edgex.data_scope (
+			id,
+			apid_cluster_id,
+			scope,
+			org,
+			env,
+			created,
+			created_by,
+			updated,
+			updated_by,
+			_change_selector
+			)
+			VALUES($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)`)
+	if err != nil {
+		return err
+	}
+
+	_, err = stmt.Exec(
+		ds.id,
+		ds.clusterId,
+		ds.scope,
+		ds.org,
+		ds.env,
+		ds.created,
+		ds.createdBy,
+		ds.updated,
+		ds.updatedBy,
+		ds.changeSelector,
+	)
+
+	return err
+}
+
+func (m *ManagementPg) InsertBundleConfig(tx *sql.Tx, bf *bundleConfigRow) error {
+	stmt, err := tx.Prepare(`INSERT INTO edgex.bundle_config (
+			id,
+			data_scope_id,
+			name,
+			uri,
+			checksumtype,
+			checksum,
+			created,
+			created_by,
+			updated,
+			updated_by
+			)
+			VALUES($1,$2,$3,$4,$5,$6,$7,$8,$9,$10)`)
+	if err != nil {
+		return err
+	}
+
+	_, err = stmt.Exec(
+		bf.id,
+		bf.scopeId,
+		bf.name,
+		bf.uri,
+		bf.checksumType,
+		bf.checksum,
+		bf.created,
+		bf.createdBy,
+		bf.updated,
+		bf.updatedBy,
+	)
+
+	return err
+}
+
+func (m *ManagementPg) InsertDeployment(tx *sql.Tx, d *deploymentRow) error {
+	stmt, err := tx.Prepare(`INSERT INTO edgex.deployment (
+			id,
+			bundle_config_id,
+			apid_cluster_id,
+			data_scope_id,
+			bundle_config_name,
+			bundle_config_json,
+			config_json,
+			created,
+			created_by,
+			updated,
+			updated_by,
+			_change_selector
+			)
+			VALUES($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12)`)
+	if err != nil {
+		return err
+	}
+
+	_, err = stmt.Exec(
+		d.id,
+		d.configId,
+		d.clusterId,
+		d.scopeId,
+		d.bundleConfigName,
+		d.bundleConfigJson,
+		d.configJson,
+		d.created,
+		d.createdBy,
+		d.updated,
+		d.updatedBy,
+		d.changeSelector,
+	)
+
+	return err
+}
+
+func (m *ManagementPg) BeginTransaction() (*sql.Tx, error) {
+	tx, err := m.pg.Begin()
+	return tx, err
+}
+
+/*
+ * Delete any tables or rows created by an individual test from pg,
+ * so that only the suite-level seed data remains.
+ */
+func (m *ManagementPg) CleanupTest() error {
+
+	// clean tables
+	tablesToDelete := make([]string, 0)
+	rows, err := m.pg.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='edgex';")
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+	for rows.Next() {
+		var tableName string
+		err = rows.Scan(&tableName)
+		if err != nil {
+			return err
+		}
+
+		if !basicTables[tableName] {
+			tablesToDelete = append(tablesToDelete, tableName)
+		}
+	}
+
+	for _, tableName := range tablesToDelete {
+		cleanupSql := "DROP TABLE edgex." + tableName + ";"
+		_, err := m.pg.Exec(cleanupSql)
+		if err != nil {
+			return err
+		}
+	}
+	cleanupSql := "DELETE FROM edgex.apid_cluster WHERE created_by!='" + testInitUser + "';"
+	_, err = m.pg.Exec(cleanupSql)
+	if err != nil {
+		return err
+	}
+
+	// clean enum types
+	typesToDelete := make([]string, 0)
+	typeRows, err := m.pg.Query("SELECT DISTINCT pg_type.typname AS enumtype FROM pg_type JOIN pg_enum ON pg_enum.enumtypid = pg_type.oid;")
+	if err != nil {
+		return err
+	}
+	defer typeRows.Close()
+	for typeRows.Next() {
+		var typeName string
+		err = typeRows.Scan(&typeName)
+		if err != nil {
+			return err
+		}
+		typesToDelete = append(typesToDelete, typeName)
+	}
+
+	for _, typeName := range typesToDelete {
+		cleanupSql := "DROP TYPE edgex." + typeName + ";"
+		_, err := m.pg.Exec(cleanupSql)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *ManagementPg) CleanupAll() error {
+	cleanupSql := "DELETE FROM edgex.apid_cluster;"
+	_, err := m.pg.Exec(cleanupSql)
+	return err
+}
diff --git a/dockertests/master-schema.sql b/dockertests/master-schema.sql
new file mode 100644
index 0000000..b53c815
--- /dev/null
+++ b/dockertests/master-schema.sql
@@ -0,0 +1,116 @@
+CREATE SCHEMA IF NOT EXISTS edgex;
+ALTER DATABASE edgex SET search_path TO edgex;
+SET search_path TO edgex;
+
+CREATE TABLE apid_cluster (
+    id character varying(36) NOT NULL,
+    name text NOT NULL,
+    description text,
+    umbrella_org_app_name text NOT NULL,
+    created timestamp without time zone,
+    created_by text,
+    updated timestamp without time zone,
+    updated_by text,
+    _change_selector text,
+    CONSTRAINT apid_cluster_pkey PRIMARY KEY (id)
+);
+
+CREATE INDEX apid_cluster___change_selector_idx ON apid_cluster USING btree (_change_selector);
+CREATE INDEX apid_cluster_created_by_idx ON apid_cluster USING btree (created_by);
+
+CREATE TABLE data_scope (
+    id character varying(36) NOT NULL,
+    apid_cluster_id character varying(36) NOT NULL,
+    scope text NOT NULL,
+    org text,
+    env text,
+    created timestamp without time zone,
+    created_by text,
+    updated timestamp without time zone,
+    updated_by text,
+    _change_selector text,
+    CONSTRAINT data_scope_pkey PRIMARY KEY (id),
+    CONSTRAINT data_scope_apid_cluster_id_fk FOREIGN KEY (apid_cluster_id)
+          REFERENCES apid_cluster (id)
+          ON UPDATE NO ACTION ON DELETE CASCADE
+);
+CREATE INDEX apid_cluster_scope__change_selector_idx ON data_scope USING btree (_change_selector);
+CREATE INDEX apid_cluster_scope_apid_cluster_id_idx ON data_scope USING btree (apid_cluster_id);
+CREATE UNIQUE INDEX apid_cluster_scope_apid_cluster_id_org_env_idx ON data_scope USING btree (apid_cluster_id, org, env);
+CREATE INDEX data_scope_created_by_idx ON data_scope USING btree (created_by);
+
+
+CREATE TABLE bundle_config (
+    id character varying(36) NOT NULL,
+    data_scope_id character varying(36) NOT NULL,
+    name text NOT NULL,
+    uri text NOT NULL,
+    checksumType text,
+    checksum text,
+    created timestamp without time zone,
+    created_by text,
+    updated timestamp without time zone,
+    updated_by text,
+    CONSTRAINT bundle_config_pkey PRIMARY KEY (id),
+    CONSTRAINT bundle_config_data_scope_id_fk FOREIGN KEY (data_scope_id)
+          REFERENCES data_scope (id)
+          ON UPDATE NO ACTION ON DELETE CASCADE
+);
+
+CREATE INDEX bundle_config_data_scope_id_idx ON bundle_config USING btree (data_scope_id);
+CREATE INDEX bundle_config_created_by_idx ON bundle_config USING btree (created_by);
+
+CREATE TABLE deployment (
+    id character varying(36) NOT NULL,
+    bundle_config_id character varying(36) NOT NULL,
+    apid_cluster_id character varying(36) NOT NULL,
+    data_scope_id character varying(36) NOT NULL,
+    bundle_config_name text NOT NULL,
+    bundle_config_json text NOT NULL,
+    config_json text NOT NULL,
+    created timestamp without time zone,
+    created_by text,
+    updated timestamp without time zone,
+    updated_by text,
+    _change_selector text,
+    CONSTRAINT deployment_pkey PRIMARY KEY (id),
+    CONSTRAINT deployment_bundle_config_id_fk FOREIGN KEY (bundle_config_id)
+        REFERENCES bundle_config (id)
+        ON UPDATE NO ACTION ON DELETE CASCADE
+);
+
+CREATE TABLE deployment_history (
+        id character varying(36) NOT NULL,
+        deployment_id character varying(36) NOT NULL,
+        action text NOT NULL,
+        bundle_config_id character varying(36),
+        apid_cluster_id character varying(36) NOT NULL,
+        data_scope_id character varying(36) NOT NULL,
+        bundle_config_json text NOT NULL,
+        config_json text NOT NULL,
+        created timestamp without time zone,
+        created_by text,
+        updated timestamp without time zone,
+        updated_by text,
+        CONSTRAINT deployment_history_pkey PRIMARY KEY (id)
+);
+
+CREATE INDEX deployment__change_selector_idx ON deployment USING btree (_change_selector);
+CREATE INDEX deployment_apid_cluster_id_idx ON deployment USING btree (apid_cluster_id);
+CREATE INDEX deployment_bundle_config_id_idx ON deployment USING btree (bundle_config_id);
+CREATE INDEX deployment_data_scope_id_idx ON deployment USING btree (data_scope_id);
+CREATE INDEX deployment_created_by_idx ON deployment USING btree (created_by);
+
+CREATE TABLE configuration (
+    id character varying(36) NOT NULL,
+    body text NOT NULL DEFAULT '{}',
+    created timestamp without time zone,
+    created_by text,
+    updated timestamp without time zone,
+    updated_by text,
+    CONSTRAINT configuration_pkey PRIMARY KEY (id)
+);
+
+ALTER TABLE apid_cluster REPLICA IDENTITY FULL;
+ALTER TABLE data_scope REPLICA IDENTITY FULL;
+ALTER TABLE deployment REPLICA IDENTITY FULL;
\ No newline at end of file
diff --git a/dockertests/mockAuthServer.go b/dockertests/mockAuthServer.go
new file mode 100644
index 0000000..0dd0327
--- /dev/null
+++ b/dockertests/mockAuthServer.go
@@ -0,0 +1,44 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockertests
+
+import (
+	"encoding/json"
+	"github.com/30x/apid-core"
+	"github.com/30x/apidApigeeSync"
+	"net/http"
+)
+
+const oauthExpiresIn = 2 * 60
+
+type MockAuthServer struct {
+}
+
+func (m *MockAuthServer) sendToken(w http.ResponseWriter, req *http.Request) {
+	oauthToken := apidApigeeSync.GenerateUUID()
+	res := apidApigeeSync.OauthToken{
+		AccessToken: oauthToken,
+		ExpiresIn:   oauthExpiresIn,
+	}
+	body, err := json.Marshal(res)
+	if err != nil {
+		panic(err)
+	}
+	w.Write(body)
+}
+
+func (m *MockAuthServer) Start(router apid.Router) {
+	router.HandleFunc("/accesstoken", m.sendToken).Methods("POST")
+}
diff --git a/dockertests/pg_table_data.go b/dockertests/pg_table_data.go
new file mode 100644
index 0000000..0a1aecd
--- /dev/null
+++ b/dockertests/pg_table_data.go
@@ -0,0 +1,89 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockertests
+
+import "time"
+
+type apidClusterRow struct {
+	id             string
+	name           string
+	description    string
+	appName        string
+	created        time.Time
+	createdBy      string
+	updated        time.Time
+	updatedBy      string
+	changeSelector string
+}
+
+/* FOREIGN KEY (apid_cluster_id)
+ * REFERENCES apid_cluster(id) ON DELETE CASCADE
+ */
+type dataScopeRow struct {
+	id             string
+	clusterId      string
+	scope          string
+	org            string
+	env            string
+	created        time.Time
+	createdBy      string
+	updated        time.Time
+	updatedBy      string
+	changeSelector string
+}
+
+/* FOREIGN KEY (data_scope_id)
+ * REFERENCES data_scope(id) ON DELETE CASCADE
+ */
+type bundleConfigRow struct {
+	id           string
+	scopeId      string
+	name         string
+	uri          string
+	checksumType string
+	checksum     string
+	created      time.Time
+	createdBy    string
+	updated      time.Time
+	updatedBy    string
+}
+
+/* FOREIGN KEY (bundle_config_id)
+ * REFERENCES bundle_config(id) ON DELETE CASCADE
+ */
+type deploymentRow struct {
+	id               string
+	configId         string
+	clusterId        string
+	scopeId          string
+	bundleConfigName string
+	bundleConfigJson string
+	configJson       string
+	created          time.Time
+	createdBy        string
+	updated          time.Time
+	updatedBy        string
+	changeSelector   string
+}
+
+type bundleConfigData struct {
+	Id        string `json:"id"`
+	Created   string `json:"created"`
+	CreatedBy string `json:"createdBy"`
+	Updated   string `json:"updated"`
+	UpdatedBy string `json:"updatedBy"`
+	Name      string `json:"name"`
+	Uri       string `json:"uri"`
+}
diff --git a/dockertests/user-setup.sql b/dockertests/user-setup.sql
new file mode 100644
index 0000000..8068c40
--- /dev/null
+++ b/dockertests/user-setup.sql
@@ -0,0 +1 @@
+ALTER USER postgres SET search_path = edgex;
\ No newline at end of file
diff --git a/glide.yaml b/glide.yaml
index 643a0c9..7f93679 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -19,3 +19,4 @@
 testImport:
 - package: github.com/onsi/ginkgo/ginkgo
 - package: github.com/onsi/gomega
+- package: github.com/lib/pq
diff --git a/managerInterfaces.go b/managerInterfaces.go
index ae80cec..5022bdd 100644
--- a/managerInterfaces.go
+++ b/managerInterfaces.go
@@ -22,7 +22,7 @@
 type tokenManager interface {
 	getBearerToken() string
 	invalidateToken() error
-	getToken() *oauthToken
+	getToken() *OauthToken
 	close()
 	getRetrieveNewTokenClosure(*url.URL) func(chan bool) error
 	start()
diff --git a/mock_server.go b/mock_server.go
index 4ce28bf..94a0651 100644
--- a/mock_server.go
+++ b/mock_server.go
@@ -248,8 +248,8 @@
 	err = json.Unmarshal(plInfo, &plugInfo)
 	Expect(err).NotTo(HaveOccurred())
 
-	m.oauthToken = generateUUID()
-	res := oauthToken{
+	m.oauthToken = GenerateUUID()
+	res := OauthToken{
 		AccessToken: m.oauthToken,
 		ExpiresIn:   oauthExpiresIn,
 	}
@@ -266,7 +266,7 @@
 
 	Expect(scopes).To(ContainElement(m.params.ClusterID))
 
-	w.Header().Set("Transicator-Snapshot-TXID", generateUUID())
+	w.Header().Set("Transicator-Snapshot-TXID", GenerateUUID())
 
 	if len(scopes) == 1 {
 		//send bootstrap db
@@ -405,7 +405,7 @@
 func (m *MockServer) createDeployment() tableRowMap {
 
 	deploymentID := m.nextDeploymentID()
-	bundleID := generateUUID()
+	bundleID := GenerateUUID()
 
 	listen := apid.Config().GetString("api_listen")
 	_, port, err := net.SplitHostPort(listen)
diff --git a/token.go b/token.go
index 0b7521e..1612025 100644
--- a/token.go
+++ b/token.go
@@ -47,7 +47,7 @@
 		closed:              make(chan bool),
 		getTokenChan:        make(chan bool),
 		invalidateTokenChan: make(chan bool),
-		returnTokenChan:     make(chan *oauthToken),
+		returnTokenChan:     make(chan *OauthToken),
 		invalidateDone:      make(chan bool),
 		isClosed:            &isClosedInt,
 	}
@@ -55,14 +55,14 @@
 }
 
 type simpleTokenManager struct {
-	token               *oauthToken
+	token               *OauthToken
 	isClosed            *int32
 	quitPollingForToken chan bool
 	closed              chan bool
 	getTokenChan        chan bool
 	invalidateTokenChan chan bool
 	refreshTimer        <-chan time.Time
-	returnTokenChan     chan *oauthToken
+	returnTokenChan     chan *OauthToken
 	invalidateDone      chan bool
 }
 
@@ -109,7 +109,7 @@
 	return nil
 }
 
-func (t *simpleTokenManager) getToken() *oauthToken {
+func (t *simpleTokenManager) getToken() *OauthToken {
 	//has been closed
 	if atomic.LoadInt32(t.isClosed) == int32(1) {
 		log.Debug("TokenManager: getToken() called on closed tokenManager")
@@ -190,7 +190,7 @@
 			return expected200Error{}
 		}
 
-		var token oauthToken
+		var token OauthToken
 		err = json.Unmarshal(body, &token)
 		if err != nil {
 			log.Errorf("unable to unmarshal JSON response '%s': %v", string(body), err)
@@ -222,7 +222,7 @@
 	}
 }
 
-type oauthToken struct {
+type OauthToken struct {
 	IssuedAt    int64    `json:"issuedAt"`
 	AppName     string   `json:"applicationName"`
 	Scope       string   `json:"scope"`
@@ -241,21 +241,21 @@
 
 var noTime time.Time
 
-func (t *oauthToken) isValid() bool {
+func (t *OauthToken) isValid() bool {
 	if t == nil || t.AccessToken == "" {
 		return false
 	}
 	return t.AccessToken != "" && time.Now().Before(t.ExpiresAt)
 }
 
-func (t *oauthToken) refreshIn() time.Duration {
+func (t *OauthToken) refreshIn() time.Duration {
 	if t == nil || t.ExpiresAt == noTime {
 		return time.Duration(0)
 	}
 	return t.ExpiresAt.Sub(time.Now()) - refreshFloatTime
 }
 
-func (t *oauthToken) needsRefresh() bool {
+func (t *OauthToken) needsRefresh() bool {
 	if t == nil || t.ExpiresAt == noTime {
 		return true
 	}
diff --git a/token_test.go b/token_test.go
index 472f790..85b71c7 100644
--- a/token_test.go
+++ b/token_test.go
@@ -36,7 +36,7 @@
 		It("should calculate valid token", func() {
 			log.Info("Starting token tests...")
 
-			t := &oauthToken{
+			t := &OauthToken{
 				AccessToken: "x",
 				ExpiresIn:   120,
 				ExpiresAt:   time.Now().Add(2 * time.Minute),
@@ -48,7 +48,7 @@
 
 		It("should calculate expired token", func() {
 
-			t := &oauthToken{
+			t := &OauthToken{
 				AccessToken: "x",
 				ExpiresIn:   0,
 				ExpiresAt:   time.Now(),
@@ -60,7 +60,7 @@
 
 		It("should calculate token needing refresh", func() {
 
-			t := &oauthToken{
+			t := &OauthToken{
 				AccessToken: "x",
 				ExpiresIn:   59,
 				ExpiresAt:   time.Now().Add(time.Minute - time.Second),
@@ -72,7 +72,7 @@
 
 		It("should calculate on empty token", func() {
 
-			t := &oauthToken{}
+			t := &OauthToken{}
 			Expect(t.refreshIn().Seconds()).To(BeNumerically("<=", 0))
 			Expect(t.needsRefresh()).To(BeTrue())
 			Expect(t.isValid()).To(BeFalse())
@@ -85,7 +85,7 @@
 			ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 				defer GinkgoRecover()
 
-				res := oauthToken{
+				res := OauthToken{
 					AccessToken: "ABCD",
 					ExpiresIn:   1,
 				}
@@ -113,8 +113,8 @@
 			ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 				defer GinkgoRecover()
 
-				res := oauthToken{
-					AccessToken: generateUUID(),
+				res := OauthToken{
+					AccessToken: GenerateUUID(),
 					ExpiresIn:   1,
 				}
 				body, err := json.Marshal(res)
@@ -153,7 +153,7 @@
 					finished <- true
 				}
 
-				res := oauthToken{
+				res := OauthToken{
 					AccessToken: string(count),
 					ExpiresIn:   1,
 				}
@@ -194,7 +194,7 @@
 					Expect(r.Header.Get("updated_at_apid")).NotTo(BeEmpty())
 					finished <- true
 				}
-				res := oauthToken{
+				res := OauthToken{
 					AccessToken: string(count),
 					ExpiresIn:   200,
 				}