Move to vendor dir

Zachary Gershman
2016-06-21 08:09:19 -07:00
parent cd28eb3859
commit dc2870080e
282 changed files with 135 additions and 6438 deletions

Godeps/Godeps.json

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/concourse/github-release-resource",
"GoVersion": "go1.5",
"GoVersion": "go1.6",
"GodepVersion": "v74",
"Packages": [
"./..."
@@ -31,11 +31,141 @@
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/config",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/containernode",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/failer",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/remote",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/spec",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/suite",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/writer",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/reporters",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/ginkgo/types",
"Comment": "v1.2.0-42-g07d85e6",
"Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b"
},
{
"ImportPath": "github.com/onsi/gomega",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/format",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/ghttp",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/internal/assertion",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/internal/asyncassertion",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/internal/oraclematcher",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/internal/testingtsupport",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/matchers",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/edge",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/node",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/util",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/onsi/gomega/types",
"Comment": "v1.0-83-gc72df92",
"Rev": "c72df929b80ef4930aaa75d5e486887ff2f3e06a"
},
{
"ImportPath": "github.com/xoebus/statham",
"Rev": "7b5896306a82ba5c78d2b0c8df4d29e36ba9ac2f"
@@ -47,6 +177,10 @@
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
},
{
"ImportPath": "golang.org/x/oauth2/internal",
"Rev": "8a57ed94ffd43444c0879fe75701732a38afc985"
}
]
}

Godeps/_workspace/.gitignore

@@ -1,2 +0,0 @@
/pkg
/bin


@@ -1,122 +0,0 @@
// Code generated by protoc-gen-go.
// source: proto3_proto/proto3.proto
// DO NOT EDIT!
/*
Package proto3_proto is a generated protocol buffer package.
It is generated from these files:
proto3_proto/proto3.proto
It has these top-level messages:
Message
Nested
MessageWithMap
*/
package proto3_proto
import proto "github.com/golang/protobuf/proto"
import testdata "github.com/golang/protobuf/proto/testdata"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
type Message_Humour int32
const (
Message_UNKNOWN Message_Humour = 0
Message_PUNS Message_Humour = 1
Message_SLAPSTICK Message_Humour = 2
Message_BILL_BAILEY Message_Humour = 3
)
var Message_Humour_name = map[int32]string{
0: "UNKNOWN",
1: "PUNS",
2: "SLAPSTICK",
3: "BILL_BAILEY",
}
var Message_Humour_value = map[string]int32{
"UNKNOWN": 0,
"PUNS": 1,
"SLAPSTICK": 2,
"BILL_BAILEY": 3,
}
func (x Message_Humour) String() string {
return proto.EnumName(Message_Humour_name, int32(x))
}
type Message struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"`
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"`
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"`
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (m *Message) GetNested() *Nested {
if m != nil {
return m.Nested
}
return nil
}
func (m *Message) GetTerrain() map[string]*Nested {
if m != nil {
return m.Terrain
}
return nil
}
func (m *Message) GetProto2Field() *testdata.SubDefaults {
if m != nil {
return m.Proto2Field
}
return nil
}
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
if m != nil {
return m.Proto2Value
}
return nil
}
type Nested struct {
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
}
func (m *Nested) Reset() { *m = Nested{} }
func (m *Nested) String() string { return proto.CompactTextString(m) }
func (*Nested) ProtoMessage() {}
type MessageWithMap struct {
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
func (*MessageWithMap) ProtoMessage() {}
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
if m != nil {
return m.ByteMapping
}
return nil
}
func init() {
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
}


@@ -1,68 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
import "testdata/test.proto";
package proto3_proto;
message Message {
enum Humour {
UNKNOWN = 0;
PUNS = 1;
SLAPSTICK = 2;
BILL_BAILEY = 3;
}
string name = 1;
Humour hilarity = 2;
uint32 height_in_cm = 3;
bytes data = 4;
int64 result_count = 7;
bool true_scotsman = 8;
float score = 9;
repeated uint64 key = 5;
Nested nested = 6;
map<string, Nested> terrain = 10;
testdata.SubDefaults proto2_field = 11;
map<string, testdata.SubDefaults> proto2_value = 13;
}
message Nested {
string bunny = 1;
}
message MessageWithMap {
map<bool, bytes> byte_mapping = 1;
}


@@ -1,98 +0,0 @@
/*
Table provides a simple DSL for Ginkgo-native Table-Driven Tests
The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
*/
package table
import (
"fmt"
"reflect"
"github.com/onsi/ginkgo"
)
/*
DescribeTable describes a table-driven test.
For example:
DescribeTable("a simple table",
func(x int, y int, expected bool) {
Ω(x > y).Should(Equal(expected))
},
Entry("x > y", 1, 0, true),
Entry("x == y", 0, 0, false),
Entry("x < y", 0, 1, false),
)
The first argument to `DescribeTable` is a string description.
The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It.
The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors.
The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function.
Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`.
It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
*/
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, false, false)
return true
}
/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, false, true)
return true
}
/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, true, false)
return true
}
/*
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
*/
func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, true, false)
return true
}
func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
itBodyValue := reflect.ValueOf(itBody)
if itBodyValue.Kind() != reflect.Func {
panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
}
if pending {
ginkgo.PDescribe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
} else if focused {
ginkgo.FDescribe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
} else {
ginkgo.Describe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
}
}


@@ -1,81 +0,0 @@
package table
import (
"reflect"
"github.com/onsi/ginkgo"
)
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
Description string
Parameters []interface{}
Pending bool
Focused bool
}
func (t TableEntry) generateIt(itBody reflect.Value) {
if t.Pending {
ginkgo.PIt(t.Description)
return
}
values := []reflect.Value{}
for i, param := range t.Parameters {
var value reflect.Value
if param == nil {
inType := itBody.Type().In(i)
value = reflect.Zero(inType)
} else {
value = reflect.ValueOf(param)
}
values = append(values, value)
}
body := func() {
itBody.Call(values)
}
if t.Focused {
ginkgo.FIt(t.Description, body)
} else {
ginkgo.It(t.Description, body)
}
}
/*
Entry constructs a TableEntry.
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
Each Entry ends up generating an individual Ginkgo It.
*/
func Entry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, false}
}
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
func FEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, true}
}
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
func PEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}
/*
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
*/
func XEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}
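
An illustrative sketch (not part of this commit) of the nil-parameter handling in generateIt above: a nil entry parameter is replaced with the zero value of the corresponding parameter type of the table function, so typed "empty" arguments can be written as nil. The table assumes the usual dot-imports of ginkgo and gomega shown in the DescribeTable documentation.

DescribeTable("zero values for nil parameters",
	func(s []string, expectedLen int) {
		// param 0 was nil, so generateIt passes reflect.Zero([]string), i.e. a nil slice
		Ω(len(s)).Should(Equal(expectedLen))
	},
	Entry("nil slice becomes the zero value", nil, 0),
	Entry("non-nil slice is passed through", []string{"a", "b"}, 2),
)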


@@ -1,182 +0,0 @@
package main
import (
"bytes"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"text/template"
"go/build"
"github.com/onsi/ginkgo/ginkgo/nodot"
)
func BuildBootstrapCommand() *Command {
var agouti, noDot bool
flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
return &Command{
Name: "bootstrap",
FlagSet: flagSet,
UsageCommand: "ginkgo bootstrap <FLAGS>",
Usage: []string{
"Bootstrap a test suite for the current package",
"Accepts the following flags:",
},
Command: func(args []string, additionalArgs []string) {
generateBootstrap(agouti, noDot)
},
}
}
var bootstrapText = `package {{.Package}}_test
import (
{{.GinkgoImport}}
{{.GomegaImport}}
"testing"
)
func Test{{.FormattedName}}(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "{{.FormattedName}} Suite")
}
`
var agoutiBootstrapText = `package {{.Package}}_test
import (
{{.GinkgoImport}}
{{.GomegaImport}}
"github.com/sclevine/agouti"
"testing"
)
func Test{{.FormattedName}}(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "{{.FormattedName}} Suite")
}
var agoutiDriver *agouti.WebDriver
var _ = BeforeSuite(func() {
// Choose a WebDriver:
agoutiDriver = agouti.PhantomJS()
// agoutiDriver = agouti.Selenium()
// agoutiDriver = agouti.ChromeDriver()
Expect(agoutiDriver.Start()).To(Succeed())
})
var _ = AfterSuite(func() {
Expect(agoutiDriver.Stop()).To(Succeed())
})
`
type bootstrapData struct {
Package string
FormattedName string
GinkgoImport string
GomegaImport string
}
func getPackageAndFormattedName() (string, string, string) {
path, err := os.Getwd()
if err != nil {
complainAndQuit("Could not get current working directory: \n" + err.Error())
}
dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
dirName = strings.Replace(dirName, " ", "_", -1)
pkg, err := build.ImportDir(path, 0)
packageName := pkg.Name
if err != nil {
packageName = dirName
}
formattedName := prettifyPackageName(filepath.Base(path))
return packageName, dirName, formattedName
}
func prettifyPackageName(name string) string {
name = strings.Replace(name, "-", " ", -1)
name = strings.Replace(name, "_", " ", -1)
name = strings.Title(name)
name = strings.Replace(name, " ", "", -1)
return name
}
func fileExists(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
return false
}
func generateBootstrap(agouti bool, noDot bool) {
packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
data := bootstrapData{
Package: packageName,
FormattedName: formattedName,
GinkgoImport: `. "github.com/onsi/ginkgo"`,
GomegaImport: `. "github.com/onsi/gomega"`,
}
if noDot {
data.GinkgoImport = `"github.com/onsi/ginkgo"`
data.GomegaImport = `"github.com/onsi/gomega"`
}
targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
if fileExists(targetFile) {
fmt.Printf("%s already exists.\n\n", targetFile)
os.Exit(1)
} else {
fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
}
f, err := os.Create(targetFile)
if err != nil {
complainAndQuit("Could not create file: " + err.Error())
panic(err.Error())
}
defer f.Close()
var templateText string
if agouti {
templateText = agoutiBootstrapText
} else {
templateText = bootstrapText
}
bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
if err != nil {
panic(err.Error())
}
buf := &bytes.Buffer{}
bootstrapTemplate.Execute(buf, data)
if noDot {
contents, err := nodot.ApplyNoDot(buf.Bytes())
if err != nil {
complainAndQuit("Failed to import nodot declarations: " + err.Error())
}
fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
buf = bytes.NewBuffer(contents)
}
buf.WriteTo(f)
goFmt(targetFile)
}


@@ -1,68 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
"github.com/onsi/ginkgo/ginkgo/testrunner"
)
func BuildBuildCommand() *Command {
commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
interruptHandler := interrupthandler.NewInterruptHandler()
builder := &SpecBuilder{
commandFlags: commandFlags,
interruptHandler: interruptHandler,
}
return &Command{
Name: "build",
FlagSet: commandFlags.FlagSet,
UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
Usage: []string{
"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
"Accepts the following flags:",
},
Command: builder.BuildSpecs,
}
}
type SpecBuilder struct {
commandFlags *RunWatchAndBuildCommandFlags
interruptHandler *interrupthandler.InterruptHandler
}
func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
r.commandFlags.computeNodes()
suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
if len(suites) == 0 {
complainAndQuit("Found no test suites")
}
passed := true
for _, suite := range suites {
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
fmt.Printf("Compiling %s...\n", suite.PackageName)
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
err := runner.CompileTo(path)
if err != nil {
fmt.Println(err.Error())
passed = false
} else {
fmt.Printf(" compiled %s.test\n", suite.PackageName)
}
runner.CleanUp()
}
if passed {
os.Exit(0)
}
os.Exit(1)
}


@@ -1,123 +0,0 @@
package convert
import (
"fmt"
"go/ast"
"strings"
"unicode"
)
/*
* Creates a var _ = ... value spec node (which will hold the generated Describe block)
*/
func createVarUnderscoreBlock() *ast.ValueSpec {
valueSpec := &ast.ValueSpec{}
object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
ident := &ast.Ident{Name: "_", Obj: object}
valueSpec.Names = append(valueSpec.Names, ident)
return valueSpec
}
/*
* Creates a Describe("Testing with ginkgo", func() { }) node
*/
func createDescribeBlock() *ast.CallExpr {
blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
fieldList := &ast.FieldList{}
funcType := &ast.FuncType{Params: fieldList}
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
describeIdent := &ast.Ident{Name: "Describe"}
return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
}
/*
* Convenience function to return the name of the *testing.T param
* for a Test function that will be rewritten. This is useful because
* we will want to replace the usage of this named *testing.T inside the
* body of the function with a GinkgoT.
*/
func namedTestingTArg(node *ast.FuncDecl) string {
return node.Type.Params.List[0].Names[0].Name // *exhale*
}
/*
* Convenience function to return the block statement node for a Describe statement
*/
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
var funcLit *ast.FuncLit
var found = false
for _, node := range desc.Args {
switch node := node.(type) {
case *ast.FuncLit:
found = true
funcLit = node
break
}
}
if !found {
panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
}
return funcLit.Body
}
/* convenience function for creating an It("TestNameHere")
* with all the body of the test function inside the anonymous
* func passed to It()
*/
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
fieldList := &ast.FieldList{}
funcType := &ast.FuncType{Params: fieldList}
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
testName := rewriteTestName(testFunc.Name.Name)
basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
itBlockIdent := &ast.Ident{Name: "It"}
callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
return &ast.ExprStmt{X: callExpr}
}
/*
* rewrite test names to be human readable
* eg: rewrites "TestSomethingAmazing" as "something amazing"
*/
func rewriteTestName(testName string) string {
nameComponents := []string{}
currentString := ""
indexOfTest := strings.Index(testName, "Test")
if indexOfTest != 0 {
return testName
}
testName = strings.Replace(testName, "Test", "", 1)
first, rest := testName[0], testName[1:]
testName = string(unicode.ToLower(rune(first))) + rest
for _, rune := range testName {
if unicode.IsUpper(rune) {
nameComponents = append(nameComponents, currentString)
currentString = string(unicode.ToLower(rune))
} else {
currentString += string(rune)
}
}
return strings.Join(append(nameComponents, currentString), " ")
}
func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
return &ast.CallExpr{
Lparen: ident.NamePos + 1,
Rparen: ident.NamePos + 2,
Fun: &ast.Ident{Name: "GinkgoT"},
}
}
func newGinkgoTInterface() *ast.Ident {
return &ast.Ident{Name: "GinkgoTInterface"}
}


@@ -1,91 +0,0 @@
package convert
import (
"errors"
"fmt"
"go/ast"
)
/*
* Given the root node of an AST, returns the node containing the
* import statements for the file.
*/
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
for _, declaration := range rootNode.Decls {
decl, ok := declaration.(*ast.GenDecl)
if !ok || len(decl.Specs) == 0 {
continue
}
_, ok = decl.Specs[0].(*ast.ImportSpec)
if ok {
imports = decl
return
}
}
err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode))
return
}
/*
* Removes "testing" import, if present
*/
func removeTestingImport(rootNode *ast.File) {
importDecl, err := importsForRootNode(rootNode)
if err != nil {
panic(err.Error())
}
var index int
for i, importSpec := range importDecl.Specs {
importSpec := importSpec.(*ast.ImportSpec)
if importSpec.Path.Value == "\"testing\"" {
index = i
break
}
}
importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
}
/*
* Adds import statements for onsi/ginkgo, if missing
*/
func addGinkgoImports(rootNode *ast.File) {
importDecl, err := importsForRootNode(rootNode)
if err != nil {
panic(err.Error())
}
if len(importDecl.Specs) == 0 {
// TODO: might need to create a import decl here
panic("unimplemented : expected to find an imports block")
}
needsGinkgo := true
for _, importSpec := range importDecl.Specs {
importSpec, ok := importSpec.(*ast.ImportSpec)
if !ok {
continue
}
if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
needsGinkgo = false
}
}
if needsGinkgo {
importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
}
}
/*
* convenience function to create an import statement
*/
func createImport(name, path string) *ast.ImportSpec {
return &ast.ImportSpec{
Name: &ast.Ident{Name: name},
Path: &ast.BasicLit{Kind: 9, Value: path},
}
}


@@ -1,127 +0,0 @@
package convert
import (
"fmt"
"go/build"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
)
/*
* RewritePackage takes a name (eg: my-package/tools), finds its test files using
* Go's build package, and then rewrites them. A ginkgo test suite file will
* also be added for this package, and all of its child packages.
*/
func RewritePackage(packageName string) {
pkg, err := packageWithName(packageName)
if err != nil {
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
}
for _, filename := range findTestsInPackage(pkg) {
rewriteTestsInFile(filename)
}
return
}
/*
* Given a package, findTestsInPackage reads the test files in the directory,
* and then recurses on each child package, returning a slice of all test files
* found in this process.
*/
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
}
dirFiles, err := ioutil.ReadDir(pkg.Dir)
if err != nil {
panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
}
re := regexp.MustCompile(`^[._]`)
for _, file := range dirFiles {
if !file.IsDir() {
continue
}
if re.Match([]byte(file.Name())) {
continue
}
packageName := filepath.Join(pkg.ImportPath, file.Name())
subPackage, err := packageWithName(packageName)
if err != nil {
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
}
testfiles = append(testfiles, findTestsInPackage(subPackage)...)
}
addGinkgoSuiteForPackage(pkg)
goFmtPackage(pkg)
return
}
/*
* Shells out to `ginkgo bootstrap` to create a test suite file
*/
func addGinkgoSuiteForPackage(pkg *build.Package) {
originalDir, err := os.Getwd()
if err != nil {
panic(err)
}
suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
_, err = os.Stat(suite_test_file)
if err == nil {
return // test file already exists, this should be a no-op
}
err = os.Chdir(pkg.Dir)
if err != nil {
panic(err)
}
output, err := exec.Command("ginkgo", "bootstrap").Output()
if err != nil {
panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
}
err = os.Chdir(originalDir)
if err != nil {
panic(err)
}
}
/*
* Shells out to `go fmt` to format the package
*/
func goFmtPackage(pkg *build.Package) {
output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
if err != nil {
fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
}
}
/*
* Attempts to return a package with its test files already read.
* The ImportMode arg to build.Import lets you specify if you want go to read the
* buildable go files inside the package, but it fails if the package has no go files
*/
func packageWithName(name string) (pkg *build.Package, err error) {
pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
if err == nil {
return
}
pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
return
}


@@ -1,56 +0,0 @@
package convert
import (
"go/ast"
"regexp"
)
/*
* Given a root node, walks its top level statements and returns
* pointers to function nodes to rewrite as It statements.
* These functions, according to Go testing convention, must be named
* TestWithCamelCasedName and receive a single *testing.T argument.
*/
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
ast.Inspect(rootNode, func(node ast.Node) bool {
if node == nil {
return false
}
switch node := node.(type) {
case *ast.FuncDecl:
matches := testNameRegexp.MatchString(node.Name.Name)
if matches && receivesTestingT(node) {
testsToRewrite = append(testsToRewrite, node)
}
}
return true
})
return
}
/*
* convenience function that looks at args to a function and determines if its
* params include an argument of type *testing.T
*/
func receivesTestingT(node *ast.FuncDecl) bool {
if len(node.Type.Params.List) != 1 {
return false
}
base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
if !ok {
return false
}
intermediate := base.X.(*ast.SelectorExpr)
isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
isTestingT := intermediate.Sel.Name == "T"
return isTestingPackage && isTestingT
}


@@ -1,163 +0,0 @@
package convert
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"os"
)
/*
* Given a file path, rewrites any tests in the Ginkgo format.
* First, we parse the AST, and update the imports declaration.
* Then, we walk the first child elements in the file, returning tests to rewrite.
* A top level init func is declared, with a single Describe func inside.
* Then the test functions to rewrite are inserted as It statements inside the Describe.
* Finally we walk the rest of the file, replacing other usages of *testing.T
* Once that is complete, we write the AST back out again to its file.
*/
func rewriteTestsInFile(pathToFile string) {
fileSet := token.NewFileSet()
rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
if err != nil {
panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
}
addGinkgoImports(rootNode)
removeTestingImport(rootNode)
varUnderscoreBlock := createVarUnderscoreBlock()
describeBlock := createDescribeBlock()
varUnderscoreBlock.Values = []ast.Expr{describeBlock}
for _, testFunc := range findTestFuncs(rootNode) {
rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
}
underscoreDecl := &ast.GenDecl{
Tok: 85, // gah, magick numbers are needed to make this work
TokPos: 14, // this tricks Go into writing "var _ = Describe"
Specs: []ast.Spec{varUnderscoreBlock},
}
imports := rootNode.Decls[0]
tail := rootNode.Decls[1:]
rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
walkNodesInRootNodeReplacingTestingT(rootNode)
var buffer bytes.Buffer
if err = format.Node(&buffer, fileSet, rootNode); err != nil {
panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
}
fileInfo, err := os.Stat(pathToFile)
if err != nil {
panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
}
ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
return
}
/*
* Given a test func named TestDoesSomethingNeat, rewrites it as
* It("does something neat", func() { __test_body_here__ }) and adds it
* to the Describe's list of statements
*/
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
var funcIndex int = -1
for index, child := range rootNode.Decls {
if child == testFunc {
funcIndex = index
break
}
}
if funcIndex < 0 {
panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
}
var block *ast.BlockStmt = blockStatementFromDescribe(describe)
block.List = append(block.List, createItStatementForTestFunc(testFunc))
replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
// remove the old test func from the root node's declarations
rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
return
}
/*
* walks nodes inside of a test func's statements and replaces the usage of
* its named *testing.T param with GinkgoT()
*/
func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
ast.Inspect(statementsBlock, func(node ast.Node) bool {
if node == nil {
return false
}
keyValueExpr, ok := node.(*ast.KeyValueExpr)
if ok {
replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
return true
}
funcLiteral, ok := node.(*ast.FuncLit)
if ok {
replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
return true
}
callExpr, ok := node.(*ast.CallExpr)
if !ok {
return true
}
replaceTestingTsInArgsLists(callExpr, testingT)
funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
if ok {
replaceTestingTsMethodCalls(funCall, testingT)
}
return true
})
}
/*
* rewrite t.Fail() or any other *testing.T method by replacing with T().Fail()
* This function receives a selector expression (eg: t.Fail()) and
* the name of the *testing.T param from the function declaration. Rewrites the
* selector expression in place if the target was a *testing.T
*/
func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
ident, ok := selectorExpr.X.(*ast.Ident)
if !ok {
return
}
if ident.Name == testingT {
selectorExpr.X = newGinkgoTFromIdent(ident)
}
}
/*
* replaces usages of a named *testing.T param inside of a call expression
* with a new GinkgoT object
*/
func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
for index, arg := range callExpr.Args {
ident, ok := arg.(*ast.Ident)
if !ok {
continue
}
if ident.Name == testingT {
callExpr.Args[index] = newGinkgoTFromIdent(ident)
}
}
}


@@ -1,130 +0,0 @@
package convert
import (
"go/ast"
)
/*
* Rewrites any other top level funcs that receive a *testing.T param
*/
func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
for _, decl := range declarations {
decl, ok := decl.(*ast.FuncDecl)
if !ok {
continue
}
for _, param := range decl.Type.Params.List {
starExpr, ok := param.Type.(*ast.StarExpr)
if !ok {
continue
}
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
if !ok {
continue
}
xIdent, ok := selectorExpr.X.(*ast.Ident)
if !ok || xIdent.Name != "testing" {
continue
}
if selectorExpr.Sel.Name != "T" {
continue
}
param.Type = newGinkgoTInterface()
}
}
}
/*
* Walks all of the nodes in the file, replacing *testing.T in struct
* and func literal nodes. eg:
* type foo struct { *testing.T }
* var bar = func(t *testing.T) { }
*/
func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
ast.Inspect(rootNode, func(node ast.Node) bool {
if node == nil {
return false
}
switch node := node.(type) {
case *ast.StructType:
replaceTestingTsInStructType(node)
case *ast.FuncLit:
replaceTypeDeclTestingTsInFuncLiteral(node)
}
return true
})
}
/*
* replaces named *testing.T inside a composite literal
*/
func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
ident, ok := kve.Value.(*ast.Ident)
if !ok {
return
}
if ident.Name == testingT {
kve.Value = newGinkgoTFromIdent(ident)
}
}
/*
* replaces *testing.T params in a func literal with GinkgoT
*/
func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
for _, arg := range functionLiteral.Type.Params.List {
starExpr, ok := arg.Type.(*ast.StarExpr)
if !ok {
continue
}
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
if !ok {
continue
}
target, ok := selectorExpr.X.(*ast.Ident)
if !ok {
continue
}
if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
arg.Type = newGinkgoTInterface()
}
}
}
/*
* Replaces *testing.T types inside of a struct declaration with a GinkgoT
* eg: type foo struct { *testing.T }
*/
func replaceTestingTsInStructType(structType *ast.StructType) {
for _, field := range structType.Fields.List {
starExpr, ok := field.Type.(*ast.StarExpr)
if !ok {
continue
}
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
if !ok {
continue
}
xIdent, ok := selectorExpr.X.(*ast.Ident)
if !ok {
continue
}
if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
field.Type = newGinkgoTInterface()
}
}
}


@@ -1,44 +0,0 @@
package main
import (
"flag"
"fmt"
"github.com/onsi/ginkgo/ginkgo/convert"
"os"
)
func BuildConvertCommand() *Command {
return &Command{
Name: "convert",
FlagSet: flag.NewFlagSet("convert", flag.ExitOnError),
UsageCommand: "ginkgo convert /path/to/package",
Usage: []string{
"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
},
Command: convertPackage,
}
}
func convertPackage(args []string, additionalArgs []string) {
if len(args) != 1 {
println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
os.Exit(1)
}
defer func() {
err := recover()
if err != nil {
switch err := err.(type) {
case error:
println(err.Error())
case string:
println(err)
default:
println(fmt.Sprintf("unexpected error: %#v", err))
}
os.Exit(1)
}
}()
convert.RewritePackage(args[0])
}
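
A sketch (not part of this commit) of the rewrite the convert package above performs, pieced together from its own comments: createDescribeBlock, createItStatementForTestFunc, rewriteTestName and replaceTestingTsMethodCalls. The test function is hypothetical.

// Before: a conventional xUnit-style test.
func TestSomethingAmazing(t *testing.T) {
	if 1+1 != 2 {
		t.Fatal("math is broken")
	}
}

// After `ginkgo convert`: the body is wrapped in an It inside the generated
// Describe, and calls on the named *testing.T go through GinkgoT() instead.
var _ = Describe("Testing with Ginkgo", func() {
	It("something amazing", func() {
		if 1+1 != 2 {
			GinkgoT().Fatal("math is broken")
		}
	})
})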


@@ -1,164 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"text/template"
)
func BuildGenerateCommand() *Command {
var agouti, noDot bool
flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
return &Command{
Name: "generate",
FlagSet: flagSet,
UsageCommand: "ginkgo generate <filename(s)>",
Usage: []string{
"Generate a test file named filename_test.go",
"If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
"Accepts the following flags:",
},
Command: func(args []string, additionalArgs []string) {
generateSpec(args, agouti, noDot)
},
}
}
var specText = `package {{.Package}}_test
import (
. "{{.PackageImportPath}}"
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
})
`
var agoutiSpecText = `package {{.Package}}_test
import (
. "{{.PackageImportPath}}"
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
. "github.com/sclevine/agouti/matchers"
"github.com/sclevine/agouti"
)
var _ = Describe("{{.Subject}}", func() {
var page *agouti.Page
BeforeEach(func() {
var err error
page, err = agoutiDriver.NewPage()
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
Expect(page.Destroy()).To(Succeed())
})
})
`
type specData struct {
Package string
Subject string
PackageImportPath string
IncludeImports bool
}
func generateSpec(args []string, agouti, noDot bool) {
if len(args) == 0 {
err := generateSpecForSubject("", agouti, noDot)
if err != nil {
fmt.Println(err.Error())
fmt.Println("")
os.Exit(1)
}
fmt.Println("")
return
}
var failed bool
for _, arg := range args {
err := generateSpecForSubject(arg, agouti, noDot)
if err != nil {
failed = true
fmt.Println(err.Error())
}
}
fmt.Println("")
if failed {
os.Exit(1)
}
}
func generateSpecForSubject(subject string, agouti, noDot bool) error {
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
if subject != "" {
subject = strings.Split(subject, ".go")[0]
subject = strings.Split(subject, "_test")[0]
specFilePrefix = subject
formattedName = prettifyPackageName(subject)
}
data := specData{
Package: packageName,
Subject: formattedName,
PackageImportPath: getPackageImportPath(),
IncludeImports: !noDot,
}
targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
if fileExists(targetFile) {
return fmt.Errorf("%s already exists.", targetFile)
} else {
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
}
f, err := os.Create(targetFile)
if err != nil {
return err
}
defer f.Close()
var templateText string
if agouti {
templateText = agoutiSpecText
} else {
templateText = specText
}
specTemplate, err := template.New("spec").Parse(templateText)
if err != nil {
return err
}
specTemplate.Execute(f, data)
goFmt(targetFile)
return nil
}
func getPackageImportPath() string {
workingDir, err := os.Getwd()
if err != nil {
panic(err.Error())
}
sep := string(filepath.Separator)
paths := strings.Split(workingDir, sep+"src"+sep)
if len(paths) == 1 {
fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
return "UNKNOWN_PACKAGE_PATH"
}
return filepath.ToSlash(paths[len(paths)-1])
}


@@ -1,31 +0,0 @@
package main
import (
"flag"
"fmt"
)
func BuildHelpCommand() *Command {
return &Command{
Name: "help",
FlagSet: flag.NewFlagSet("help", flag.ExitOnError),
UsageCommand: "ginkgo help <COMMAND>",
Usage: []string{
"Print usage information. If a command is passed in, print usage information just for that command.",
},
Command: printHelp,
}
}
func printHelp(args []string, additionalArgs []string) {
if len(args) == 0 {
usage()
} else {
command, found := commandMatching(args[0])
if !found {
complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
}
usageForCommand(command, true)
}
}


@@ -1,52 +0,0 @@
package interrupthandler
import (
"os"
"os/signal"
"sync"
"syscall"
)
type InterruptHandler struct {
interruptCount int
lock *sync.Mutex
C chan bool
}
func NewInterruptHandler() *InterruptHandler {
h := &InterruptHandler{
lock: &sync.Mutex{},
C: make(chan bool, 0),
}
go h.handleInterrupt()
SwallowSigQuit()
return h
}
func (h *InterruptHandler) WasInterrupted() bool {
h.lock.Lock()
defer h.lock.Unlock()
return h.interruptCount > 0
}
func (h *InterruptHandler) handleInterrupt() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
signal.Stop(c)
h.lock.Lock()
h.interruptCount++
if h.interruptCount == 1 {
close(h.C)
} else if h.interruptCount > 5 {
os.Exit(1)
}
h.lock.Unlock()
go h.handleInterrupt()
}


@@ -1,14 +0,0 @@
// +build freebsd openbsd netbsd dragonfly darwin linux

package interrupthandler
import (
"os"
"os/signal"
"syscall"
)
func SwallowSigQuit() {
c := make(chan os.Signal, 1024)
signal.Notify(c, syscall.SIGQUIT)
}


@@ -1,7 +0,0 @@
// +build windows

package interrupthandler
func SwallowSigQuit() {
//noop
}


@@ -1,291 +0,0 @@
/*
The Ginkgo CLI
The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
You can also learn more by running:
ginkgo help
Here are some of the more commonly used commands:
To install:
go install github.com/onsi/ginkgo/ginkgo
To run tests:
ginkgo
To run tests in all subdirectories:
ginkgo -r
To run tests in particular packages:
ginkgo <flags> /path/to/package /path/to/another/package
To pass arguments/flags to your tests:
ginkgo <flags> <packages> -- <pass-throughs>
To run tests in parallel
ginkgo -p
this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with:
ginkgo -nodes=N
(note that you don't need to provide -p in this case).
By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
An alternative is to have the parallel nodes run and stream interleaved output back. This is useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output:
ginkgo -nodes=N -stream=true
On windows, the default value for stream is true.
By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can:
ginkgo -keepGoing
To monitor packages and rerun tests when changes occur:
ginkgo watch <-r> </path/to/package>
passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X; tests for packages that depend on X are not rerun.
[OSX & Linux only] To receive (desktop) notifications when a test run completes:
ginkgo -notify
this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails. You can do this with:
ginkgo -untilItFails
To bootstrap a test suite:
ginkgo bootstrap
To generate a test file:
ginkgo generate <test_file_name>
To bootstrap/generate test files without using "." imports:
ginkgo bootstrap --nodot
ginkgo generate --nodot
this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run
ginkgo nodot
to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added.
To convert an existing XUnit style test suite to a Ginkgo-style test suite:
ginkgo convert .
To unfocus tests:
ginkgo unfocus
or
ginkgo blur
To compile a test suite:
ginkgo build <path-to-package>
will output an executable file named `package.test`. This can be run directly or by invoking
ginkgo <path-to-package.test>
To print out Ginkgo's version:
ginkgo version
To get more help:
ginkgo help
*/
package main
import (
"flag"
"fmt"
"os"
"os/exec"
"strings"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
const greenColor = "\x1b[32m"
const redColor = "\x1b[91m"
const defaultStyle = "\x1b[0m"
const lightGrayColor = "\x1b[37m"
type Command struct {
Name string
AltName string
FlagSet *flag.FlagSet
Usage []string
UsageCommand string
Command func(args []string, additionalArgs []string)
SuppressFlagDocumentation bool
FlagDocSubstitute []string
}
func (c *Command) Matches(name string) bool {
return c.Name == name || (c.AltName != "" && c.AltName == name)
}
func (c *Command) Run(args []string, additionalArgs []string) {
c.FlagSet.Parse(args)
c.Command(c.FlagSet.Args(), additionalArgs)
}
var DefaultCommand *Command
var Commands []*Command
func init() {
DefaultCommand = BuildRunCommand()
Commands = append(Commands, BuildWatchCommand())
Commands = append(Commands, BuildBuildCommand())
Commands = append(Commands, BuildBootstrapCommand())
Commands = append(Commands, BuildGenerateCommand())
Commands = append(Commands, BuildNodotCommand())
Commands = append(Commands, BuildConvertCommand())
Commands = append(Commands, BuildUnfocusCommand())
Commands = append(Commands, BuildVersionCommand())
Commands = append(Commands, BuildHelpCommand())
}
func main() {
args := []string{}
additionalArgs := []string{}
foundDelimiter := false
for _, arg := range os.Args[1:] {
if !foundDelimiter {
if arg == "--" {
foundDelimiter = true
continue
}
}
if foundDelimiter {
additionalArgs = append(additionalArgs, arg)
} else {
args = append(args, arg)
}
}
if len(args) > 0 {
commandToRun, found := commandMatching(args[0])
if found {
commandToRun.Run(args[1:], additionalArgs)
return
}
}
DefaultCommand.Run(args, additionalArgs)
}
func commandMatching(name string) (*Command, bool) {
for _, command := range Commands {
if command.Matches(name) {
return command, true
}
}
return nil, false
}
func usage() {
fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION)
usageForCommand(DefaultCommand, false)
for _, command := range Commands {
fmt.Fprintf(os.Stderr, "\n")
usageForCommand(command, false)
}
}
func usageForCommand(command *Command, longForm bool) {
fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n"))
if command.SuppressFlagDocumentation && !longForm {
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n "))
} else {
command.FlagSet.PrintDefaults()
}
}
func complainAndQuit(complaint string) {
fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
os.Exit(1)
}
func findSuites(args []string, recurse bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
suites := []testsuite.TestSuite{}
if len(args) > 0 {
for _, arg := range args {
if allowPrecompiled {
suite, err := testsuite.PrecompiledTestSuite(arg)
if err == nil {
suites = append(suites, suite)
continue
}
}
suites = append(suites, testsuite.SuitesInDir(arg, recurse)...)
}
} else {
suites = testsuite.SuitesInDir(".", recurse)
}
skippedPackages := []string{}
if skipPackage != "" {
skipFilters := strings.Split(skipPackage, ",")
filteredSuites := []testsuite.TestSuite{}
for _, suite := range suites {
skip := false
for _, skipFilter := range skipFilters {
if strings.Contains(suite.Path, skipFilter) {
skip = true
break
}
}
if skip {
skippedPackages = append(skippedPackages, suite.Path)
} else {
filteredSuites = append(filteredSuites, suite)
}
}
suites = filteredSuites
}
return suites, skippedPackages
}
func goFmt(path string) {
err := exec.Command("go", "fmt", path).Run()
if err != nil {
complainAndQuit("Could not fmt: " + err.Error())
}
}
func pluralizedWord(singular, plural string, count int) string {
if count == 1 {
return singular
}
return plural
}


@@ -1,194 +0,0 @@
package nodot
import (
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"path/filepath"
"strings"
)
func ApplyNoDot(data []byte) ([]byte, error) {
sections, err := generateNodotSections()
if err != nil {
return nil, err
}
for _, section := range sections {
data = section.createOrUpdateIn(data)
}
return data, nil
}
type nodotSection struct {
name string
pkg string
declarations []string
types []string
}
func (s nodotSection) createOrUpdateIn(data []byte) []byte {
renames := map[string]string{}
contents := string(data)
lines := strings.Split(contents, "\n")
comment := "// Declarations for " + s.name
newLines := []string{}
for _, line := range lines {
if line == comment {
continue
}
words := strings.Split(line, " ")
lastWord := words[len(words)-1]
if s.containsDeclarationOrType(lastWord) {
renames[lastWord] = words[1]
continue
}
newLines = append(newLines, line)
}
if len(newLines[len(newLines)-1]) > 0 {
newLines = append(newLines, "")
}
newLines = append(newLines, comment)
for _, typ := range s.types {
name, ok := renames[s.prefix(typ)]
if !ok {
name = typ
}
newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
}
for _, decl := range s.declarations {
name, ok := renames[s.prefix(decl)]
if !ok {
name = decl
}
newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
}
newLines = append(newLines, "")
newContents := strings.Join(newLines, "\n")
return []byte(newContents)
}
func (s nodotSection) prefix(declOrType string) string {
return s.pkg + "." + declOrType
}
func (s nodotSection) containsDeclarationOrType(word string) bool {
for _, declaration := range s.declarations {
if s.prefix(declaration) == word {
return true
}
}
for _, typ := range s.types {
if s.prefix(typ) == word {
return true
}
}
return false
}
func generateNodotSections() ([]nodotSection, error) {
sections := []nodotSection{}
declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
if err != nil {
return nil, err
}
sections = append(sections, nodotSection{
name: "Ginkgo DSL",
pkg: "ginkgo",
declarations: declarations,
types: []string{"Done", "Benchmarker"},
})
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
if err != nil {
return nil, err
}
sections = append(sections, nodotSection{
name: "Gomega DSL",
pkg: "gomega",
declarations: declarations,
})
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
if err != nil {
return nil, err
}
sections = append(sections, nodotSection{
name: "Gomega Matchers",
pkg: "gomega",
declarations: declarations,
})
return sections, nil
}
func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
pkg, err := build.Import(pkgPath, ".", 0)
if err != nil {
return []string{}, err
}
declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
if err != nil {
return []string{}, err
}
blacklistLookup := map[string]bool{}
for _, declaration := range blacklist {
blacklistLookup[declaration] = true
}
filteredDeclarations := []string{}
for _, declaration := range declarations {
if blacklistLookup[declaration] {
continue
}
filteredDeclarations = append(filteredDeclarations, declaration)
}
return filteredDeclarations, nil
}
func getExportedDeclarationsForFile(path string) ([]string, error) {
fset := token.NewFileSet()
tree, err := parser.ParseFile(fset, path, nil, 0)
if err != nil {
return []string{}, err
}
declarations := []string{}
ast.FileExports(tree)
for _, decl := range tree.Decls {
switch x := decl.(type) {
case *ast.GenDecl:
switch s := x.Specs[0].(type) {
case *ast.ValueSpec:
declarations = append(declarations, s.Names[0].Name)
}
case *ast.FuncDecl:
declarations = append(declarations, x.Name.Name)
}
}
return declarations, nil
}
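
A sketch (not part of this commit) of the block createOrUpdateIn appends to a bootstrap file, following the "type %s %s" / "var %s = %s" formats above. The exact identifiers come from the exported declarations of ginkgo_dsl.go, gomega_dsl.go and matchers.go; the few shown here are illustrative.

// Declarations for Ginkgo DSL
type Done ginkgo.Done
type Benchmarker ginkgo.Benchmarker
var Describe = ginkgo.Describe
var It = ginkgo.It

// Declarations for Gomega DSL
var Expect = gomega.Expect

// Declarations for Gomega Matchers
var Equal = gomega.Equal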


@@ -1,76 +0,0 @@
package main
import (
"bufio"
"flag"
"github.com/onsi/ginkgo/ginkgo/nodot"
"io/ioutil"
"os"
"path/filepath"
"regexp"
)
func BuildNodotCommand() *Command {
return &Command{
Name: "nodot",
FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError),
UsageCommand: "ginkgo nodot",
Usage: []string{
"Update the nodot declarations in your test suite",
"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
"If you've renamed a declaration, that name will be honored and not overwritten.",
},
Command: updateNodot,
}
}
func updateNodot(args []string, additionalArgs []string) {
suiteFile, perm := findSuiteFile()
data, err := ioutil.ReadFile(suiteFile)
if err != nil {
complainAndQuit("Failed to update nodot declarations: " + err.Error())
}
content, err := nodot.ApplyNoDot(data)
if err != nil {
complainAndQuit("Failed to update nodot declarations: " + err.Error())
}
ioutil.WriteFile(suiteFile, content, perm)
goFmt(suiteFile)
}
func findSuiteFile() (string, os.FileMode) {
workingDir, err := os.Getwd()
if err != nil {
complainAndQuit("Could not find suite file for nodot: " + err.Error())
}
files, err := ioutil.ReadDir(workingDir)
if err != nil {
complainAndQuit("Could not find suite file for nodot: " + err.Error())
}
re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
for _, file := range files {
if file.IsDir() {
continue
}
path := filepath.Join(workingDir, file.Name())
f, err := os.Open(path)
if err != nil {
complainAndQuit("Could not find suite file for nodot: " + err.Error())
}
defer f.Close()
if re.MatchReader(bufio.NewReader(f)) {
return path, file.Mode()
}
}
complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
return "", 0
}


@@ -1,141 +0,0 @@
package main
import (
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"strings"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
type Notifier struct {
commandFlags *RunWatchAndBuildCommandFlags
}
func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
return &Notifier{
commandFlags: commandFlags,
}
}
func (n *Notifier) VerifyNotificationsAreAvailable() {
if n.commandFlags.Notify {
onLinux := (runtime.GOOS == "linux")
onOSX := (runtime.GOOS == "darwin")
if onOSX {
_, err := exec.LookPath("terminal-notifier")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
OSX:
To remedy this:
brew install terminal-notifier
To learn more about terminal-notifier:
https://github.com/alloy/terminal-notifier
`)
os.Exit(1)
}
} else if onLinux {
_, err := exec.LookPath("notify-send")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
Linux:
Download and install notify-send for your distribution
`)
os.Exit(1)
}
}
}
}
func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
if suitePassed {
n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
} else {
n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
}
}
func (n *Notifier) SendNotification(title string, subtitle string) {
if n.commandFlags.Notify {
onLinux := (runtime.GOOS == "linux")
onOSX := (runtime.GOOS == "darwin")
if onOSX {
_, err := exec.LookPath("terminal-notifier")
if err == nil {
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
terminal := os.Getenv("TERM_PROGRAM")
if terminal == "iTerm.app" {
args = append(args, "-activate", "com.googlecode.iterm2")
} else if terminal == "Apple_Terminal" {
args = append(args, "-activate", "com.apple.Terminal")
}
exec.Command("terminal-notifier", args...).Run()
}
} else if onLinux {
_, err := exec.LookPath("notify-send")
if err == nil {
args := []string{"-a", "ginkgo", title, subtitle}
exec.Command("notify-send", args...).Run()
}
}
}
}
func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
command := n.commandFlags.AfterSuiteHook
if command != "" {
// Allow for string replacement to pass input to the command
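// For example, a hypothetical hook such as
//   --afterSuiteHook="notify-send (ginkgo-suite-name) (ginkgo-suite-passed)"
// would run `notify-send books [PASS]` after a passing run of a suite named "books".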
passed := "[FAIL]"
if suitePassed {
passed = "[PASS]"
}
command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
// Must break command into parts
splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
parts := splitArgs.FindAllString(command, -1)
output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
if err != nil {
fmt.Println("Post-suite command failed:")
if config.DefaultReporterConfig.NoColor {
fmt.Printf("\t%s\n", output)
} else {
fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
}
n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
} else {
fmt.Println("Post-suite command succeeded:")
if config.DefaultReporterConfig.NoColor {
fmt.Printf("\t%s\n", output)
} else {
fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
}
}
}
}

View File

@@ -1,192 +0,0 @@
package main
import (
"flag"
"fmt"
"math/rand"
"os"
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
"github.com/onsi/ginkgo/ginkgo/testrunner"
"github.com/onsi/ginkgo/types"
)
func BuildRunCommand() *Command {
commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
notifier := NewNotifier(commandFlags)
interruptHandler := interrupthandler.NewInterruptHandler()
runner := &SpecRunner{
commandFlags: commandFlags,
notifier: notifier,
interruptHandler: interruptHandler,
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
}
return &Command{
Name: "",
FlagSet: commandFlags.FlagSet,
UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
Usage: []string{
"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
"Any arguments after -- will be passed to the test.",
"Accepts the following flags:",
},
Command: runner.RunSpecs,
}
}
type SpecRunner struct {
commandFlags *RunWatchAndBuildCommandFlags
notifier *Notifier
interruptHandler *interrupthandler.InterruptHandler
suiteRunner *SuiteRunner
}
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
r.commandFlags.computeNodes()
r.notifier.VerifyNotificationsAreAvailable()
suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
if len(skippedPackages) > 0 {
fmt.Println("Will skip:")
for _, skippedPackage := range skippedPackages {
fmt.Println(" " + skippedPackage)
}
}
if len(skippedPackages) > 0 && len(suites) == 0 {
fmt.Println("All tests skipped! Exiting...")
os.Exit(0)
}
if len(suites) == 0 {
complainAndQuit("Found no test suites")
}
r.ComputeSuccinctMode(len(suites))
t := time.Now()
runners := []*testrunner.TestRunner{}
for _, suite := range suites {
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
}
numSuites := 0
runResult := testrunner.PassingRunResult()
if r.commandFlags.UntilItFails {
iteration := 0
for {
r.UpdateSeed()
randomizedRunners := r.randomizeOrder(runners)
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
iteration++
if r.interruptHandler.WasInterrupted() {
break
}
if runResult.Passed {
fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
} else {
fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
break
}
}
} else {
randomizedRunners := r.randomizeOrder(runners)
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
}
for _, runner := range runners {
runner.CleanUp()
}
fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
if runResult.Passed {
if runResult.HasProgrammaticFocus {
fmt.Printf("Test Suite Passed\n")
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
} else {
fmt.Printf("Test Suite Passed\n")
os.Exit(0)
}
} else {
fmt.Printf("Test Suite Failed\n")
os.Exit(1)
}
}
func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
if config.DefaultReporterConfig.Verbose {
config.DefaultReporterConfig.Succinct = false
return
}
if numSuites == 1 {
return
}
if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
config.DefaultReporterConfig.Succinct = true
}
}
func (r *SpecRunner) UpdateSeed() {
if !r.commandFlags.wasSet("seed") {
config.GinkgoConfig.RandomSeed = time.Now().Unix()
}
}
func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
if !r.commandFlags.RandomizeSuites {
return runners
}
if len(runners) <= 1 {
return runners
}
randomizedRunners := make([]*testrunner.TestRunner, len(runners))
randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
permutation := randomizer.Perm(len(runners))
for i, j := range permutation {
randomizedRunners[i] = runners[j]
}
return randomizedRunners
}
func orcMessage(iteration int) string {
if iteration < 10 {
return ""
} else if iteration < 30 {
return []string{
"If at first you succeed...",
"...try, try again.",
"Looking good!",
"Still good...",
"I think your tests are fine....",
"Yep, still passing",
"Here we go again...",
"Even the gophers are getting bored",
"Did you try -race?",
"Maybe you should stop now?",
"I'm getting tired...",
"What if I just made you a sandwich?",
"Hit ^C, hit ^C, please hit ^C",
"Make it stop. Please!",
"Come on! Enough is enough!",
"Dave, this conversation can serve no purpose anymore. Goodbye.",
"Just what do you think you're doing, Dave? ",
"I, Sisyphus",
"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
"I guess Einstein never tried to churn butter",
}[iteration-10] + "\n"
} else {
return "No, seriously... you can probably stop now.\n"
}
}

View File

@@ -1,121 +0,0 @@
package main
import (
"flag"
"runtime"
"github.com/onsi/ginkgo/config"
)
type RunWatchAndBuildCommandFlags struct {
Recurse bool
Race bool
Cover bool
CoverPkg string
SkipPackage string
Tags string
//for run and watch commands
NumCPU int
NumCompilers int
ParallelStream bool
Notify bool
AfterSuiteHook string
AutoNodes bool
//only for run command
KeepGoing bool
UntilItFails bool
RandomizeSuites bool
//only for watch command
Depth int
FlagSet *flag.FlagSet
}
const runMode = 1
const watchMode = 2
const buildMode = 3
func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
c := &RunWatchAndBuildCommandFlags{
FlagSet: flagSet,
}
c.flags(runMode)
return c
}
func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
c := &RunWatchAndBuildCommandFlags{
FlagSet: flagSet,
}
c.flags(watchMode)
return c
}
func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
c := &RunWatchAndBuildCommandFlags{
FlagSet: flagSet,
}
c.flags(buildMode)
return c
}
func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
wasSet := false
c.FlagSet.Visit(func(f *flag.Flag) {
if f.Name == flagName {
wasSet = true
}
})
return wasSet
}
func (c *RunWatchAndBuildCommandFlags) computeNodes() {
if c.wasSet("nodes") {
return
}
if c.AutoNodes {
switch n := runtime.NumCPU(); {
case n <= 4:
c.NumCPU = n
default:
c.NumCPU = n - 1
}
}
}
func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
onWindows := (runtime.GOOS == "windows")
c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
if mode == runMode || mode == watchMode {
config.Flags(c.FlagSet, "", false)
c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
if !onWindows {
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
}
c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
}
if mode == runMode {
c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
}
if mode == watchMode {
c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
}
}

View File

@@ -1,172 +0,0 @@
package main
import (
"fmt"
"runtime"
"sync"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
"github.com/onsi/ginkgo/ginkgo/testrunner"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
type compilationInput struct {
runner *testrunner.TestRunner
result chan compilationOutput
}
type compilationOutput struct {
runner *testrunner.TestRunner
err error
}
type SuiteRunner struct {
notifier *Notifier
interruptHandler *interrupthandler.InterruptHandler
}
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
return &SuiteRunner{
notifier: notifier,
interruptHandler: interruptHandler,
}
}
func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
//we return this to the consumer; it yields each runner's compilation output in order as the runners finish compiling
compilationOutputs := make(chan compilationOutput, len(runners))
//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
//we read from these channels in order to ensure we run the suites in order
orderedCompilationOutputs := []chan compilationOutput{}
for range runners {
orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
}
//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
//we prefill the channel then close it; this ensures we compile things in the correct order
workPool := make(chan compilationInput, len(runners))
for i, runner := range runners {
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
}
close(workPool)
//pick a reasonable numCompilers
if numCompilers == 0 {
numCompilers = runtime.NumCPU()
}
//a WaitGroup to help us wait for all compilers to shut down
wg := &sync.WaitGroup{}
wg.Add(numCompilers)
//spin up the concurrent compilers
for i := 0; i < numCompilers; i++ {
go func() {
defer wg.Done()
for input := range workPool {
if r.interruptHandler.WasInterrupted() {
return
}
if willCompile != nil {
willCompile(input.runner.Suite)
}
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
var err error
retries := 0
for retries <= 5 {
if r.interruptHandler.WasInterrupted() {
return
}
if err = input.runner.Compile(); err == nil {
break
}
retries++
}
input.result <- compilationOutput{input.runner, err}
}
}()
}
//read from the compilation output channels *in order* and send them to the caller
//close the compilationOutputs channel to tell the caller we're done
go func() {
defer close(compilationOutputs)
for _, orderedCompilationOutput := range orderedCompilationOutputs {
select {
case compilationOutput := <-orderedCompilationOutput:
compilationOutputs <- compilationOutput
case <-r.interruptHandler.C:
//interrupt detected, wait for the compilers to shut down then bail
//this ensures we clean up after ourselves and don't leave any compilation processes running
wg.Wait()
return
}
}
}()
return compilationOutputs
}
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
runResult := testrunner.PassingRunResult()
compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
numSuitesThatRan := 0
suitesThatFailed := []testsuite.TestSuite{}
for compilationOutput := range compilationOutputs {
if compilationOutput.err != nil {
fmt.Print(compilationOutput.err.Error())
}
numSuitesThatRan++
suiteRunResult := testrunner.FailingRunResult()
if compilationOutput.err == nil {
suiteRunResult = compilationOutput.runner.Run()
}
r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
runResult = runResult.Merge(suiteRunResult)
if !suiteRunResult.Passed {
suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
if !keepGoing {
break
}
}
if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
fmt.Println("")
}
}
if keepGoing && !runResult.Passed {
r.listFailedSuites(suitesThatFailed)
}
return runResult, numSuitesThatRan
}
func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
fmt.Println("")
fmt.Println("There were failures detected in the following suites:")
maxPackageNameLength := 0
for _, suite := range suitesThatFailed {
if len(suite.PackageName) > maxPackageNameLength {
maxPackageNameLength = len(suite.PackageName)
}
}
packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
for _, suite := range suitesThatFailed {
if config.DefaultReporterConfig.NoColor {
fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
} else {
fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
}
}
}

View File

@@ -1,52 +0,0 @@
package testrunner
import (
"bytes"
"fmt"
"io"
"log"
"strings"
"sync"
)
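// logWriter buffers incoming bytes and emits each complete line through a
// log.Logger tagged with the parallel node number; any trailing partial line is
// held back until the next Write or until Close flushes it.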
type logWriter struct {
buffer *bytes.Buffer
lock *sync.Mutex
log *log.Logger
}
func newLogWriter(target io.Writer, node int) *logWriter {
return &logWriter{
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
log: log.New(target, fmt.Sprintf("[%d] ", node), 0),
}
}
func (w *logWriter) Write(data []byte) (n int, err error) {
w.lock.Lock()
defer w.lock.Unlock()
w.buffer.Write(data)
contents := w.buffer.String()
lines := strings.Split(contents, "\n")
for _, line := range lines[0 : len(lines)-1] {
w.log.Println(line)
}
w.buffer.Reset()
w.buffer.Write([]byte(lines[len(lines)-1]))
return len(data), nil
}
func (w *logWriter) Close() error {
w.lock.Lock()
defer w.lock.Unlock()
if w.buffer.Len() > 0 {
w.log.Println(w.buffer.String())
}
return nil
}

View File

@@ -1,27 +0,0 @@
package testrunner
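// RunResult records whether a suite run passed and whether it contained
// programmatically focused specs.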
type RunResult struct {
Passed bool
HasProgrammaticFocus bool
}
func PassingRunResult() RunResult {
return RunResult{
Passed: true,
HasProgrammaticFocus: false,
}
}
func FailingRunResult() RunResult {
return RunResult{
Passed: false,
HasProgrammaticFocus: false,
}
}
func (r RunResult) Merge(o RunResult) RunResult {
return RunResult{
Passed: r.Passed && o.Passed,
HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
}
}

View File

@@ -1,460 +0,0 @@
package testrunner
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/testsuite"
"github.com/onsi/ginkgo/internal/remote"
"github.com/onsi/ginkgo/reporters/stenographer"
"github.com/onsi/ginkgo/types"
)
type TestRunner struct {
Suite testsuite.TestSuite
compiled bool
compilationTargetPath string
numCPU int
parallelStream bool
race bool
cover bool
coverPkg string
tags string
additionalArgs []string
}
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
runner := &TestRunner{
Suite: suite,
numCPU: numCPU,
parallelStream: parallelStream,
race: race,
cover: cover,
coverPkg: coverPkg,
tags: tags,
additionalArgs: additionalArgs,
}
if !suite.Precompiled {
dir, err := ioutil.TempDir("", "ginkgo")
if err != nil {
panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
}
runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
}
return runner
}
func (t *TestRunner) Compile() error {
return t.CompileTo(t.compilationTargetPath)
}
func (t *TestRunner) CompileTo(path string) error {
if t.compiled {
return nil
}
if t.Suite.Precompiled {
return nil
}
args := []string{"test", "-c", "-i", "-o", path}
if t.race {
args = append(args, "-race")
}
if t.cover || t.coverPkg != "" {
args = append(args, "-cover", "-covermode=atomic")
}
if t.coverPkg != "" {
args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
}
if t.tags != "" {
args = append(args, fmt.Sprintf("-tags=%s", t.tags))
}
cmd := exec.Command("go", args...)
cmd.Dir = t.Suite.Path
output, err := cmd.CombinedOutput()
if err != nil {
fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
if len(output) > 0 {
return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
}
return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
}
if !fileExists(path) {
compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
if fileExists(compiledFile) {
// seems like we are on an old go version that does not support the -o flag on go test
// move the compiled test file to the desired location by hand
err = os.Rename(compiledFile, path)
if err != nil {
// We cannot move the file, perhaps because the source and destination
// are on different partitions. We can copy the file, however.
err = copyFile(compiledFile, path)
if err != nil {
return fmt.Errorf("Failed to copy compiled file: %s", err)
}
}
} else {
return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
}
}
t.compiled = true
return nil
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil || !os.IsNotExist(err)
}
// copyFile copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file.
func copyFile(src, dst string) error {
srcInfo, err := os.Stat(src)
if err != nil {
return err
}
mode := srcInfo.Mode()
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
closeErr := out.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.Copy(out, in)
if err != nil {
return err
}
err = out.Sync()
if err != nil {
return err
}
return out.Chmod(mode)
}
/*
go test -c -i spits package.test out into the cwd. there's no way to change this.
to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.
unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.
this makes it hard to reason about what failed, and also prevents iTerm's Cmd+click from working.
fixCompilationOutput..... rewrites the output to fix the paths.
yeah......
*/
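// As a purely illustrative example: with relToPath "./books", a compiler line like
//   books_suite_test.go:12: undefined: Foo
// is rewritten to
//   books/books_suite_test.go:12: undefined: Foo
// so the path is, once again, relative to the directory ginkgo was invoked from.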
func fixCompilationOutput(output string, relToPath string) string {
re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
lines := strings.Split(output, "\n")
for i, line := range lines {
indices := re.FindStringSubmatchIndex(line)
if len(indices) == 0 {
continue
}
path := line[indices[2]:indices[3]]
path = filepath.Join(relToPath, path)
lines[i] = path + line[indices[3]:]
}
return strings.Join(lines, "\n")
}
func (t *TestRunner) Run() RunResult {
if t.Suite.IsGinkgo {
if t.numCPU > 1 {
if t.parallelStream {
return t.runAndStreamParallelGinkgoSuite()
} else {
return t.runParallelGinkgoSuite()
}
} else {
return t.runSerialGinkgoSuite()
}
} else {
return t.runGoTestSuite()
}
}
func (t *TestRunner) CleanUp() {
if t.Suite.Precompiled {
return
}
os.RemoveAll(filepath.Dir(t.compilationTargetPath))
}
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
}
func (t *TestRunner) runGoTestSuite() RunResult {
return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
}
func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
completions := make(chan RunResult)
writers := make([]*logWriter, t.numCPU)
server, err := remote.NewServer(t.numCPU)
if err != nil {
panic("Failed to start parallel spec server")
}
server.Start()
defer server.Close()
for cpu := 0; cpu < t.numCPU; cpu++ {
config.GinkgoConfig.ParallelNode = cpu + 1
config.GinkgoConfig.ParallelTotal = t.numCPU
config.GinkgoConfig.SyncHost = server.Address()
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
writers[cpu] = newLogWriter(os.Stdout, cpu+1)
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
server.RegisterAlive(cpu+1, func() bool {
if cmd.ProcessState == nil {
return true
}
return !cmd.ProcessState.Exited()
})
go t.run(cmd, completions)
}
res := PassingRunResult()
for cpu := 0; cpu < t.numCPU; cpu++ {
res = res.Merge(<-completions)
}
for _, writer := range writers {
writer.Close()
}
os.Stdout.Sync()
if t.cover || t.coverPkg != "" {
t.combineCoverprofiles()
}
return res
}
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
result := make(chan bool)
completions := make(chan RunResult)
writers := make([]*logWriter, t.numCPU)
reports := make([]*bytes.Buffer, t.numCPU)
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
server, err := remote.NewServer(t.numCPU)
if err != nil {
panic("Failed to start parallel spec server")
}
server.RegisterReporters(aggregator)
server.Start()
defer server.Close()
for cpu := 0; cpu < t.numCPU; cpu++ {
config.GinkgoConfig.ParallelNode = cpu + 1
config.GinkgoConfig.ParallelTotal = t.numCPU
config.GinkgoConfig.SyncHost = server.Address()
config.GinkgoConfig.StreamHost = server.Address()
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
reports[cpu] = &bytes.Buffer{}
writers[cpu] = newLogWriter(reports[cpu], cpu+1)
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
server.RegisterAlive(cpu+1, func() bool {
if cmd.ProcessState == nil {
return true
}
return !cmd.ProcessState.Exited()
})
go t.run(cmd, completions)
}
res := PassingRunResult()
for cpu := 0; cpu < t.numCPU; cpu++ {
res = res.Merge(<-completions)
}
//all test processes are done, at this point
//we should be able to wait for the aggregator to tell us that it's done
select {
case <-result:
fmt.Println("")
case <-time.After(time.Second):
//the aggregator never got back to us! something must have gone wrong
fmt.Println(`
-------------------------------------------------------------------
| |
| Ginkgo timed out waiting for all parallel nodes to report back! |
| |
-------------------------------------------------------------------
`)
os.Stdout.Sync()
for _, writer := range writers {
writer.Close()
}
for _, report := range reports {
fmt.Print(report.String())
}
os.Stdout.Sync()
}
if t.cover || t.coverPkg != "" {
t.combineCoverprofiles()
}
return res
}
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
args := []string{"--test.timeout=24h"}
if t.cover || t.coverPkg != "" {
coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
if t.numCPU > 1 {
coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
}
args = append(args, coverprofile)
}
args = append(args, ginkgoArgs...)
args = append(args, t.additionalArgs...)
path := t.compilationTargetPath
if t.Suite.Precompiled {
path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
}
cmd := exec.Command(path, args...)
cmd.Dir = t.Suite.Path
cmd.Stderr = stream
cmd.Stdout = stream
return cmd
}
func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
var res RunResult
defer func() {
if completions != nil {
completions <- res
}
}()
err := cmd.Start()
if err != nil {
fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
return res
}
cmd.Wait()
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
return res
}
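// combineCoverprofiles merges the per-node coverprofiles produced by a parallel
// run into a single <package>.coverprofile, summing the hit counts for each
// covered block across nodes.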
func (t *TestRunner) combineCoverprofiles() {
profiles := []string{}
for cpu := 1; cpu <= t.numCPU; cpu++ {
coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
coverFile = filepath.Join(t.Suite.Path, coverFile)
coverProfile, err := ioutil.ReadFile(coverFile)
os.Remove(coverFile)
if err == nil {
profiles = append(profiles, string(coverProfile))
}
}
if len(profiles) != t.numCPU {
return
}
lines := map[string]int{}
lineOrder := []string{}
for i, coverProfile := range profiles {
for _, line := range strings.Split(string(coverProfile), "\n")[1:] {
if len(line) == 0 {
continue
}
components := strings.Split(line, " ")
count, _ := strconv.Atoi(components[len(components)-1])
prefix := strings.Join(components[0:len(components)-1], " ")
lines[prefix] += count
if i == 0 {
lineOrder = append(lineOrder, prefix)
}
}
}
output := []string{"mode: atomic"}
for _, line := range lineOrder {
output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
}
finalOutput := strings.Join(output, "\n")
ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
}

View File

@@ -1,116 +0,0 @@
package testsuite
import (
"errors"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
)
type TestSuite struct {
Path string
PackageName string
IsGinkgo bool
Precompiled bool
}
func PrecompiledTestSuite(path string) (TestSuite, error) {
info, err := os.Stat(path)
if err != nil {
return TestSuite{}, err
}
if info.IsDir() {
return TestSuite{}, errors.New("this is a directory, not a file")
}
if filepath.Ext(path) != ".test" {
return TestSuite{}, errors.New("this is not a .test binary")
}
if info.Mode()&0111 == 0 {
return TestSuite{}, errors.New("this is not executable")
}
dir := relPath(filepath.Dir(path))
packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
return TestSuite{
Path: dir,
PackageName: packageName,
IsGinkgo: true,
Precompiled: true,
}, nil
}
func SuitesInDir(dir string, recurse bool) []TestSuite {
suites := []TestSuite{}
// "This change will only be enabled if the go command is run with
// GO15VENDOREXPERIMENT=1 in its environment."
// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC
vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
if (vendorExperiment == "1") && path.Base(dir) == "vendor" {
return suites
}
files, _ := ioutil.ReadDir(dir)
re := regexp.MustCompile(`_test\.go$`)
for _, file := range files {
if !file.IsDir() && re.Match([]byte(file.Name())) {
suites = append(suites, New(dir, files))
break
}
}
if recurse {
re = regexp.MustCompile(`^[._]`)
for _, file := range files {
if file.IsDir() && !re.Match([]byte(file.Name())) {
suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
}
}
}
return suites
}
func relPath(dir string) string {
dir, _ = filepath.Abs(dir)
cwd, _ := os.Getwd()
dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
dir = "." + string(filepath.Separator) + dir
return dir
}
func New(dir string, files []os.FileInfo) TestSuite {
return TestSuite{
Path: relPath(dir),
PackageName: packageNameForSuite(dir),
IsGinkgo: filesHaveGinkgoSuite(dir, files),
}
}
func packageNameForSuite(dir string) string {
path, _ := filepath.Abs(dir)
return filepath.Base(path)
}
func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
reTestFile := regexp.MustCompile(`_test\.go$`)
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
for _, file := range files {
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
if reGinkgo.Match(contents) {
return true
}
}
}
return false
}

View File

@@ -1,38 +0,0 @@
package main
import (
"flag"
"fmt"
"os/exec"
)
func BuildUnfocusCommand() *Command {
return &Command{
Name: "unfocus",
AltName: "blur",
FlagSet: flag.NewFlagSet("unfocus", flag.ExitOnError),
UsageCommand: "ginkgo unfocus (or ginkgo blur)",
Usage: []string{
"Recursively unfocuses any focused tests under the current directory",
},
Command: unfocusSpecs,
}
}
func unfocusSpecs([]string, []string) {
unfocus("Describe")
unfocus("Context")
unfocus("It")
unfocus("Measure")
unfocus("DescribeTable")
unfocus("Entry")
}
func unfocus(component string) {
fmt.Printf("Removing F%s...\n", component)
cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
out, _ := cmd.CombinedOutput()
if string(out) != "" {
println(string(out))
}
}

View File

@@ -1,23 +0,0 @@
package main
import (
"flag"
"fmt"
"github.com/onsi/ginkgo/config"
)
func BuildVersionCommand() *Command {
return &Command{
Name: "version",
FlagSet: flag.NewFlagSet("version", flag.ExitOnError),
UsageCommand: "ginkgo version",
Usage: []string{
"Print Ginkgo's version",
},
Command: printVersion,
}
}
func printVersion([]string, []string) {
fmt.Printf("Ginkgo Version %s\n", config.VERSION)
}

View File

@@ -1,22 +0,0 @@
package watch
import "sort"
type Delta struct {
ModifiedPackages []string
NewSuites []*Suite
RemovedSuites []*Suite
modifiedSuites []*Suite
}
type DescendingByDelta []*Suite
func (a DescendingByDelta) Len() int { return len(a) }
func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
func (d Delta) ModifiedSuites() []*Suite {
sort.Sort(DescendingByDelta(d.modifiedSuites))
return d.modifiedSuites
}

View File

@@ -1,71 +0,0 @@
package watch
import (
"fmt"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
type SuiteErrors map[testsuite.TestSuite]error
type DeltaTracker struct {
maxDepth int
suites map[string]*Suite
packageHashes *PackageHashes
}
func NewDeltaTracker(maxDepth int) *DeltaTracker {
return &DeltaTracker{
maxDepth: maxDepth,
packageHashes: NewPackageHashes(),
suites: map[string]*Suite{},
}
}
func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
errors = SuiteErrors{}
delta.ModifiedPackages = d.packageHashes.CheckForChanges()
providedSuitePaths := map[string]bool{}
for _, suite := range suites {
providedSuitePaths[suite.Path] = true
}
d.packageHashes.StartTrackingUsage()
for _, suite := range d.suites {
if providedSuitePaths[suite.Suite.Path] {
if suite.Delta() > 0 {
delta.modifiedSuites = append(delta.modifiedSuites, suite)
}
} else {
delta.RemovedSuites = append(delta.RemovedSuites, suite)
}
}
d.packageHashes.StopTrackingUsageAndPrune()
for _, suite := range suites {
_, ok := d.suites[suite.Path]
if !ok {
s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
if err != nil {
errors[suite] = err
continue
}
d.suites[suite.Path] = s
delta.NewSuites = append(delta.NewSuites, s)
}
}
return delta, errors
}
func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
s, ok := d.suites[suite.Path]
if !ok {
return fmt.Errorf("unknown suite %s", suite.Path)
}
return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
}

View File

@@ -1,91 +0,0 @@
package watch
import (
"go/build"
"regexp"
)
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
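// Dependencies maps package directories to the depth at which they were first
// discovered while walking imports outward from a suite; GOROOT packages and
// Ginkgo/Gomega themselves are excluded.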
type Dependencies struct {
deps map[string]int
}
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
d := Dependencies{
deps: map[string]int{},
}
if maxDepth == 0 {
return d, nil
}
err := d.seedWithDepsForPackageAtPath(path)
if err != nil {
return d, err
}
for depth := 1; depth < maxDepth; depth++ {
n := len(d.deps)
d.addDepsForDepth(depth)
if n == len(d.deps) {
break
}
}
return d, nil
}
func (d Dependencies) Dependencies() map[string]int {
return d.deps
}
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
pkg, err := build.ImportDir(path, 0)
if err != nil {
return err
}
d.resolveAndAdd(pkg.Imports, 1)
d.resolveAndAdd(pkg.TestImports, 1)
d.resolveAndAdd(pkg.XTestImports, 1)
delete(d.deps, pkg.Dir)
return nil
}
func (d Dependencies) addDepsForDepth(depth int) {
for dep, depDepth := range d.deps {
if depDepth == depth {
d.addDepsForDep(dep, depth+1)
}
}
}
func (d Dependencies) addDepsForDep(dep string, depth int) {
pkg, err := build.ImportDir(dep, 0)
if err != nil {
println(err.Error())
return
}
d.resolveAndAdd(pkg.Imports, depth)
}
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
for _, dep := range deps {
pkg, err := build.Import(dep, ".", 0)
if err != nil {
continue
}
if !pkg.Goroot && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) {
d.addDepIfNotPresent(pkg.Dir, depth)
}
}
}
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
_, ok := d.deps[dep]
if !ok {
d.deps[dep] = depth
}
}

View File

@@ -1,103 +0,0 @@
package watch
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"time"
)
var goRegExp = regexp.MustCompile(`\.go$`)
var goTestRegExp = regexp.MustCompile(`_test\.go$`)
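// PackageHash tracks cheap fingerprints (file name, size and modification time)
// for the .go and _test.go files in a single package directory, so the watcher
// can distinguish code changes from test changes.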
type PackageHash struct {
CodeModifiedTime time.Time
TestModifiedTime time.Time
Deleted bool
path string
codeHash string
testHash string
}
func NewPackageHash(path string) *PackageHash {
p := &PackageHash{
path: path,
}
p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
return p
}
func (p *PackageHash) CheckForChanges() bool {
codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
if deleted {
if !p.Deleted {
t := time.Now()
p.CodeModifiedTime = t
p.TestModifiedTime = t
}
p.Deleted = true
return true
}
modified := false
p.Deleted = false
if p.codeHash != codeHash {
p.CodeModifiedTime = codeModifiedTime
modified = true
}
if p.testHash != testHash {
p.TestModifiedTime = testModifiedTime
modified = true
}
p.codeHash = codeHash
p.testHash = testHash
return modified
}
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
infos, err := ioutil.ReadDir(p.path)
if err != nil {
deleted = true
return
}
for _, info := range infos {
if info.IsDir() {
continue
}
if goTestRegExp.Match([]byte(info.Name())) {
testHash += p.hashForFileInfo(info)
if info.ModTime().After(testModifiedTime) {
testModifiedTime = info.ModTime()
}
continue
}
if goRegExp.Match([]byte(info.Name())) {
codeHash += p.hashForFileInfo(info)
if info.ModTime().After(codeModifiedTime) {
codeModifiedTime = info.ModTime()
}
}
}
testHash += codeHash
if codeModifiedTime.After(testModifiedTime) {
testModifiedTime = codeModifiedTime
}
return
}
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
}

View File

@@ -1,82 +0,0 @@
package watch
import (
"path/filepath"
"sync"
)
type PackageHashes struct {
PackageHashes map[string]*PackageHash
usedPaths map[string]bool
lock *sync.Mutex
}
func NewPackageHashes() *PackageHashes {
return &PackageHashes{
PackageHashes: map[string]*PackageHash{},
usedPaths: nil,
lock: &sync.Mutex{},
}
}
func (p *PackageHashes) CheckForChanges() []string {
p.lock.Lock()
defer p.lock.Unlock()
modified := []string{}
for _, packageHash := range p.PackageHashes {
if packageHash.CheckForChanges() {
modified = append(modified, packageHash.path)
}
}
return modified
}
func (p *PackageHashes) Add(path string) *PackageHash {
p.lock.Lock()
defer p.lock.Unlock()
path, _ = filepath.Abs(path)
_, ok := p.PackageHashes[path]
if !ok {
p.PackageHashes[path] = NewPackageHash(path)
}
if p.usedPaths != nil {
p.usedPaths[path] = true
}
return p.PackageHashes[path]
}
func (p *PackageHashes) Get(path string) *PackageHash {
p.lock.Lock()
defer p.lock.Unlock()
path, _ = filepath.Abs(path)
if p.usedPaths != nil {
p.usedPaths[path] = true
}
return p.PackageHashes[path]
}
func (p *PackageHashes) StartTrackingUsage() {
p.lock.Lock()
defer p.lock.Unlock()
p.usedPaths = map[string]bool{}
}
func (p *PackageHashes) StopTrackingUsageAndPrune() {
p.lock.Lock()
defer p.lock.Unlock()
for path := range p.PackageHashes {
if !p.usedPaths[path] {
delete(p.PackageHashes, path)
}
}
p.usedPaths = nil
}

View File

@@ -1,87 +0,0 @@
package watch
import (
"fmt"
"math"
"time"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
type Suite struct {
Suite testsuite.TestSuite
RunTime time.Time
Dependencies Dependencies
sharedPackageHashes *PackageHashes
}
func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
deps, err := NewDependencies(suite.Path, maxDepth)
if err != nil {
return nil, err
}
sharedPackageHashes.Add(suite.Path)
for dep := range deps.Dependencies() {
sharedPackageHashes.Add(dep)
}
return &Suite{
Suite: suite,
Dependencies: deps,
sharedPackageHashes: sharedPackageHashes,
}, nil
}
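// Delta scores how stale a suite is: modifications to the suite's own package
// (tests included) are weighted by a factor of 1000, while modifications to each
// dependency are discounted by that dependency's depth in the import graph.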
func (s *Suite) Delta() float64 {
delta := s.delta(s.Suite.Path, true, 0) * 1000
for dep, depth := range s.Dependencies.Dependencies() {
delta += s.delta(dep, false, depth)
}
return delta
}
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
s.RunTime = time.Now()
deps, err := NewDependencies(s.Suite.Path, maxDepth)
if err != nil {
return err
}
s.sharedPackageHashes.Add(s.Suite.Path)
for dep := range deps.Dependencies() {
s.sharedPackageHashes.Add(dep)
}
s.Dependencies = deps
return nil
}
func (s *Suite) Description() string {
numDeps := len(s.Dependencies.Dependencies())
pluralizer := "ies"
if numDeps == 1 {
pluralizer = "y"
}
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
}
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
}
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
packageHash := s.sharedPackageHashes.Get(packagePath)
var modifiedTime time.Time
if includeTests {
modifiedTime = packageHash.TestModifiedTime
} else {
modifiedTime = packageHash.CodeModifiedTime
}
return modifiedTime.Sub(s.RunTime)
}

View File

@@ -1,172 +0,0 @@
package main
import (
"flag"
"fmt"
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
"github.com/onsi/ginkgo/ginkgo/testrunner"
"github.com/onsi/ginkgo/ginkgo/testsuite"
"github.com/onsi/ginkgo/ginkgo/watch"
)
func BuildWatchCommand() *Command {
commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
interruptHandler := interrupthandler.NewInterruptHandler()
notifier := NewNotifier(commandFlags)
watcher := &SpecWatcher{
commandFlags: commandFlags,
notifier: notifier,
interruptHandler: interruptHandler,
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
}
return &Command{
Name: "watch",
FlagSet: commandFlags.FlagSet,
UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
Usage: []string{
"Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
"Any arguments after -- will be passed to the test.",
},
Command: watcher.WatchSpecs,
SuppressFlagDocumentation: true,
FlagDocSubstitute: []string{
"Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
},
}
}
type SpecWatcher struct {
commandFlags *RunWatchAndBuildCommandFlags
notifier *Notifier
interruptHandler *interrupthandler.InterruptHandler
suiteRunner *SuiteRunner
}
func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
w.commandFlags.computeNodes()
w.notifier.VerifyNotificationsAreAvailable()
w.WatchSuites(args, additionalArgs)
}
func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
runners := []*testrunner.TestRunner{}
for _, suite := range suites {
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
}
return runners
}
func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
if len(suites) == 0 {
complainAndQuit("Found no test suites")
}
fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
delta, errors := deltaTracker.Delta(suites)
fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
for _, suite := range delta.NewSuites {
fmt.Println(" " + suite.Description())
}
for suite, err := range errors {
fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
}
if len(suites) == 1 {
runners := w.runnersForSuites(suites, additionalArgs)
w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
runners[0].CleanUp()
}
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
delta, _ := deltaTracker.Delta(suites)
suitesToRun := []testsuite.TestSuite{}
if len(delta.NewSuites) > 0 {
fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
for _, suite := range delta.NewSuites {
suitesToRun = append(suitesToRun, suite.Suite)
fmt.Println(" " + suite.Description())
}
}
modifiedSuites := delta.ModifiedSuites()
if len(modifiedSuites) > 0 {
fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle)
for _, pkg := range delta.ModifiedPackages {
fmt.Println(" " + pkg)
}
fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
for _, suite := range modifiedSuites {
suitesToRun = append(suitesToRun, suite.Suite)
fmt.Println(" " + suite.Description())
}
fmt.Println("")
}
if len(suitesToRun) > 0 {
w.UpdateSeed()
w.ComputeSuccinctMode(len(suitesToRun))
runners := w.runnersForSuites(suitesToRun, additionalArgs)
result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
deltaTracker.WillRun(suite)
})
for _, runner := range runners {
runner.CleanUp()
}
if !w.interruptHandler.WasInterrupted() {
color := redColor
if result.Passed {
color = greenColor
}
fmt.Println(color + "\nDone. Resuming watch..." + defaultStyle)
}
}
case <-w.interruptHandler.C:
return
}
}
}
func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
if config.DefaultReporterConfig.Verbose {
config.DefaultReporterConfig.Succinct = false
return
}
if w.commandFlags.wasSet("succinct") {
return
}
if numSuites == 1 {
config.DefaultReporterConfig.Succinct = false
}
if numSuites > 1 {
config.DefaultReporterConfig.Succinct = true
}
}
func (w *SpecWatcher) UpdateSeed() {
if !w.commandFlags.wasSet("seed") {
config.GinkgoConfig.RandomSeed = time.Now().Unix()
}
}

View File

@@ -1 +0,0 @@
package integration

View File

@@ -1,229 +0,0 @@
/*
Package gbytes provides a buffer that supports incrementally detecting input.
You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fast-forwards the buffer's read cursor to the end of that match.
Subsequent matches against the buffer will only operate against data that appears *after* the read cursor.
The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always
access the entire buffer's contents with Contents().
*/
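//
// A minimal usage sketch (assuming the usual Gomega test context and dot-imports):
//
//	buffer := gbytes.NewBuffer()
//	buffer.Write([]byte("first line\nsecond line\n"))
//	Ω(buffer).Should(gbytes.Say("first line"))  // advances the read cursor
//	Ω(buffer).Should(gbytes.Say("second line")) // only sees data after the cursor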
package gbytes
import (
"errors"
"fmt"
"io"
"regexp"
"sync"
"time"
)
/*
gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher.
You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code!
*/
type Buffer struct {
contents []byte
readCursor uint64
lock *sync.Mutex
detectCloser chan interface{}
closed bool
}
/*
NewBuffer returns a new gbytes.Buffer
*/
func NewBuffer() *Buffer {
return &Buffer{
lock: &sync.Mutex{},
}
}
/*
BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes
*/
func BufferWithBytes(bytes []byte) *Buffer {
return &Buffer{
lock: &sync.Mutex{},
contents: bytes,
}
}
/*
Write implements the io.Writer interface
*/
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.closed {
return 0, errors.New("attempt to write to closed buffer")
}
b.contents = append(b.contents, p...)
return len(p), nil
}
/*
Read implements the io.Reader interface. It advances the
cursor as it reads.
Returns an error if called after Close.
*/
func (b *Buffer) Read(d []byte) (int, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.closed {
return 0, errors.New("attempt to read from closed buffer")
}
if uint64(len(b.contents)) <= b.readCursor {
return 0, io.EOF
}
n := copy(d, b.contents[b.readCursor:])
b.readCursor += uint64(n)
return n, nil
}
/*
Close signifies that the buffer will no longer be written to
*/
func (b *Buffer) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
b.closed = true
return nil
}
/*
Closed returns true if the buffer has been closed
*/
func (b *Buffer) Closed() bool {
b.lock.Lock()
defer b.lock.Unlock()
return b.closed
}
/*
Contents returns all data ever written to the buffer.
*/
func (b *Buffer) Contents() []byte {
b.lock.Lock()
defer b.lock.Unlock()
contents := make([]byte, len(b.contents))
copy(contents, b.contents)
return contents
}
/*
Detect takes a regular expression and returns a channel.
The channel will receive true the first time data matching the regular expression is written to the buffer.
The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region.
You typically don't need to use Detect and should use the gbytes.Say matcher instead. Detect is useful, however, in cases where your code must
branch and handle different outputs written to the buffer.
For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library.
You could do something like:
select {
case <-buffer.Detect("You are not logged in"):
//log in
case <-buffer.Detect("Success"):
//carry on
case <-time.After(time.Second):
//welp
}
buffer.CancelDetects()
You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them.
Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf.
*/
func (b *Buffer) Detect(desired string, args ...interface{}) chan bool {
formattedRegexp := desired
if len(args) > 0 {
formattedRegexp = fmt.Sprintf(desired, args...)
}
re := regexp.MustCompile(formattedRegexp)
b.lock.Lock()
defer b.lock.Unlock()
if b.detectCloser == nil {
b.detectCloser = make(chan interface{})
}
closer := b.detectCloser
response := make(chan bool)
go func() {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
defer close(response)
for {
select {
case <-ticker.C:
b.lock.Lock()
data, cursor := b.contents[b.readCursor:], b.readCursor
loc := re.FindIndex(data)
b.lock.Unlock()
if loc != nil {
response <- true
b.lock.Lock()
newCursorPosition := cursor + uint64(loc[1])
if newCursorPosition >= b.readCursor {
b.readCursor = newCursorPosition
}
b.lock.Unlock()
return
}
case <-closer:
return
}
}
}()
return response
}
/*
CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels.
*/
func (b *Buffer) CancelDetects() {
b.lock.Lock()
defer b.lock.Unlock()
close(b.detectCloser)
b.detectCloser = nil
}
func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) {
b.lock.Lock()
defer b.lock.Unlock()
unreadBytes := b.contents[b.readCursor:]
copyOfUnreadBytes := make([]byte, len(unreadBytes))
copy(copyOfUnreadBytes, unreadBytes)
loc := re.FindIndex(unreadBytes)
if loc != nil {
b.readCursor += uint64(loc[1])
return true, copyOfUnreadBytes
} else {
return false, copyOfUnreadBytes
}
}

View File

@@ -1,105 +0,0 @@
package gbytes
import (
"fmt"
"regexp"
"github.com/onsi/gomega/format"
)
//Objects satisfying the BufferProvider can be used with the Say matcher.
type BufferProvider interface {
Buffer() *Buffer
}
/*
Say is a Gomega matcher that operates on gbytes.Buffers:
Ω(buffer).Should(Say("something"))
will succeed if the unread portion of the buffer matches the regular expression "something".
When Say succeeds, it fast-forwards the gbytes.Buffer's read cursor to just after the successful match.
Thus, subsequent calls to Say will only match against the unread portion of the buffer.
Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can:
Eventually(buffer, 3).Should(Say("[123]-star"))
Ditto with Consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can:
Consistently(buffer, 1).ShouldNot(Say("never-see-this"))
In addition to gbytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface.
In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer()
If the buffer is closed, the Say matcher will tell Eventually to abort.
*/
func Say(expected string, args ...interface{}) *sayMatcher {
formattedRegexp := expected
if len(args) > 0 {
formattedRegexp = fmt.Sprintf(expected, args...)
}
return &sayMatcher{
re: regexp.MustCompile(formattedRegexp),
}
}
type sayMatcher struct {
re *regexp.Regexp
receivedSayings []byte
}
func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) {
var buffer *Buffer
switch x := actual.(type) {
case *Buffer:
buffer = x
case BufferProvider:
buffer = x.Buffer()
default:
return nil, false
}
return buffer, true
}
func (m *sayMatcher) Match(actual interface{}) (success bool, err error) {
buffer, ok := m.buffer(actual)
if !ok {
return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. Got:\n%s", format.Object(actual, 1))
}
didSay, sayings := buffer.didSay(m.re)
m.receivedSayings = sayings
return didSay, nil
}
func (m *sayMatcher) FailureMessage(actual interface{}) (message string) {
return fmt.Sprintf(
"Got stuck at:\n%s\nWaiting for:\n%s",
format.IndentString(string(m.receivedSayings), 1),
format.IndentString(m.re.String(), 1),
)
}
func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return fmt.Sprintf(
"Saw:\n%s\nWhich matches the unexpected:\n%s",
format.IndentString(string(m.receivedSayings), 1),
format.IndentString(m.re.String(), 1),
)
}
func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
switch x := actual.(type) {
case *Buffer:
return !x.Closed()
case BufferProvider:
return !x.Buffer().Closed()
default:
return true
}
}

View File

@@ -1,78 +0,0 @@
package gexec
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
)
var tmpDir string
/*
Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory.
A path pointing to this binary is returned.
Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`.
*/
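// A hedged usage sketch (the package path is illustrative, and the usual
// Ginkgo/Gomega dot-imports are assumed):
//
//	pathToCLI, err := gexec.Build("github.com/example/some-cli")
//	Ω(err).ShouldNot(HaveOccurred())
//	session, err := gexec.Start(exec.Command(pathToCLI), GinkgoWriter, GinkgoWriter)
//	Eventually(session).Should(gexec.Exit(0))
//	gexec.CleanupBuildArtifacts() // typically done once, in an AfterSuite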
func Build(packagePath string, args ...string) (compiledPath string, err error) {
return BuildIn(os.Getenv("GOPATH"), packagePath, args...)
}
/*
BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument).
*/
func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
tmpDir, err := temporaryDirectory()
if err != nil {
return "", err
}
if len(gopath) == 0 {
return "", errors.New("$GOPATH not provided when building " + packagePath)
}
executable := filepath.Join(tmpDir, path.Base(packagePath))
if runtime.GOOS == "windows" {
executable = executable + ".exe"
}
cmdArgs := append([]string{"build"}, args...)
cmdArgs = append(cmdArgs, "-o", executable, packagePath)
build := exec.Command("go", cmdArgs...)
build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...)
output, err := build.CombinedOutput()
if err != nil {
return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
}
return executable, nil
}
/*
You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by
gexec. In Ginkgo this is typically done in an AfterSuite callback.
*/
func CleanupBuildArtifacts() {
if tmpDir != "" {
os.RemoveAll(tmpDir)
}
}
func temporaryDirectory() (string, error) {
var err error
if tmpDir == "" {
tmpDir, err = ioutil.TempDir("", "gexec_artifacts")
if err != nil {
return "", err
}
}
return ioutil.TempDir(tmpDir, "g")
}

View File

@@ -1,88 +0,0 @@
package gexec
import (
"fmt"
"github.com/onsi/gomega/format"
)
/*
The Exit matcher operates on a session:
Ω(session).Should(Exit(<optional status code>))
Exit passes if the session has already exited.
If no status code is provided, then Exit will succeed if the session has exited regardless of exit code.
Otherwise, Exit will only succeed if the process has exited with the provided status code.
Note that the process must have already exited. To wait for a process to exit, use Eventually:
Eventually(session, 3).Should(Exit(0))
*/
func Exit(optionalExitCode ...int) *exitMatcher {
exitCode := -1
if len(optionalExitCode) > 0 {
exitCode = optionalExitCode[0]
}
return &exitMatcher{
exitCode: exitCode,
}
}
type exitMatcher struct {
exitCode int
didExit bool
actualExitCode int
}
type Exiter interface {
ExitCode() int
}
func (m *exitMatcher) Match(actual interface{}) (success bool, err error) {
exiter, ok := actual.(Exiter)
if !ok {
return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1))
}
m.actualExitCode = exiter.ExitCode()
if m.actualExitCode == -1 {
return false, nil
}
if m.exitCode == -1 {
return true, nil
}
return m.exitCode == m.actualExitCode, nil
}
func (m *exitMatcher) FailureMessage(actual interface{}) (message string) {
if m.actualExitCode == -1 {
return "Expected process to exit. It did not."
} else {
return format.Message(m.actualExitCode, "to match exit code:", m.exitCode)
}
}
func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) {
if m.actualExitCode == -1 {
return "you really shouldn't be able to see this!"
} else {
if m.exitCode == -1 {
return "Expected process not to exit. It did."
} else {
return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode)
}
}
}
func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
session, ok := actual.(*Session)
if ok {
return session.ExitCode() == -1
}
return true
}
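A short, self-contained sketch of the Exit matcher in use, assuming a Unix-like test environment where the true binary exists:

package gexec_exit_example_test

import (
    "os/exec"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/onsi/gomega/gexec"
)

var _ = It("asserts on the exit code once the process has exited (sketch)", func() {
    session, err := gexec.Start(exec.Command("true"), GinkgoWriter, GinkgoWriter)
    Ω(err).ShouldNot(HaveOccurred())

    // Exit only passes after the process has exited, so pair it with Eventually.
    Eventually(session, 3).Should(gexec.Exit(0))
})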

View File

@@ -1,53 +0,0 @@
package gexec
import (
"io"
"sync"
)
/*
PrefixedWriter wraps an io.Writer, emitting the passed-in prefix at the beginning of each new line.
This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each
session by passing in a PrefixedWriter:
gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter))
*/
type PrefixedWriter struct {
prefix []byte
writer io.Writer
lock *sync.Mutex
atStartOfLine bool
}
func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter {
return &PrefixedWriter{
prefix: []byte(prefix),
writer: writer,
lock: &sync.Mutex{},
atStartOfLine: true,
}
}
func (w *PrefixedWriter) Write(b []byte) (int, error) {
w.lock.Lock()
defer w.lock.Unlock()
toWrite := []byte{}
for _, c := range b {
if w.atStartOfLine {
toWrite = append(toWrite, w.prefix...)
}
toWrite = append(toWrite, c)
w.atStartOfLine = c == '\n'
}
_, err := w.writer.Write(toWrite)
if err != nil {
return 0, err
}
return len(b), nil
}
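An illustrative sketch of PrefixedWriter combined with gexec.Start; the "[echo] " prefix and the echo command are placeholders:

package gexec_prefix_example_test

import (
    "os/exec"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/onsi/gomega/gexec"
)

var _ = It("prefixes each output line of a command (sketch)", func() {
    // Every line the command writes shows up in GinkgoWriter preceded by "[echo] ".
    outWriter := gexec.NewPrefixedWriter("[echo] ", GinkgoWriter)
    errWriter := gexec.NewPrefixedWriter("[echo] ", GinkgoWriter)

    session, err := gexec.Start(exec.Command("echo", "hello"), outWriter, errWriter)
    Ω(err).ShouldNot(HaveOccurred())
    Eventually(session).Should(gexec.Exit(0))
})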

View File

@@ -1,214 +0,0 @@
/*
Package gexec provides support for testing external processes.
*/
package gexec
import (
"io"
"os"
"os/exec"
"reflect"
"sync"
"syscall"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
const INVALID_EXIT_CODE = 254
type Session struct {
//The wrapped command
Command *exec.Cmd
//A *gbytes.Buffer connected to the command's stdout
Out *gbytes.Buffer
//A *gbytes.Buffer connected to the command's stderr
Err *gbytes.Buffer
//A channel that will close when the command exits
Exited <-chan struct{}
lock *sync.Mutex
exitCode int
}
/*
Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session.
The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err.
These buffers can be used with the gbytes.Say matcher to match against unread output:
Ω(session.Out).Should(gbytes.Say("foo-out"))
Ω(session.Err).Should(gbytes.Say("foo-err"))
In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with:
Ω(session).Should(gbytes.Say("foo-out"))
When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gbytes.Buffers and to the passed-in outWriter/errWriter.
This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter:
session, err := Start(command, GinkgoWriter, GinkgoWriter)
This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails.
The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself.
Instead, to assert that the command has exited you can use the gexec.Exit matcher:
Ω(session).Should(gexec.Exit())
When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any
Eventuallys waiting for the buffers to Say something.
*/
func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) {
exited := make(chan struct{})
session := &Session{
Command: command,
Out: gbytes.NewBuffer(),
Err: gbytes.NewBuffer(),
Exited: exited,
lock: &sync.Mutex{},
exitCode: -1,
}
var commandOut, commandErr io.Writer
commandOut, commandErr = session.Out, session.Err
if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() {
commandOut = io.MultiWriter(commandOut, outWriter)
}
if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() {
commandErr = io.MultiWriter(commandErr, errWriter)
}
command.Stdout = commandOut
command.Stderr = commandErr
err := command.Start()
if err == nil {
go session.monitorForExit(exited)
}
return session, err
}
/*
Buffer implements the gbytes.BufferProvider interface and returns s.Out
This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out:
Eventually(session).Should(gbytes.Say("foo"))
*/
func (s *Session) Buffer() *gbytes.Buffer {
return s.Out
}
/*
ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1.
To assert that the command has exited it is more convenient to use the Exit matcher:
Eventually(s).Should(gexec.Exit())
When the process exits because it has received a particular signal, the exit code will be 128+signal-value
(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html)
*/
func (s *Session) ExitCode() int {
s.lock.Lock()
defer s.lock.Unlock()
return s.exitCode
}
/*
Wait waits until the wrapped command exits. It can be passed an optional timeout.
If the command does not exit within the timeout, Wait will trigger a test failure.
Wait returns the session, making it possible to chain:
session.Wait().Out.Contents()
will wait for the command to exit then return the entirety of Out's contents.
Wait uses Eventually under the hood and accepts the same timeout/polling intervals that Eventually does.
*/
func (s *Session) Wait(timeout ...interface{}) *Session {
EventuallyWithOffset(1, s, timeout...).Should(Exit())
return s
}
/*
Kill sends the running command a SIGKILL signal. It does not wait for the process to exit.
If the command has already exited, Kill returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Kill() *Session {
if s.ExitCode() != -1 {
return s
}
s.Command.Process.Kill()
return s
}
/*
Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit.
If the command has already exited, Interrupt returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Interrupt() *Session {
return s.Signal(syscall.SIGINT)
}
/*
Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit.
If the command has already exited, Terminate returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Terminate() *Session {
return s.Signal(syscall.SIGTERM)
}
/*
Signal sends the running command the passed-in signal. It does not wait for the process to exit.
If the command has already exited, Signal returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Signal(signal os.Signal) *Session {
if s.ExitCode() != -1 {
return s
}
s.Command.Process.Signal(signal)
return s
}
func (s *Session) monitorForExit(exited chan<- struct{}) {
err := s.Command.Wait()
s.lock.Lock()
s.Out.Close()
s.Err.Close()
status := s.Command.ProcessState.Sys().(syscall.WaitStatus)
if status.Signaled() {
s.exitCode = 128 + int(status.Signal())
} else {
exitStatus := status.ExitStatus()
if exitStatus == -1 && err != nil {
s.exitCode = INVALID_EXIT_CODE
} else {
s.exitCode = exitStatus
}
}
s.lock.Unlock()
close(exited)
}
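To make the signal-handling behaviour above concrete, a hedged sketch that interrupts a long-running command and asserts on the 128+signal exit code (assumes a Unix-like environment with a sleep binary):

package gexec_signal_example_test

import (
    "os/exec"
    "syscall"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/onsi/gomega/gexec"
)

var _ = It("interrupts a running command and waits for it to exit (sketch)", func() {
    session, err := gexec.Start(exec.Command("sleep", "60"), GinkgoWriter, GinkgoWriter)
    Ω(err).ShouldNot(HaveOccurred())

    // Interrupt sends SIGINT; an uncaught signal surfaces as 128 + the signal number.
    session.Interrupt()
    Eventually(session, 5).Should(gexec.Exit(128 + int(syscall.SIGINT)))
})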

View File

@@ -1,3 +0,0 @@
package protobuf
//go:generate protoc --go_out=. simple_message.proto

View File

@@ -1,55 +0,0 @@
// Code generated by protoc-gen-go.
// source: simple_message.proto
// DO NOT EDIT!
/*
Package protobuf is a generated protocol buffer package.
It is generated from these files:
simple_message.proto
It has these top-level messages:
SimpleMessage
*/
package protobuf
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type SimpleMessage struct {
Description *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"`
Id *int32 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
Metadata *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *SimpleMessage) Reset() { *m = SimpleMessage{} }
func (m *SimpleMessage) String() string { return proto.CompactTextString(m) }
func (*SimpleMessage) ProtoMessage() {}
func (m *SimpleMessage) GetDescription() string {
if m != nil && m.Description != nil {
return *m.Description
}
return ""
}
func (m *SimpleMessage) GetId() int32 {
if m != nil && m.Id != nil {
return *m.Id
}
return 0
}
func (m *SimpleMessage) GetMetadata() string {
if m != nil && m.Metadata != nil {
return *m.Metadata
}
return ""
}

View File

@@ -1,9 +0,0 @@
syntax = "proto2";
package protobuf;
message SimpleMessage {
required string description = 1;
required int32 id = 2;
optional string metadata = 3;
}

View File

@@ -1,23 +0,0 @@
package fakematcher
import "fmt"
type FakeMatcher struct {
ReceivedActual interface{}
MatchesToReturn bool
ErrToReturn error
}
func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) {
matcher.ReceivedActual = actual
return matcher.MatchesToReturn, matcher.ErrToReturn
}
func (matcher *FakeMatcher) FailureMessage(actual interface{}) string {
return fmt.Sprintf("positive: %v", actual)
}
func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string {
return fmt.Sprintf("negative: %v", actual)
}

View File

@@ -1,20 +0,0 @@
Copyright (c) 2014 Amit Kumar Gupta
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,19 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package ctxhttp
import "net/http"
func canceler(client *http.Client, req *http.Request) func() {
// TODO(djd): Respect any existing value of req.Cancel.
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

View File

@@ -1,23 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package ctxhttp
import "net/http"
type requestCanceler interface {
CancelRequest(*http.Request)
}
func canceler(client *http.Client, req *http.Request) func() {
rc, ok := client.Transport.(requestCanceler)
if !ok {
return func() {}
}
return func() {
rc.CancelRequest(req)
}
}

View File

@@ -1,140 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp
import (
"io"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
)
func nop() {}
var (
testHookContextDoneBeforeHeaders = nop
testHookDoReturned = nop
testHookDidBodyClose = nop
)
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
cancel := canceler(client, req)
type responseAndError struct {
resp *http.Response
err error
}
result := make(chan responseAndError, 1)
go func() {
resp, err := client.Do(req)
testHookDoReturned()
result <- responseAndError{resp, err}
}()
var resp *http.Response
select {
case <-ctx.Done():
testHookContextDoneBeforeHeaders()
cancel()
// Clean up after the goroutine calling client.Do:
go func() {
if r := <-result; r.resp != nil {
testHookDidBodyClose()
r.resp.Body.Close()
}
}()
return nil, ctx.Err()
case r := <-result:
var err error
resp, err = r.resp, r.err
if err != nil {
return resp, err
}
}
c := make(chan struct{})
go func() {
select {
case <-ctx.Done():
cancel()
case <-c:
// The response's Body is closed.
}
}()
resp.Body = &notifyingReader{resp.Body, c}
return resp, nil
}
// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return Do(ctx, client, req)
}
// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
// notifyingReader is an io.ReadCloser that closes the notify channel after
// Close is called or a Read fails on the underlying ReadCloser.
type notifyingReader struct {
io.ReadCloser
notify chan<- struct{}
}
func (r *notifyingReader) Read(p []byte) (int, error) {
n, err := r.ReadCloser.Read(p)
if err != nil && r.notify != nil {
close(r.notify)
r.notify = nil
}
return n, err
}
func (r *notifyingReader) Close() error {
err := r.ReadCloser.Close()
if r.notify != nil {
close(r.notify)
r.notify = nil
}
return err
}
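A minimal, self-contained sketch of the helpers above: issue a GET with a one-second deadline, letting the nil client fall back to http.DefaultClient (the URL is a placeholder):

package main

import (
    "fmt"
    "io/ioutil"
    "time"

    "golang.org/x/net/context"
    "golang.org/x/net/context/ctxhttp"
)

func main() {
    // Give the whole request one second; afterwards Do returns ctx.Err().
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    resp, err := ctxhttp.Get(ctx, nil, "https://example.com/") // nil client -> http.DefaultClient
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        fmt.Println("read failed:", err)
        return
    }
    fmt.Printf("fetched %d bytes\n", len(body))
}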

View File

@@ -1,16 +0,0 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bitbucket provides constants for using OAuth2 to access Bitbucket.
package bitbucket
import (
"golang.org/x/oauth2"
)
// Endpoint is Bitbucket's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://bitbucket.org/site/oauth2/authorize",
TokenURL: "https://bitbucket.org/site/oauth2/access_token",
}

View File

@@ -1,112 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package clientcredentials implements the OAuth2.0 "client credentials" token flow,
// also known as the "two-legged OAuth 2.0".
//
// This should be used when the client is acting on its own behalf or when the client
// is the resource owner. It may also be used when requesting access to protected
// resources based on an authorization previously arranged with the authorization
// server.
//
// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4
package clientcredentials
import (
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
)
// tokenFromInternal maps an *internal.Token struct into
// an *oauth2.Token struct.
func tokenFromInternal(t *internal.Token) *oauth2.Token {
if t == nil {
return nil
}
tk := &oauth2.Token{
AccessToken: t.AccessToken,
TokenType: t.TokenType,
RefreshToken: t.RefreshToken,
Expiry: t.Expiry,
}
return tk.WithExtra(t.Raw)
}
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is
// returned along with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v)
if err != nil {
return nil, err
}
return tokenFromInternal(tk), nil
}
// Client Credentials Config describes a 2-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
type Config struct {
// ClientID is the application's ID.
ClientID string
// ClientSecret is the application's secret.
ClientSecret string
// TokenURL is the resource server's token endpoint
// URL. This is a constant specific to each server.
TokenURL string
// Scope specifies optional requested permissions.
Scopes []string
}
// Token uses client credentials to retrieve a token.
// The HTTP client to use is derived from the context.
// If nil, http.DefaultClient is used.
func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
return retrieveToken(ctx, c, url.Values{
"grant_type": {"client_credentials"},
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
})
}
// Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary. The underlying
// HTTP transport will be obtained using the provided context.
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
return oauth2.NewClient(ctx, c.TokenSource(ctx))
}
// TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context and the
// client ID and client secret.
//
// Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
source := &tokenSource{
ctx: ctx,
conf: c,
}
return oauth2.ReuseTokenSource(nil, source)
}
type tokenSource struct {
ctx context.Context
conf *Config
}
// Token refreshes the token by using a new client credentials request.
// tokens received this way do not include a refresh token
func (c *tokenSource) Token() (*oauth2.Token, error) {
return retrieveToken(c.ctx, c.conf, url.Values{
"grant_type": {"client_credentials"},
"scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")),
})
}
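A hedged usage sketch of the two-legged flow described above; the client ID, secret, and token URL are placeholders for values issued by a real authorization server:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/context"
    "golang.org/x/oauth2/clientcredentials"
)

func main() {
    // Placeholder credentials; TokenURL must point at the provider's token endpoint.
    conf := &clientcredentials.Config{
        ClientID:     "your-client-id",
        ClientSecret: "your-client-secret",
        TokenURL:     "https://provider.example.com/oauth2/token",
        Scopes:       []string{"read"},
    }

    ctx := context.Background()
    tok, err := conf.Token(ctx)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("token expires at:", tok.Expiry)

    // Alternatively, get an *http.Client that injects and refreshes the token automatically.
    client := conf.Client(ctx)
    _ = client
}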

View File

@@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package facebook provides constants for using OAuth2 to access Facebook.
package facebook
import (
"golang.org/x/oauth2"
)
// Endpoint is Facebook's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.facebook.com/dialog/oauth",
TokenURL: "https://graph.facebook.com/oauth/access_token",
}

View File

@@ -1,16 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package github provides constants for using OAuth2 to access Github.
package github
import (
"golang.org/x/oauth2"
)
// Endpoint is Github's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://github.com/login/oauth/authorize",
TokenURL: "https://github.com/login/oauth/access_token",
}
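For illustration only, a sketch of plugging this endpoint into an oauth2.Config to build a consent URL; the client credentials and scope are hypothetical:

package main

import (
    "fmt"

    "golang.org/x/oauth2"
    "golang.org/x/oauth2/github"
)

func main() {
    // Hypothetical OAuth app credentials; register an app on GitHub to obtain real ones.
    conf := &oauth2.Config{
        ClientID:     "your-client-id",
        ClientSecret: "your-client-secret",
        Scopes:       []string{"repo"},
        Endpoint:     github.Endpoint,
    }

    // Send the user to GitHub's consent page to authorize the application.
    fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))
}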

View File

@@ -1,86 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"sort"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
)
// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
var appengineVM bool
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
// AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
// that involves user accounts, see oauth2.Config instead.
//
// The provided context must have come from appengine.NewContext.
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
if appengineTokenFunc == nil {
panic("google: AppEngineTokenSource can only be used on App Engine.")
}
scopes := append([]string{}, scope...)
sort.Strings(scopes)
return &appEngineTokenSource{
ctx: ctx,
scopes: scopes,
key: strings.Join(scopes, " "),
}
}
// aeTokens caches fetched tokens so they can be reused until they expire.
var (
aeTokensMu sync.Mutex
aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
)
type tokenLock struct {
mu sync.Mutex // guards t; held while fetching or updating t
t *oauth2.Token
}
type appEngineTokenSource struct {
ctx context.Context
scopes []string
key string // to aeTokens map; space-separated scopes
}
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
if appengineTokenFunc == nil {
panic("google: AppEngineTokenSource can only be used on App Engine.")
}
aeTokensMu.Lock()
tok, ok := aeTokens[ts.key]
if !ok {
tok = &tokenLock{}
aeTokens[ts.key] = tok
}
aeTokensMu.Unlock()
tok.mu.Lock()
defer tok.mu.Unlock()
if tok.t.Valid() {
return tok.t, nil
}
access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
if err != nil {
return nil, err
}
tok.t = &oauth2.Token{
AccessToken: access,
Expiry: exp,
}
return tok.t, nil
}

View File

@@ -1,13 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
package google
import "google.golang.org/appengine"
func init() {
appengineTokenFunc = appengine.AccessToken
}

View File

@@ -1,14 +0,0 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appenginevm
package google
import "google.golang.org/appengine"
func init() {
appengineVM = true
appengineTokenFunc = appengine.AccessToken
}

View File

@@ -1,155 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"runtime"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
"google.golang.org/cloud/compute/metadata"
)
// DefaultClient returns an HTTP Client that uses the
// DefaultTokenSource to obtain authentication credentials.
//
// This client should be used when developing services
// that run on Google App Engine or Google Compute Engine
// and use "Application Default Credentials."
//
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
//
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
ts, err := DefaultTokenSource(ctx, scope...)
if err != nil {
return nil, err
}
return oauth2.NewClient(ctx, ts), nil
}
// DefaultTokenSource is a token source that uses
// "Application Default Credentials".
//
// It looks for credentials in the following places,
// preferring the first location found:
//
// 1. A JSON file whose path is specified by the
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
// 2. A JSON file in a location known to the gcloud command-line tool.
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
// 3. On Google App Engine it uses the appengine.AccessToken function.
// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
// credentials from the metadata server.
// (In this final case any provided scopes are ignored.)
//
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
//
func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
ts, err := tokenSourceFromFile(ctx, filename, scope)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
return ts, nil
}
// Second, try a well-known file.
filename := wellKnownFile()
_, err := os.Stat(filename)
if err == nil {
ts, err2 := tokenSourceFromFile(ctx, filename, scope)
if err2 == nil {
return ts, nil
}
err = err2
} else if os.IsNotExist(err) {
err = nil // ignore this error
}
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
}
// Third, if we're on Google App Engine use those credentials.
if appengineTokenFunc != nil && !appengineVM {
return AppEngineTokenSource(ctx, scope...), nil
}
// Fourth, if we're on Google Compute Engine use the metadata server.
if metadata.OnGCE() {
return ComputeTokenSource(""), nil
}
// None are found; return helpful error.
const url = "https://developers.google.com/accounts/docs/application-default-credentials"
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
func wellKnownFile() string {
const f = "application_default_credentials.json"
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
}
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
}
func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var d struct {
// Common fields
Type string
ClientID string `json:"client_id"`
// User Credential fields
ClientSecret string `json:"client_secret"`
RefreshToken string `json:"refresh_token"`
// Service Account fields
ClientEmail string `json:"client_email"`
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(b, &d); err != nil {
return nil, err
}
switch d.Type {
case "authorized_user":
cfg := &oauth2.Config{
ClientID: d.ClientID,
ClientSecret: d.ClientSecret,
Scopes: append([]string{}, scopes...), // copy
Endpoint: Endpoint,
}
tok := &oauth2.Token{RefreshToken: d.RefreshToken}
return cfg.TokenSource(ctx, tok), nil
case "service_account":
cfg := &jwt.Config{
Email: d.ClientEmail,
PrivateKey: []byte(d.PrivateKey),
Scopes: append([]string{}, scopes...), // copy
TokenURL: JWTTokenURL,
}
return cfg.TokenSource(ctx), nil
case "":
return nil, errors.New("missing 'type' field in credentials")
default:
return nil, fmt.Errorf("unknown credential type: %q", d.Type)
}
}
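A brief sketch of the Application Default Credentials flow described above; the storage scope and bucket URL are placeholders, and the call only succeeds in an environment where one of the credential sources exists:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/context"
    "golang.org/x/oauth2/google"
)

func main() {
    ctx := context.Background()

    // Walks the credential sources listed above: env var, gcloud file, App Engine, GCE metadata.
    client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
    if err != nil {
        log.Fatal(err)
    }

    resp, err := client.Get("https://www.googleapis.com/storage/v1/b/example-bucket/o")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}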

View File

@@ -1,145 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package google provides support for making OAuth2 authorized and
// authenticated HTTP requests to Google APIs.
// It supports the Web server flow, client-side credentials, service accounts,
// Google Compute Engine service accounts, and Google App Engine service
// accounts.
//
// For more information, please read
// https://developers.google.com/accounts/docs/OAuth2
// and
// https://developers.google.com/accounts/docs/application-default-credentials.
package google
import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
"google.golang.org/cloud/compute/metadata"
)
// Endpoint is Google's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
}
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
// ConfigFromJSON uses a Google Developers Console client_credentials.json
// file to construct a config.
// client_credentials.json can be downloaded from https://console.developers.google.com,
// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
// JSON format and provide the contents of the file as jsonKey.
func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
type cred struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectURIs []string `json:"redirect_uris"`
AuthURI string `json:"auth_uri"`
TokenURI string `json:"token_uri"`
}
var j struct {
Web *cred `json:"web"`
Installed *cred `json:"installed"`
}
if err := json.Unmarshal(jsonKey, &j); err != nil {
return nil, err
}
var c *cred
switch {
case j.Web != nil:
c = j.Web
case j.Installed != nil:
c = j.Installed
default:
return nil, fmt.Errorf("oauth2/google: no credentials found")
}
if len(c.RedirectURIs) < 1 {
return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
}
return &oauth2.Config{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RedirectURL: c.RedirectURIs[0],
Scopes: scope,
Endpoint: oauth2.Endpoint{
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
},
}, nil
}
// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
// the credentials that authorize and authenticate the requests.
// Create a service account on "Credentials" page under "APIs & Auth" for your
// project at https://console.developers.google.com to download a JSON key file.
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
var key struct {
Email string `json:"client_email"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(jsonKey, &key); err != nil {
return nil, err
}
return &jwt.Config{
Email: key.Email,
PrivateKey: []byte(key.PrivateKey),
Scopes: scope,
TokenURL: JWTTokenURL,
}, nil
}
// ComputeTokenSource returns a token source that fetches access tokens
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
// this token source if your program is running on a GCE instance.
// If no account is specified, "default" is used.
// Further information about retrieving access tokens from the GCE metadata
// server can be found at https://cloud.google.com/compute/docs/authentication.
func ComputeTokenSource(account string) oauth2.TokenSource {
return oauth2.ReuseTokenSource(nil, computeSource{account: account})
}
type computeSource struct {
account string
}
func (cs computeSource) Token() (*oauth2.Token, error) {
if !metadata.OnGCE() {
return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
}
acct := cs.account
if acct == "" {
acct = "default"
}
tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
if err != nil {
return nil, err
}
var res struct {
AccessToken string `json:"access_token"`
ExpiresInSec int `json:"expires_in"`
TokenType string `json:"token_type"`
}
err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
if err != nil {
return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
}
if res.ExpiresInSec == 0 || res.AccessToken == "" {
return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
}
return &oauth2.Token{
AccessToken: res.AccessToken,
TokenType: res.TokenType,
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
}, nil
}
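A hedged sketch of JWTConfigFromJSON in use; key.json and the scope are placeholders for a real service-account key file and API scope:

package main

import (
    "io/ioutil"
    "log"

    "golang.org/x/net/context"
    "golang.org/x/oauth2/google"
)

func main() {
    // key.json is a placeholder path to a downloaded service-account key file.
    data, err := ioutil.ReadFile("key.json")
    if err != nil {
        log.Fatal(err)
    }

    conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/devstorage.read_only")
    if err != nil {
        log.Fatal(err)
    }

    // The returned client signs JWT assertions with the service account key and
    // exchanges them for access tokens as needed.
    client := conf.Client(context.Background())
    _ = client
}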

View File

@@ -1,71 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"crypto/rsa"
"fmt"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
)
// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
// key file to read the credentials that authorize and authenticate the
// requests, and returns a TokenSource that does not use any OAuth2 flow but
// instead creates a JWT and sends that as the access token.
// The audience is typically a URL that specifies the scope of the credentials.
//
// Note that this is not a standard OAuth flow, but rather an
// optimization supported by a few Google services.
// Unless you know otherwise, you should use JWTConfigFromJSON instead.
func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
cfg, err := JWTConfigFromJSON(jsonKey)
if err != nil {
return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
}
pk, err := internal.ParseKey(cfg.PrivateKey)
if err != nil {
return nil, fmt.Errorf("google: could not parse key: %v", err)
}
ts := &jwtAccessTokenSource{
email: cfg.Email,
audience: audience,
pk: pk,
}
tok, err := ts.Token()
if err != nil {
return nil, err
}
return oauth2.ReuseTokenSource(tok, ts), nil
}
type jwtAccessTokenSource struct {
email, audience string
pk *rsa.PrivateKey
}
func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
iat := time.Now()
exp := iat.Add(time.Hour)
cs := &jws.ClaimSet{
Iss: ts.email,
Sub: ts.email,
Aud: ts.audience,
Iat: iat.Unix(),
Exp: exp.Unix(),
}
hdr := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
}
msg, err := jws.Encode(hdr, cs, ts.pk)
if err != nil {
return nil, fmt.Errorf("google: could not encode JWT: %v", err)
}
return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
}

View File

@@ -1,168 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
)
type sdkCredentials struct {
Data []struct {
Credential struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
TokenExpiry *time.Time `json:"token_expiry"`
} `json:"credential"`
Key struct {
Account string `json:"account"`
Scope string `json:"scope"`
} `json:"key"`
}
}
// An SDKConfig provides access to tokens from an account already
// authorized via the Google Cloud SDK.
type SDKConfig struct {
conf oauth2.Config
initialToken *oauth2.Token
}
// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
// account. If account is empty, the account currently active in
// Google Cloud SDK properties is used.
// Google Cloud SDK credentials must be created by running `gcloud auth`
// before using this function.
// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
func NewSDKConfig(account string) (*SDKConfig, error) {
configPath, err := sdkConfigPath()
if err != nil {
return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
}
credentialsPath := filepath.Join(configPath, "credentials")
f, err := os.Open(credentialsPath)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
}
defer f.Close()
var c sdkCredentials
if err := json.NewDecoder(f).Decode(&c); err != nil {
return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
}
if len(c.Data) == 0 {
return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
}
if account == "" {
propertiesPath := filepath.Join(configPath, "properties")
f, err := os.Open(propertiesPath)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
}
defer f.Close()
ini, err := internal.ParseINI(f)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
}
core, ok := ini["core"]
if !ok {
return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
}
active, ok := core["account"]
if !ok {
return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
}
account = active
}
for _, d := range c.Data {
if account == "" || d.Key.Account == account {
if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
}
var expiry time.Time
if d.Credential.TokenExpiry != nil {
expiry = *d.Credential.TokenExpiry
}
return &SDKConfig{
conf: oauth2.Config{
ClientID: d.Credential.ClientID,
ClientSecret: d.Credential.ClientSecret,
Scopes: strings.Split(d.Key.Scope, " "),
Endpoint: Endpoint,
RedirectURL: "oob",
},
initialToken: &oauth2.Token{
AccessToken: d.Credential.AccessToken,
RefreshToken: d.Credential.RefreshToken,
Expiry: expiry,
},
}, nil
}
}
return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
}
// Client returns an HTTP client using Google Cloud SDK credentials to
// authorize requests. The token will auto-refresh as necessary. The
// underlying http.RoundTripper will be obtained using the provided
// context. The returned client and its Transport should not be
// modified.
func (c *SDKConfig) Client(ctx context.Context) *http.Client {
return &http.Client{
Transport: &oauth2.Transport{
Source: c.TokenSource(ctx),
},
}
}
// TokenSource returns an oauth2.TokenSource that retrieves tokens from
// Google Cloud SDK credentials using the provided context.
// It will return the current access token stored in the credentials,
// and refresh it when it expires, but it won't update the credentials
// with the new access token.
func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
return c.conf.TokenSource(ctx, c.initialToken)
}
// Scopes are the OAuth 2.0 scopes the current account is authorized for.
func (c *SDKConfig) Scopes() []string {
return c.conf.Scopes
}
// sdkConfigPath tries to guess where the gcloud config is located.
// It can be overridden during tests.
var sdkConfigPath = func() (string, error) {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
}
homeDir := guessUnixHomeDir()
if homeDir == "" {
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
}
return filepath.Join(homeDir, ".config", "gcloud"), nil
}
func guessUnixHomeDir() string {
usr, err := user.Current()
if err == nil {
return usr.HomeDir
}
return os.Getenv("HOME")
}

View File

@@ -1,172 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jws provides encoding and decoding utilities for
// signed JWS messages.
package jws
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
)
// ClaimSet contains information about the JWT signature including the
// permissions being requested (scopes), the target of the token, the issuer,
// the time the token was issued, and the lifetime of the token.
type ClaimSet struct {
Iss string `json:"iss"` // email address of the client_id of the application making the access token request
Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
Typ string `json:"typ,omitempty"` // token type (Optional).
// Email for which the application is requesting delegated access (Optional).
Sub string `json:"sub,omitempty"`
// The old name of Sub. Client keeps setting Prn to be
// compliant with legacy OAuth 2.0 providers. (Optional)
Prn string `json:"prn,omitempty"`
// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
// This array is marshalled using custom code (see (c *ClaimSet) encode()).
PrivateClaims map[string]interface{} `json:"-"`
}
func (c *ClaimSet) encode() (string, error) {
// Reverting time back for machines whose time is not perfectly in sync.
// If the client machine's time is in the future according
// to Google servers, an access token will not be issued.
now := time.Now().Add(-10 * time.Second)
if c.Iat == 0 {
c.Iat = now.Unix()
}
if c.Exp == 0 {
c.Exp = now.Add(time.Hour).Unix()
}
if c.Exp < c.Iat {
return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
}
b, err := json.Marshal(c)
if err != nil {
return "", err
}
if len(c.PrivateClaims) == 0 {
return base64Encode(b), nil
}
// Marshal private claim set and then append it to b.
prv, err := json.Marshal(c.PrivateClaims)
if err != nil {
return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
}
// Concatenate public and private claim JSON objects.
if !bytes.HasSuffix(b, []byte{'}'}) {
return "", fmt.Errorf("jws: invalid JSON %s", b)
}
if !bytes.HasPrefix(prv, []byte{'{'}) {
return "", fmt.Errorf("jws: invalid JSON %s", prv)
}
b[len(b)-1] = ',' // Replace closing curly brace with a comma.
b = append(b, prv[1:]...) // Append private claims.
return base64Encode(b), nil
}
// Header represents the header for the signed JWS payloads.
type Header struct {
// The algorithm used for signature.
Algorithm string `json:"alg"`
// Represents the token type.
Typ string `json:"typ"`
}
func (h *Header) encode() (string, error) {
b, err := json.Marshal(h)
if err != nil {
return "", err
}
return base64Encode(b), nil
}
// Decode decodes a claim set from a JWS payload.
func Decode(payload string) (*ClaimSet, error) {
// decode returned id token to get expiry
s := strings.Split(payload, ".")
if len(s) < 2 {
// TODO(jbd): Provide more context about the error.
return nil, errors.New("jws: invalid token received")
}
decoded, err := base64Decode(s[1])
if err != nil {
return nil, err
}
c := &ClaimSet{}
err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
return c, err
}
// Signer returns a signature for the given data.
type Signer func(data []byte) (sig []byte, err error)
// EncodeWithSigner encodes a header and claim set with the provided signer.
func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
head, err := header.encode()
if err != nil {
return "", err
}
cs, err := c.encode()
if err != nil {
return "", err
}
ss := fmt.Sprintf("%s.%s", head, cs)
sig, err := sg([]byte(ss))
if err != nil {
return "", err
}
return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil
}
// Encode encodes a signed JWS with provided header and claim set.
// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
sg := func(data []byte) (sig []byte, err error) {
h := sha256.New()
h.Write([]byte(data))
return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
}
return EncodeWithSigner(header, c, sg)
}
// base64Encode returns a Base64url encoded version of the input string with any
// trailing "=" stripped.
func base64Encode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// base64Decode decodes the Base64url encoded string
func base64Decode(s string) ([]byte, error) {
// add back missing padding
switch len(s) % 4 {
case 1:
s += "==="
case 2:
s += "=="
case 3:
s += "="
}
return base64.URLEncoding.DecodeString(s)
}
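To make the encode/decode round trip above concrete, a self-contained sketch using a throwaway RSA key; all claim values are placeholders, and note that Decode does not verify the signature:

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"
    "log"
    "time"

    "golang.org/x/oauth2/jws"
)

func main() {
    // Throwaway key purely for illustration; real callers load their own private key.
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        log.Fatal(err)
    }

    claims := &jws.ClaimSet{
        Iss: "service-account@example.com",
        Aud: "https://provider.example.com/token",
        Iat: time.Now().Unix(),
        Exp: time.Now().Add(time.Hour).Unix(),
    }
    header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}

    // Encode signs header.claims with RSASSA-PKCS1-v1_5 over SHA-256.
    token, err := jws.Encode(header, claims, key)
    if err != nil {
        log.Fatal(err)
    }

    // Decode only parses the claim set back out; it does not verify the signature.
    parsed, err := jws.Decode(token)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("issuer:", parsed.Iss)
}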

View File

@@ -1,153 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
// known as "two-legged OAuth 2.0".
//
// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
package jwt
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
)
var (
defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
)
// Config is the configuration for using JWT to fetch tokens,
// commonly known as "two-legged OAuth 2.0".
type Config struct {
// Email is the OAuth client identifier used when communicating with
// the configured OAuth provider.
Email string
// PrivateKey contains the contents of an RSA private key or the
// contents of a PEM file that contains a private key. The provided
// private key is used to sign JWT payloads.
// PEM containers with a passphrase are not supported.
// Use the following command to convert a PKCS 12 file into a PEM.
//
// $ openssl pkcs12 -in key.p12 -out key.pem -nodes
//
PrivateKey []byte
// Subject is the optional user to impersonate.
Subject string
// Scopes optionally specifies a list of requested permission scopes.
Scopes []string
// TokenURL is the endpoint required to complete the 2-legged JWT flow.
TokenURL string
// Expires optionally specifies how long the token is valid for.
Expires time.Duration
}
// TokenSource returns a JWT TokenSource using the configuration
// in c and the HTTP client from the provided context.
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
}
// Client returns an HTTP client wrapping the context's
// HTTP transport and adding Authorization headers with tokens
// obtained from c.
//
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
return oauth2.NewClient(ctx, c.TokenSource(ctx))
}
// jwtSource is a source that always does a signed JWT request for a token.
// It should typically be wrapped with a reuseTokenSource.
type jwtSource struct {
ctx context.Context
conf *Config
}
func (js jwtSource) Token() (*oauth2.Token, error) {
pk, err := internal.ParseKey(js.conf.PrivateKey)
if err != nil {
return nil, err
}
hc := oauth2.NewClient(js.ctx, nil)
claimSet := &jws.ClaimSet{
Iss: js.conf.Email,
Scope: strings.Join(js.conf.Scopes, " "),
Aud: js.conf.TokenURL,
}
if subject := js.conf.Subject; subject != "" {
claimSet.Sub = subject
// prn is the old name of sub. Keep setting it
// to be compatible with legacy OAuth 2.0 providers.
claimSet.Prn = subject
}
if t := js.conf.Expires; t > 0 {
claimSet.Exp = time.Now().Add(t).Unix()
}
payload, err := jws.Encode(defaultHeader, claimSet, pk)
if err != nil {
return nil, err
}
v := url.Values{}
v.Set("grant_type", defaultGrantType)
v.Set("assertion", payload)
resp, err := hc.PostForm(js.conf.TokenURL, v)
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if c := resp.StatusCode; c < 200 || c > 299 {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
}
// tokenRes is the JSON response body.
var tokenRes struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
ExpiresIn int64 `json:"expires_in"` // relative seconds from now
}
if err := json.Unmarshal(body, &tokenRes); err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
token := &oauth2.Token{
AccessToken: tokenRes.AccessToken,
TokenType: tokenRes.TokenType,
}
raw := make(map[string]interface{})
json.Unmarshal(body, &raw) // no error checks for optional fields
token = token.WithExtra(raw)
if secs := tokenRes.ExpiresIn; secs > 0 {
token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
}
if v := tokenRes.IDToken; v != "" {
// decode returned id token to get expiry
claimSet, err := jws.Decode(v)
if err != nil {
return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
}
token.Expiry = time.Unix(claimSet.Exp, 0)
}
return token, nil
}
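Finally, a hedged sketch of the two-legged JWT flow this package implements; the PEM file, email, scope, and token URL are placeholders for a real provider's values:

package main

import (
    "io/ioutil"
    "log"

    "golang.org/x/net/context"
    "golang.org/x/oauth2/jwt"
)

func main() {
    // Placeholder key material and endpoints; substitute your provider's real values.
    pem, err := ioutil.ReadFile("service-account.pem")
    if err != nil {
        log.Fatal(err)
    }

    conf := &jwt.Config{
        Email:      "service-account@example.com",
        PrivateKey: pem,
        Scopes:     []string{"https://provider.example.com/auth/scope"},
        TokenURL:   "https://provider.example.com/oauth2/token",
    }

    // Each token fetch signs a fresh JWT assertion and posts it to TokenURL;
    // ReuseTokenSource then caches the token until it expires.
    client := conf.Client(context.Background())
    _ = client
}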

View File

@@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package linkedin provides constants for using OAuth2 to access LinkedIn.
package linkedin
import (
"golang.org/x/oauth2"
)
// Endpoint is LinkedIn's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.linkedin.com/uas/oauth2/authorization",
TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken",
}

View File

@@ -1,16 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package microsoft provides constants for using OAuth2 to access Windows Live ID.
package microsoft
import (
"golang.org/x/oauth2"
)
// LiveConnectEndpoint is Windows's Live ID OAuth 2.0 endpoint.
var LiveConnectEndpoint = oauth2.Endpoint{
AuthURL: "https://login.live.com/oauth20_authorize.srf",
TokenURL: "https://login.live.com/oauth20_token.srf",
}

View File

@@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
package odnoklassniki
import (
"golang.org/x/oauth2"
)
// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.odnoklassniki.ru/oauth/authorize",
TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
}

View File

@@ -1,22 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package paypal provides constants for using OAuth2 to access PayPal.
package paypal
import (
"golang.org/x/oauth2"
)
// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
}
// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.
var SandboxEndpoint = oauth2.Endpoint{
AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
}

View File

@@ -1,16 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package vk provides constants for using OAuth2 to access VK.com.
package vk
import (
"golang.org/x/oauth2"
)
// Endpoint is VK's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://oauth.vk.com/authorize",
TokenURL: "https://oauth.vk.com/access_token",
}

Some files were not shown because too many files have changed in this diff Show More