diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0940537 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +# locally generated HTML ior PDF version of the Markdown documentation +README.html +README.pdf +TODO.html +TODO.pdf + diff --git a/.travis.yml b/.travis.yml index 27b75ec..a531c19 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,37 @@ language: go +sudo: false -go: - - tip +env: + global: + - ESCHER=src + +matrix: + include: + - go: 1.12.x + os: linux + env: ONCE=true + - go: 1.11.x + os: osx + - name: Cross-Compile for windows (from Linux) + go: 1.11.x + os: linux + env: GOOS=windows GOARCH=amd64 CGO_ENABLED=0 + +# Allow use in forks +go_import_path: github.com/hoijui/escher + +before_install: +- if [ "$ONCE" = "true" ]; then sudo apt-get -y install inkscape; fi + +install: +- go get -v github.com/hoijui/circuit/client +- go get -v github.com/hoijui/circuit/client/docker +- go get -v -t ./... + +script: +- make +- escher "*test.All" +- mkdir -p public +- if [ "$ONCE" = "true" ]; then scripts/build_handbook.sh public; fi +- if [ "$ONCE" = "true" ]; then scripts/tutorials.sh; fi +- if [ "$ONCE" = "true" ]; then scripts/tests.sh; fi diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ebcd333 --- /dev/null +++ b/Makefile @@ -0,0 +1,7 @@ + +nix: + cd escher && go install + +clean: + rm $(GOPATH)/bin/escher + diff --git a/README.md b/README.md index 0ac3cd4..b847066 100644 --- a/README.md +++ b/README.md @@ -1,35 +1,423 @@ -![Escher in language](https://github.com/gocircuit/escher/raw/master/misc/img/main.png) +### NOTE: This repository is deprecated; please go to [the successor](https://github.com/hoijui/escher) -# Escher: A language for programming in pure metaphors +This is left here for reference. -Escher is a simple syntax for representing generic labeled graphs called _circuits_, much like -XML is a (not-so-simple) syntax for representing generic labeled trees. 
+**Go to for the current repo!** -Escher graphs can also be interpreted as executable circuits of independent processing -units. In this way, Escher circuits (used as code) can manipulate Escher circuits (representing data), -thus achieving linguistic uniformity between algorithm and data. +--- -In contrast, the standard language for manipulating XML—JavaScript—being designed -after-the-fact, constitutes an altogether unrelated semantic and syntax compared to XML. +--- -The analogy between Escher programs manipulating Escher circuits and JavaScript programs -manipulating XML is not co-incidental. The Escher Runtime is, in fact, our proposal -for a new “headless browser”, which we sometimes also call the “backend browser”, because -with Escher one is able to manipulate entire data centers the way one manipulates the -DOM of a webpage. +Old content following ... -Escher was designed to enable a new programming paradigm for software -that needs to control large heterogenous distributed systems consisting -of failing components and connections. +# [Escher - A language for programming in pure metaphors](https://hoijui.github.io/escher-old-history/) -With Escher, one views the entire technology stack of a typical Internet company -(backend services, mobile apps, outsourced cloud apps, and so on) within -a single program. 
+[![Build Status](https://travis-ci.org/hoijui/escher-old-history.svg?branch=master)](https://travis-ci.org/hoijui/escher-old-history) +[![GoDoc](https://godoc.org/github.com/hoijui/escher-old-history?status.png)](https://godoc.org/github.com/hoijui/escher-old-history) +[![Go Report Card](https://goreportcard.com/badge/github.com/hoijui/escher-old-history)](https://goreportcard.com/report/github.com/hoijui/escher-old-history) + +> ___NOTE___\ +> _Escher -- in the original repository -- is stagnant since 2015._\ +> This repo is archived since April 2020.\ +> **Go to for the current repo!** + +See [the projects website](https://hoijui.github.io/escher-old-history/) +for more info about the language. + +Enclosed | Enclosing +--- | --- +![Escher code](misc/img/main.svg) | ![Escher Hand with Reflecting Sphere](https://upload.wikimedia.org/wikipedia/en/6/66/Hand_with_Reflecting_Sphere.jpg) +Discrete | Continuous + +Escher is a programming language for everything. +It can naturally represent both process and data, +while being simpler than a calculator grammar. + +Escher is a language for building intelligent real-time translations between the semantics of +different physical devices accessible through chains or networks of digital or electrical technologies. + +In Escher, you can program from first- and third-person point-of-view at the same time; +just like Physics is particles and waves at the same time. + +An early "proposal" for the design of Escher, +[Escher: A black-and-white language for data and process representation](http://www.maymounkov.org/memex/abstract), +might be an informative (but not necessary) read for the theoretically inclined. + +## Application domains + +Anything that filters information from some input sources, in real-time, +and sends a transformed form to output devices. 
+ +* Definition and generation of _synthetic worlds_ governed by Physical laws, + as in Augmented Reality and the Gaming Industry +* _General purpose concurrent and distributed programming_, + such as Internet services and cloud applications +* _Relational data representation_, as in databases and CAD file formats +* _Real-time control loops_, as in Robotics +* Numerical and scientific computation pipelines +* ... + +## Computational Model + +___Attention:___ _Non-Turing Mathematics ahead_ + +The Escher abstraction of the world is NOT Turing-compatible: +From the point-of-view of an Escher program, +there is no input and output: +There are only emergences and disappearances of events. + +Escher presents the world in a model called +[Choiceless Computation](http://arxiv.org/pdf/math/9705225.pdf). + +Understanding the difference between _Turing Machines_ and _Choiceless Computation_, +while not entirely necessary, sheds much light on the profound difference between +Escher and other languages. +The relevant publications are quoted in the [bibliography](#bibliography) at the end. + +### Choiceless Programming by example + +The following puzzle demonstrates choiceless programming via a simple, relatable high-school +Math puzzle: + +--- + +Four beer caps are placed on the corners of a square table with arbitrary orientations. +There is a robot on the table that acts upon three commands: + +1. flip a corner cap +2. flip two diagonal caps +3. flip two caps along a side + +Upon action there is no guarantee as to which corner, diagonal +or side, respectively, the robot will choose to flip. + +Devise a sequence of commands that forces the robot to turn all caps in a +configuration where they all have the same orientation. + +Can you devise a sequence that ensures they all face up? Down? + +--- + +This is a great introduction to the notion of choiceless programming. + +## Quick start + +Escher is an interpreter comprising a singular executable binary. 
+It is written in [Go](http://golang.org), +and can be built for Linux, OSX and Windows. + +Given that the [Go Language compiler is installed](http://golang.org/doc/install), +you can build and install the circuit binary with one line: + +```bash +go get github.com/hoijui/escher/escher +``` + +Go to the Escher base directory and run one of the tutorials + +```bash +cd $GOPATH/src/github.com/hoijui/escher +escher -src src/tutorial "*helloworld.escher" +``` + +## Structure + +Please refer to [the projects website](https://hoijui.github.io/escher-old-history/). + +## Syntax (files) and faculties (directories) structure + +Escher programs are designated by a local root directory and all its descendants. +That directory is represented as the root in the faculty name-space +inside the Escher programming environment. + +Escher compiles all files ending in `.escher` and attaches the resulting circuit designs +to the name-spaces corresponding to their directory parents. + +To materialize (i.e. run) an Escher program, use the mandatory `-x` flag +to specify the path to the local source directory. + +```bash +escher -x tutorial/helloworld +``` + +Escher materializes the circuit design named `main` in the root source directory, e.g. + +```escher +// The main circuit is always the one materialized (executed). +main { + s @show + s.Action = "¡Hello, world!" +} +``` + +### Recombining programs + +To facilitate different embodiments (aka implementations) of gate functionalities, +Escher allows the mixing of two source hierarchies into a single execution. + +For instance, the hierarchy `acid/karajan` contains circuit definitions +(in terms of gates or other circuits), +while the hierarchy `tutorial/circuit/autorespawn` contains a root `main` circuit. 
+To execute the latter, using the former as a "library" available in the visible name-space, +run for instance + +```bash +escher \ + -x tutorial/circuit/autorespawn \ + -y acid/karajan \ + -n ExecutionName \ + -s Server=Xa3242440007ad57b \ + -d 228.8.8.8:22 +``` + +![Auto-respawn tutorial](misc/img/proc1.svg) + +## Basic gates + +By default, the Escher environment provides a basic set of gates (a basis), +which enable a rich (infinite) language of possibilities in data manipulation. + +Collectively, they are data (concept) and transformation (sentence) gates. + +These gates are not part of Escher's semantics. +They are merely an optional library — a playground for beginners. +Users can implement their own gates for data and transformation. + +The basis reference below is nearly entirely visual. +You will notice that the visual language follows a prescribed format. + +### Data (Noun) gates + +On blank slate, there is "nothing to do" -- so to speak. +For this reason, we have a collection of gates which are effectively "springs" of objects. + +Some produce integers, some floats, some complex numbers, some strings. +These are familiar types. + +There is one gate that produces "trees". +Trees are the basic type of "weavable" (or mutable) knowledge. +(In fact, the other types are not necessary, but we throw them in for convenience.) + +In the illustration below, the syntax of the respective gate design is displayed +as their name (white on black). + +![Impression of the mind](misc/img/impress.svg) + +### Combinator (Manipulator) gates + +![Grammar manipulation gates](misc/img/combine.svg) + +### Arithmetic (Applying) gates + +Arithmetic gates are a sufficient basis of operations that enables +algorithmic manipulation of the types string, int, float and complex. +_TODO_. + +### Tree of Knowledge and the Reason (Learning) Gate + +You will notice, one of the basic data gates allows the creation of a struct-like object. +This is called a _tree_. 
+It is a novel data structure, described in detail in +["Extensible records with scoped labels" by Daan Leijen](http://research.microsoft.com/pubs/65409/scopedlabels.pdf). + +These data structures are "built out" and "trimmed down" using three elegant +reflection methods, described in the above publication. + +Escher embodies all three in one gate, +whose main purpose is to manipulate the contents of trees. +This is the _Reason Gate_, illustrated below. + +The following three illustrations show the same gate design, +but under different orientations of the event streams. +In all valid cases, the relationship between the valve values shown, +exemplifies the effect of the gate. + +![Generalization](misc/img/generalization.svg) + +> Belief for the current state of the world, + combined with a new observation, + results in a theory. + +![Explanation](misc/img/explanation.svg) + +> A theory of observations, which explains (includes) an observation at hand, + explains the observation only to a belief consisting of the theory without the observation. + +![Prediction](misc/img/prediction.svg) + +> When a belief of the state of the world is combined with a theory that is bigger, + the conjectured difference must be found in a new observation. + +## Duality gates + +Duality gates are the boundary between Escher semantics and the outside world. +They are the I/O with the outside. +Such gates affect some external technology when prompted through Escher in a certain way. +Alternatively, such gates might fire an Escher message on one of its valves, +in response to an asynchronous events occurring in an external technology. 
+ +### Variation (Surprise) and Causation (Action) gates + +![See and Show](misc/img/seeshow.svg) + +For instance, with the gates we've seen so far, +one might construct the following higher-level circuit abstraction for an I/O device, +which is controlled by a deferred logic: + +![I/O device](misc/img/io.svg) + +And the respective source code: + +```escher +io_device { + // recalls + in see + out show + swtch switch + // matchings + Logic = swtch.Socialize + in.Sensation = swtch.Hear + out.Action = swtch.Speak +} +``` + +## Introspective and extrospective gates + +This special type of gates fulfills the complementary functions +of constructing new circuit designs "dynamically" +(akin to "reflection" in other languages), +and materializing (i.e. executing) these designs. + +_TODO_ + +### The Julia (Exploiting) Gate + +_TODO_ + +### The Escher (Einstein) Gate: Multiple foci over Space and Time + +_TODO_ + +## The future collapsed + +_TODO_ + +## Why you should be excited + +It may seem that Escher is not more than a new semantic to do an old job. +But something nearly magical happens when transition to using the +Escher semantic—various compiler intelligence improvements that +used to be NP-hard become simple and tractable: + +* Users do not need to explicitly moduralize (sub-divide) their circuits. + One could start designing a circuit wiring and the compiler will automatically + find sub-patterns that are abstractable as circuits. + Which includes non-obvious and/or recursive ones. + +* Code speed/space/etc optimizations reduce to a simple sub-graph replacement game, + highly transparent to and customizable by the user. + +## Misc + +A reference to the +[initial](http://www.maymounkov.org/chomsky-valiant-algorithmic-mirror) +[thoughts](http://www.maymounkov.org/puzzle-test-turing-test) +that led to the invention of Escher. + +To the original author, +Escher is a language for weaving dreams: + +It makes imagination real. 
+ +Help make it tangible, so it can be shared. ## Sponsors and credits -* [DARPA XDATA](http://www.darpa.mil/Our_Work/I2O/Programs/XDATA.aspx) initiative -under the program management of -[Dr. Christopher White](http://www.darpa.mil/Our_Work/I2O/Personnel/Dr_Christopher_White.aspx), 2013–2014 +* [DARPA XDATA](http://www.darpa.mil/Our_Work/I2O/Programs/XDATA.aspx) + initiative under the program management of + [Dr. Christopher White](http://www.darpa.mil/Our_Work/I2O/Personnel/Dr_Christopher_White.aspx), 2013–2014 * [Data Tactics Corp](http://www.data-tactics.com/), 2013–2014 * [L3](http://www.l-3com.com/), 2014 + +## Bibliography + +* [Choiceless Polynomial Time](http://arxiv.org/pdf/math/9705225.pdf), + [Andreas Blass](http://www.math.lsa.umich.edu/~ablass/), + [Yuri Gurevich](http://research.microsoft.com/en-us/um/people/gurevich/) and + [Saharon Shelah](http://shelah.logic.at/), published by Shelah Office + on [arXiv](http://arxiv.org/abs/math/9705225), 1997 + +* [Choiceless Polynomial Time Logic: Inability to Express](http://link.springer.com/chapter/10.1007%2F3-540-44622-2_6), + [Saharon Shelah](http://shelah.logic.at/), Springer Lecture Notes in Computer Science, Volume 1862, 2000, pp. 
72-125 + +* [Circuits of the Mind](http://www.amazon.com/Circuits-Mind-Leslie-G-Valiant/dp/0195126688), Leslie Valiant, 2000 +* [Probably Approximately Correct, 53589083](http://www.probablyapproximatelycorrect.com/), Leslie Valiant, 2013 + +Additional, enlightening reading includes: + +* [Ancient Evenings](http://en.wikipedia.org/wiki/Ancient_Evenings), Norman Mailer, 1983 +* [Yoga Philosophy of Patañjali](http://www.amazon.com/Yoga-Philosophy-Patanjali-Translation-Annotations/dp/0873957296), + State University of New York Press, 1984 +* [Samskrta-Subodhini: A Sanskrit Primer](http://www.amazon.com/Samskrta-Subodhini-Sanskrit-Primer-Michigan-Southeast/dp/089148079X), + Michigan Papers on South and Southeast Asia, Book 47, 1999 + +People working in this or unsuspectingly related areas: + +* [Steven Witten](http://acko.net/about/) and [Kyle McDonald](http://kylemcdonald.net/) for heroic + attempts at fluidity in digital art. + +* [Bret Victor](http://worrydream.com/#!2/LadderOfAbstraction) for the insight that inventing a + good user interface and reverse-engineering the mind is one and the same thing. + +* [Noam Chomsky](https://www.youtube.com/watch?v=bfSyWRvoYfw) for suggesting + that discrepancies in language vs action are a window into the producing device, as well as + the circularity of the meaning of languages. + +* [John Conway](http://en.wikipedia.org/wiki/John_Horton_Conway) + for the [Symmetries of Things](http://www.amazon.com/The-Symmetries-Things-John-Conway/dp/1568812205). + +* [Daniel Spielman](http://www.cs.yale.edu/homes/spielman/precon/precon.html) + for the insight that general linear systems will never be invertible in linear time, + because there are no naturally linguistically-posable problems that can result in such matrices. 
+ As well as the insight that even circular objects (like general undirected graphs) + have to be intellectually broken down to "simple" trees (via the notion of “distribution of trees”) + in order to enable a thinking process: + Thereby motivting the "tree of knowledge" data representation. + +* [Madhu Sudan](http://people.csail.mit.edu/madhu/) and + [Irit Dinur](http://www.wisdom.weizmann.ac.il/~dinuri/) for + [Probabilistically-checkable Proofs](http://www.ams.org/journals/bull/2007-44-01/S0273-0979-06-01143-8/home.html) and + [Universal Semantic Communication](http://people.csail.mit.edu/madhu/talks/2008/Semantic-TRDDC.pdf). + +* [Steven Pinker](http://stevenpinker.com/) and + [The Stuff of Thought](http://www.amazon.com/The-Stuff-Thought-Language-Window/dp/0143114247). + +* [Steven Boyd](http://web.stanford.edu/~boyd/) + for pointing attention to the relationship between convex optimization + [CVX](https://class.stanford.edu/courses/Engineering/CVX101/Winter2014/about) + and language. + +* [Leslie Valiant](http://people.seas.harvard.edu/~valiant/) for + [Circuits of the Mind](http://www.amazon.com/Circuits-Mind-Leslie-G-Valiant/dp/0195126688) and + [Probably Approximately Correct](http://www.probablyapproximatelycorrect.com/), + as well as for inspiring a spirit of thought outside of my profession (Theoretical Computer Science). + +* [Saharon Shelah](http://shelah.logic.at) for the notion of + [Choiceless Computation](http://link.springer.com/chapter/10.1007%2F3-540-44622-2_6). + +* [William Thurston](http://en.wikipedia.org/wiki/William_Thurston) for the + [Geometry and Topology of Three-manifolds](http://library.msri.org/books/gt3m/). + +* [Ken Thompson](http://en.wikipedia.org/wiki/Ken_Thompson) and + [Rob Pike](http://en.wikipedia.org/wiki/Rob_Pike) + for their pioneering work in programming languages that enabled the idea and later, + via the [Go Language](http://golang.org), the realization of Escher. 
+ +* [Ken Stephenson](http://www.math.utk.edu/~kens/) and his work on + [Circle Packings](http://www.amazon.com/Introduction-Circle-Packing-Discrete-Functions/dp/0521823560) + +* [The Clay Institute](http://www.claymath.org) + for the insight that reducing the count of open problems + and the theoretical unification of logical theories (reducing the count of axioms) + are one and the same thing. + diff --git a/TODO b/TODO deleted file mode 100644 index 190b306..0000000 --- a/TODO +++ /dev/null @@ -1,17 +0,0 @@ -* tool to discover blockages - -* rename verb to directive -* escher.Replace gate to substitute the residual of containing circuit with… - -* addresses are sugar for a two-sided reflex: -syntactic address and index on one side, and ... - * generalize - -* download wikipedia dataset -* file reader materializer -* convert non-escher files in source directory in materializers of respective file readers - -THINK - -* remove name/value distinction (delayed because go map keys cannot be circuit or other non-primitves at the moment) - * possible resolution: make all Go circuit manipulations functional diff --git a/TODO.md b/TODO.md new file mode 100644 index 0000000..051b0af --- /dev/null +++ b/TODO.md @@ -0,0 +1,40 @@ +# TODO - Escher language + +* tool to discover blockages +* rename verb to directive +* create an `e.Replace` gate to substitute the residual of the containing circuit with… +* addresses are sugar for a two-sided reflex: + syntactic address and index on one side, and ... 
+ * generalize +* download wikipedia data-set +* create a file-reader materializer +* convert non-escher files in source directory in materializers of respective file readers +* We need something like JavaDoc (both the standard for doc comments, and the tool) +* We need documentation in the code, and an API doc (see point above) + +## THINK + +* remove name/value distinction (delayed because go map keys have to be comparable, meaning: they have to support `==`, which is not guaranteed/true for circuits) + * possible solutions: + * make all Go circuit manipulations functional +* (maybe) convert all these TODOs in here to issues (on github) +* device some standard for storing a set of attributes with each gate, + which can be used for graphical representation: +```escher +myCircuit { + gateA 123 `// @visual{ colorFg { 255; 0; 0; }; colorBg { 0; 0; 0; }; position2d { 1.0; 1.0; }; position3d { 1.0; 1.0; 1.0; }; }` + gateB `some value` `// Some textual comment here @visual{ colorFg { 255; 255; 255; 255; }; colorBg { 0; 0; 0; 255; }; position2d { 1.0; 2.0; }; position3d { 1.0; 2.0; 3.0; }; }` +} +``` +* device and formulate a standard naming convention, + to be used at least in the Escher repo + (think Java: `ClassName`, `getSomething()`, `CONSTANT_VALUE`, ...) +* device a standard Index (== name-space) scheme + (think Java: `..`, + for example: `com.apache.commons.math.Rand.nextInt`) + maybe adhere to the go standard (which is also similar to the Java one) +* as file-names do not (currently) appear in the Index (== name-space), + we might want to enforce (or at least encourage) a similar standard like in Java, + where the file-name (without the .java extension) + is supposed to be equal to the class name within. + In Escher, this would be the contained circuits name. 
diff --git a/a/lex.go b/a/lex.go index 659cc61..8ed5d46 100644 --- a/a/lex.go +++ b/a/lex.go @@ -8,6 +8,9 @@ package a import "strconv" +// NullLiteral is the go value of a null/invalid escher literal +const NullLiteral = "" + func isNotVerbatim(r rune) bool { return r != '`' } diff --git a/a/symbol.go b/a/symbol.go index c3b90bd..868bc91 100644 --- a/a/symbol.go +++ b/a/symbol.go @@ -6,6 +6,6 @@ package a -// Source symbols are used as gate names by the Escher compiler to attach source information -// about the enclosing circuit. +// Source symbols are used as gate names by the Escher compiler to attach +// source information about the enclosing circuit. type Source struct{} diff --git a/be/be-circuit.go b/be/be-circuit.go index 2b4437b..8170a27 100644 --- a/be/be-circuit.go +++ b/be/be-circuit.go @@ -7,22 +7,18 @@ package be import ( - // "fmt" - - . "github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // *Spirit gates emit the residue of the enclosing circuit itself -var SpiritVerb = NewVerbAddress("*", "Spirit") - -// Required matter: Index, View, Circuit -func materializeCircuit(given Reflex, matter Circuit) interface{} { +var SpiritVerb = cir.NewVerbAddress("*", "Spirit") - design := matter.CircuitAt("Circuit") +// create all links before materializing gates +func createLinks(design cir.Circuit) map[cir.Name]Reflex { // create all links before materializing gates - gates := make(map[Name]Reflex) - gates[Super] = make(Reflex) + gates := make(map[cir.Name]Reflex) + gates[cir.Super] = make(Reflex) for name, view := range design.Flow { if gates[name] == nil { gates[name] = make(Reflex) @@ -38,46 +34,58 @@ func materializeCircuit(given Reflex, matter Circuit) interface{} { } } - // materialize gates - residue := New() - spirit := make(map[Name]interface{}) // channel to pass circuit residue back to spirit gates inside the circuit - for g, _ := range design.Gate { - if g == Super { + return gates +} + +func calcResidue(matter 
cir.Circuit, design cir.Circuit, gates map[cir.Name]Reflex) (cir.Circuit, map[cir.Name]interface{}) { + + residue := cir.New() + spirit := make(map[cir.Name]interface{}) // channel to pass circuit residue back to spirit gates inside the circuit + for g := range design.Gate { + if g == cir.Super { panicWithMatter(matter, "Circuit design overwrites the empty-string gate, in design %v\n", design) } - gsyntax := design.At(g) - var gresidue interface{} + gSyntax := design.At(g) + var gResidue interface{} // Compute view of gate within circuit - view := New() + view := cir.New() for vlv, vec := range design.Flow[g] { view.Grow(vlv, design.Gate[vec.Gate]) } - if Same(gsyntax, SpiritVerb) { - gresidue, spirit[g] = MaterializeInstance(gates[g], newSubMatterView(matter, view), &Future{}) + if cir.Same(gSyntax, SpiritVerb) { + gResidue, spirit[g] = MaterializeInstance(gates[g], newSubMatterView(matter, view), &Future{}) } else { - if gcir, ok := gsyntax.(Circuit); ok && !IsVerb(gcir) { - gresidue = materializeNoun(gates[g], newSubMatterView(matter, view).Grow("Noun", gcir)) + if gCir, ok := gSyntax.(cir.Circuit); ok && !cir.IsVerb(gCir) { + gResidue = materializeNoun(gates[g], newSubMatterView(matter, view).Grow("Noun", gCir)) } else { - gresidue = route(gsyntax, gates[g], newSubMatterView(matter, view)) + gResidue = route(gSyntax, gates[g], newSubMatterView(matter, view)) } } - residue.Gate[g] = gresidue + residue.Gate[g] = gResidue } - // connect boundary synapses + return residue, spirit +} + +// connect boundary synapses +func connect(given Reflex, matter cir.Circuit, design cir.Circuit, gates map[cir.Name]Reflex) { + for vlv, s := range given { - t, ok := gates[Super][vlv] + t, ok := gates[cir.Super][vlv] if !ok { panicWithMatter(matter, "connected valve %v is not connected within circuit design %v", vlv, design) } - delete(gates[Super], vlv) + delete(gates[cir.Super], vlv) go Link(s, t) go Link(t, s) } +} + +// send residue of this circuit to all escher.Spirit reflexes 
+func distributeResidue(residue cir.Circuit, spirit map[cir.Name]interface{}) cir.Circuit { - // send residue of this circuit to all escher.Spirit reflexes res := CleanUp(residue) go func() { for _, f := range spirit { @@ -85,14 +93,30 @@ func materializeCircuit(given Reflex, matter Circuit) interface{} { } }() - if len(gates[Super]) > 0 { + return res +} + +// Required matter: Index, View, Circuit +func materializeCircuit(given Reflex, matter cir.Circuit) interface{} { + + design := matter.CircuitAt("Circuit") + + gates := createLinks(design) // materialize gates + + residue, spirit := calcResidue(matter, design, gates) + + connect(given, matter, design, gates) + + res := distributeResidue(residue, spirit) + + if len(gates[cir.Super]) > 0 { panicWithMatter(matter, "circuit valves left unconnected") } return res } -func newSubMatterView(matter Circuit, view Circuit) Circuit { +func newSubMatterView(matter cir.Circuit, view cir.Circuit) cir.Circuit { r := newSubMatter(matter) r.Include("View", view) return r @@ -100,14 +124,14 @@ func newSubMatterView(matter Circuit, view Circuit) Circuit { // CleanUp removes nil-valued gates and their incident edges. // CleanUp never returns nil. -func CleanUp(u Circuit) Circuit { +func CleanUp(u cir.Circuit) cir.Circuit { for n, g := range u.Gate { if g != nil { continue } delete(u.Gate, n) for vlv, vec := range u.Flow[n] { - u.Unlink(Vector{n, vlv}, vec) + u.Unlink(cir.Vector{Gate: n, Valve: vlv}, vec) } } return u diff --git a/be/be-noun.go b/be/be-noun.go index a678603..8ff8341 100644 --- a/be/be-noun.go +++ b/be/be-noun.go @@ -7,13 +7,11 @@ package be import ( - // "fmt" - - . 
"github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // Required matter: Noun -func materializeNoun(given Reflex, matter Circuit) (residue interface{}) { +func materializeNoun(given Reflex, matter cir.Circuit) (residue interface{}) { noun := matter.At("Noun") for _, syn_ := range given { syn := syn_ diff --git a/be/be-system.go b/be/be-system.go index 2aa8e71..0d61926 100644 --- a/be/be-system.go +++ b/be/be-system.go @@ -10,11 +10,11 @@ import ( "fmt" "os" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/kit/runtime" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/kit/runtime" ) -func MaterializeSystem(system interface{}, index, barrier Circuit) (residue interface{}) { +func MaterializeSystem(system interface{}, index, barrier cir.Circuit) (residue interface{}) { defer func() { if r := recover(); r != nil { switch t := r.(type) { @@ -29,11 +29,11 @@ func MaterializeSystem(system interface{}, index, barrier Circuit) (residue inte } }() if barrier.IsNil() { - barrier = New() + barrier = cir.New() } - parent := New(). + parent := cir.New(). Grow("Index", index). - Grow("View", New()). + Grow("View", cir.New()). Grow("System", system). 
Grow("Barrier", barrier) @@ -41,12 +41,12 @@ func MaterializeSystem(system interface{}, index, barrier Circuit) (residue inte } // Required matter: Index, View -func route(design interface{}, given Reflex, matter Circuit) (residue interface{}) { +func route(design interface{}, given Reflex, matter cir.Circuit) (residue interface{}) { switch t := design.(type) { case int, float64, complex128, string: return materializeNoun(given, matter.Grow("Noun", t)) - case Circuit: - if IsVerb(t) { + case cir.Circuit: + if cir.IsVerb(t) { return materializeVerb(given, matter.Grow("Verb", t)) } else { return materializeCircuit(given, matter.Grow("Circuit", t)) diff --git a/be/be-verb.go b/be/be-verb.go index bb89ede..d41636b 100644 --- a/be/be-verb.go +++ b/be/be-verb.go @@ -7,13 +7,11 @@ package be import ( - // "fmt" - - . "github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // Required matter: Index, View, Verb -func materializeVerb(given Reflex, matter Circuit) (residue interface{}) { +func materializeVerb(given Reflex, matter cir.Circuit) (residue interface{}) { val, verb := lookup(matter) switch verb { case "*": @@ -25,14 +23,14 @@ func materializeVerb(given Reflex, matter Circuit) (residue interface{}) { return } -func newSubMatter(matter Circuit) Circuit { - return New(). +func newSubMatter(matter cir.Circuit) cir.Circuit { + return cir.New(). Grow("Index", matter.CircuitAt("Index")). Grow("View", matter.CircuitAt("View")). 
Grow("Super", matter) } -func relativize(matter Circuit) []Name { +func relativize(matter cir.Circuit) []cir.Name { sup, ok := matter.CircuitOptionAt("Super") if !ok { return nil @@ -48,16 +46,16 @@ func relativize(matter Circuit) []Name { if !ok { return nil } - reladdr := Verb(supverb).Address() + reladdr := cir.Verb(supverb).Address() if len(reladdr) < 2 { return nil } return reladdr[:len(reladdr)-1] // chop off the circuit name at the end } -func lookup(matter Circuit) (interface{}, string) { +func lookup(matter cir.Circuit) (interface{}, string) { index, syntax := Index(matter.CircuitAt("Index")), matter.CircuitAt("Verb") - verb, addr := Verb(syntax).Verb().(string), Verb(syntax).Address() + verb, addr := cir.Verb(syntax).Verb().(string), cir.Verb(syntax).Address() rel := relativize(matter) var val interface{} @@ -65,15 +63,15 @@ func lookup(matter Circuit) (interface{}, string) { abs := append(rel, addr...) val = index.Recall(abs...) // lookup relative to enclosing circuit's parent circuit if val != nil { - matter.Grow("Resolved", Circuit(NewVerbAddress(verb, abs...))) + matter.Grow("Resolved", cir.Circuit(cir.NewVerbAddress(verb, abs...))) return val, verb } } val = index.Recall(addr...) // otherwise lookup globally - matter.Include("Resolved", New().Grow(0, "???")) + matter.Include("Resolved", cir.New().Grow(0, "???")) if val == nil { - panicWithMatter(matter, "dangling address %v", Verb(syntax)) + panicWithMatter(matter, "dangling address %v", cir.Verb(syntax)) } - matter.Include("Resolved", Circuit(NewVerbAddress(verb, addr...))) + matter.Include("Resolved", cir.Circuit(cir.NewVerbAddress(verb, addr...))) return val, verb } diff --git a/be/entangle.go b/be/entangle.go index a1e1902..5e3da36 100644 --- a/be/entangle.go +++ b/be/entangle.go @@ -7,10 +7,9 @@ package be import ( - // "log" "sync" - . 
"github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // NewEntanglement returns two materializers that each materialize once, to @@ -25,7 +24,7 @@ type Entanglement struct { synapse *Synapse } -func (em *Entanglement) Materialize(given Reflex, _ Circuit) Value { +func (em *Entanglement) Materialize(given Reflex, _ cir.Circuit) cir.Value { em.Lock() defer em.Unlock() if len(given) != 1 { @@ -33,7 +32,7 @@ func (em *Entanglement) Materialize(given Reflex, _ Circuit) Value { } y := em.synapse em.synapse = nil - go Link(given[DefaultValve], y) + go Link(given[cir.DefaultValve], y) return nil } diff --git a/be/eye.go b/be/eye.go index 1749cbc..32ae484 100644 --- a/be/eye.go +++ b/be/eye.go @@ -7,9 +7,7 @@ package be import ( - // "fmt" - - . "github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // Eye is a runtime facility that delivers messages by invoking gate methods and @@ -22,23 +20,23 @@ import ( // higher-level concepts of cause and effect). // type Eye struct { - show map[Name]nerve + show map[cir.Name]nerve } type nerve chan *ReCognizer type change struct { - Valve Name + Valve cir.Name Value interface{} } -type EyeCognizer func(eye *Eye, valve Name, value interface{}) +type EyeCognizer func(eye *Eye, valve cir.Name, value interface{}) func NewEye(given Reflex) (eye *Eye) { eye = &Eye{ - show: make(map[Name]nerve), + show: make(map[cir.Name]nerve), } - for vlv, _ := range given { + for vlv := range given { eye.show[vlv] = make(nerve, 1) } return @@ -57,7 +55,7 @@ func (eye *Eye) Connect(given Reflex, cog EyeCognizer) { } } -func (eye *Eye) Show(valve Name, v interface{}) { +func (eye *Eye) Show(valve cir.Name, v interface{}) { n := eye.show[valve] r := <-n defer func() { diff --git a/be/index.go b/be/index.go index a7e29d2..940471c 100644 --- a/be/index.go +++ b/be/index.go @@ -7,35 +7,32 @@ package be import ( - // "fmt" - // "log" - - . 
"github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // Index is a hierarchy of names with associated meanings. // Alternatively, it is a key-value store wherein keys are sequences of names. -type Index Circuit +type Index cir.Circuit func NewIndex() Index { - return Index(New()) + return Index(cir.New()) } -func IsIndex(v Value) bool { - _, ok := v.(Circuit) +func IsIndex(v cir.Value) bool { + _, ok := v.(cir.Circuit) return ok } -func AsIndex(v Value) Index { - return Index(v.(Circuit)) +func AsIndex(v cir.Value) Index { + return Index(v.(cir.Circuit)) } -func (x Index) Recall(walk ...Name) Value { +func (x Index) Recall(walk ...cir.Name) cir.Value { if len(walk) == 0 { - return Circuit(x) + return cir.Circuit(x) } - v := Circuit(x).At(walk[0]) - if u, ok := v.(Circuit); ok && IsIndex(u) { + v := cir.Circuit(x).At(walk[0]) + if u, ok := v.(cir.Circuit); ok && IsIndex(u) { return AsIndex(u).Recall(walk[1:]...) } if len(walk) == 1 { @@ -44,12 +41,12 @@ func (x Index) Recall(walk ...Name) Value { return nil } -func (x Index) Memorize(value Value, walk ...Name) { - cx, step := Circuit(x), walk[0] +func (x Index) Memorize(value cir.Value, walk ...cir.Name) { + cx, step := cir.Circuit(x), walk[0] // y, ok := value.(Index) if ok { - value = Circuit(y) + value = cir.Circuit(y) } // if len(walk) == 1 { @@ -60,11 +57,11 @@ func (x Index) Memorize(value Value, walk ...Name) { return } if !cx.Has(step) { // next step is an index - cx.Include(step, Circuit(NewIndex())) + cx.Include(step, cir.Circuit(NewIndex())) } Index(cx.CircuitAt(step)).Memorize(value, walk[1:]...) 
} func (x Index) Merge(with Index) { - Circuit(x).Merge(Circuit(with)) + cir.Circuit(x).Merge(cir.Circuit(with)) } diff --git a/be/material.go b/be/material.go index efe9b8c..1cae858 100644 --- a/be/material.go +++ b/be/material.go @@ -8,12 +8,11 @@ package be import ( "fmt" - // "log" "os" "reflect" - "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/kit/runtime" + "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/kit/runtime" ) const cognizePrefix = "Cognize" @@ -101,7 +100,7 @@ func verify(matter circuit.Circuit, r gate, given Reflex) { // Verify all connected valves have dedicated handlers or there is a generic handler. ellipses := r.Ellipses.IsValid() - for vlv, _ := range given { + for vlv := range given { if ellipses { continue } @@ -111,7 +110,7 @@ func verify(matter circuit.Circuit, r gate, given Reflex) { } // Verify all dedicated valves are connected - for vlv, _ := range r.Fixed { + for vlv := range r.Fixed { if _, ok := given[vlv]; !ok { Panicf("gate valve (%v) must be connected", vlv) } diff --git a/be/noun.go b/be/noun.go index 8e21558..219e601 100644 --- a/be/noun.go +++ b/be/noun.go @@ -11,7 +11,7 @@ import ( "io" "io/ioutil" - . 
"github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // Sink @@ -22,17 +22,17 @@ func NewSink() Materializer { type sink struct{} -func (sink) Spark(*Eye, Circuit, ...interface{}) Value { +func (sink) Spark(*Eye, cir.Circuit, ...interface{}) cir.Value { return nil } -func (sink) OverCognize(_ *Eye, _ Name, v interface{}) { +func (sink) OverCognize(_ *Eye, _ cir.Name, v interface{}) { SinkValue(v) } func SinkValue(v interface{}) { switch t := v.(type) { - case Circuit: + case cir.Circuit: for _, g := range t.Gate { SinkValue(g) } @@ -49,7 +49,7 @@ func NewSource(v interface{}) Materializer { return NewMaterializer(&source{}, v) } -func MaterializeSource(given Reflex, matter Circuit, v interface{}) Value { +func MaterializeSource(given Reflex, matter cir.Circuit, v interface{}) cir.Value { return Materialize(given, matter, &source{}, v) } @@ -57,11 +57,11 @@ type source struct { Value interface{} } -func (n *source) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value { +func (n *source) Spark(eye *Eye, matter cir.Circuit, aux ...interface{}) cir.Value { println("spark source") n.Value = aux[0] go func() { - for vlv, _ := range matter.CircuitAt("View").Gate { + for vlv := range matter.CircuitAt("View").Gate { eye.Show(vlv, aux[0]) } }() @@ -71,7 +71,7 @@ func (n *source) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value { return nil } -func (n *source) OverCognize(*Eye, Name, interface{}) {} +func (n *source) OverCognize(*Eye, cir.Name, interface{}) {} func (n *source) MaterialString(aux ...interface{}) string { return fmt.Sprintf("Source(%v)", aux[0]) @@ -80,21 +80,21 @@ func (n *source) MaterialString(aux ...interface{}) string { // Future type Future struct { eye *Eye - view Circuit + view cir.Circuit } -func (f *Future) Spark(eye *Eye, matter Circuit, _ ...interface{}) Value { +func (f *Future) Spark(eye *Eye, matter cir.Circuit, _ ...interface{}) cir.Value { f.eye = eye f.view = matter.CircuitAt("View") return nil } -func 
(f *Future) Charge(v Value) { +func (f *Future) Charge(v cir.Value) { go func() { - for vlv, _ := range f.view.Gate { - f.eye.Show(vlv, DeepCopy(v)) + for vlv := range f.view.Gate { + f.eye.Show(vlv, cir.DeepCopy(v)) } }() } -func (f *Future) OverCognize(*Eye, Name, interface{}) {} +func (f *Future) OverCognize(*Eye, cir.Name, interface{}) {} diff --git a/be/reflex.go b/be/reflex.go index 3af9ded..9a66e79 100644 --- a/be/reflex.go +++ b/be/reflex.go @@ -7,23 +7,22 @@ package be import ( - . "github.com/gocircuit/escher/circuit" - // . "github.com/gocircuit/escher/faculty" + cir "github.com/hoijui/escher/circuit" ) // Reflex is a bundle of not yet attached sense endpoints (synapses). -type Reflex map[Name]*Synapse +type Reflex map[cir.Name]*Synapse // -type Materializer func(Reflex, Circuit) interface{} +type Materializer func(Reflex, cir.Circuit) interface{} // Material represents a materializable object implemented as a Go type. type Material interface { - Spark(eye *Eye, matter Circuit, aux ...interface{}) Value // Initializer + Spark(eye *Eye, matter cir.Circuit, aux ...interface{}) cir.Value // Initializer } type Sparkless struct{} -func (Sparkless) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value { +func (Sparkless) Spark(eye *Eye, matter cir.Circuit, aux ...interface{}) cir.Value { return nil } diff --git a/be/synapse.go b/be/synapse.go index fe8c99a..8365208 100644 --- a/be/synapse.go +++ b/be/synapse.go @@ -9,8 +9,7 @@ package be import ( // "fmt" "sync" - - // . "github.com/gocircuit/escher/circuit" + // . "github.com/hoijui/escher/circuit" ) // Cognize routines are called when a change in value is to be delivered to a reflex. diff --git a/be/union.go b/be/union.go index c7bcd02..8454489 100644 --- a/be/union.go +++ b/be/union.go @@ -3,29 +3,30 @@ // It helps future understanding of past knowledge to save // this notice, so peers of other times and backgrounds can // see history clearly. 
+// +// NOTE This reflex named Union (in Go) is called Fork in Escher. package be import ( - // "fmt" "log" "sync" - . "github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) type Union struct { - field []Name - flow map[Name]chan struct{} + field []cir.Name + flow map[cir.Name]chan struct{} sync.Mutex - union Circuit + union cir.Circuit } -func (u *Union) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value { +func (u *Union) Spark(eye *Eye, matter cir.Circuit, aux ...interface{}) cir.Value { // check whether default valve is connected and extract names of connected non-default valves var defaultConnected bool - for vlv, _ := range matter.CircuitAt("View").Gate { - if vlv == DefaultValve { + for vlv := range matter.CircuitAt("View").Gate { + if vlv == cir.DefaultValve { defaultConnected = true } else { u.field = append(u.field, vlv) @@ -37,13 +38,13 @@ func (u *Union) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value { ) } // allocate flow control channels - u.flow = make(map[Name]chan struct{}) + u.flow = make(map[cir.Name]chan struct{}) for _, f := range u.field { u.flow[f] = make(chan struct{}, 1) u.flow[f] <- struct{}{} // send initial flow tokens } // - u.union = New() + u.union = cir.New() return nil } @@ -57,16 +58,16 @@ func (u *Union) Cognize(eye *Eye, value interface{}) { log.Fatalf("Union over %v panic on %v: %v", u.field, value, r) } }() - eye.Show(f, value.(Circuit).At(f)) + eye.Show(f, value.(cir.Circuit).At(f)) y <- struct{}{} }() } - for _ = range u.field { + for range u.field { <-y } } -func (u *Union) OverCognize(eye *Eye, valve Name, value interface{}) { +func (u *Union) OverCognize(eye *Eye, valve cir.Name, value interface{}) { // log.Printf("%p u:%v %v", u, valve, value) <-u.flow[valve] // obtain flow token u.Lock() @@ -74,10 +75,10 @@ func (u *Union) OverCognize(eye *Eye, valve Name, value interface{}) { u.union.Grow(valve, value) // grow will panic, if gate already exists if u.union.Len() == len(u.field) 
{ // flush if all the fields have been set w := u.union - u.union = New() // flush - for f, _ := range u.flow { + u.union = cir.New() // flush + for f := range u.flow { u.flow[f] <- struct{}{} // replenish flow tokens } - eye.Show(DefaultValve, w) + eye.Show(cir.DefaultValve, w) } } diff --git a/be/util.go b/be/util.go index a982e88..0f85b1f 100644 --- a/be/util.go +++ b/be/util.go @@ -11,16 +11,15 @@ import ( "fmt" "io" - . "github.com/gocircuit/escher/circuit" - // "github.com/gocircuit/escher/kit/runtime" + cir "github.com/hoijui/escher/circuit" ) type Panic struct { - Matter Circuit + Matter cir.Circuit Msg string } -func panicWithMatter(matter Circuit, format string, arg ...interface{}) { +func panicWithMatter(matter cir.Circuit, format string, arg ...interface{}) { var w bytes.Buffer fmt.Fprintf(&w, format, arg...) fmt.Fprintf(&w, "\n") @@ -34,13 +33,13 @@ func Panicf(f string, a ...interface{}) { panic(w.String()) } -func PrintableMatter(u Circuit) string { +func PrintableMatter(u cir.Circuit) string { var w bytes.Buffer PrintMatter(&w, u) return w.String() } -func PrintMatter(w io.Writer, matter Circuit) { +func PrintMatter(w io.Writer, matter cir.Circuit) { for { view, _ := matter.CircuitOptionAt("View") switch { @@ -49,13 +48,13 @@ func PrintMatter(w io.Writer, matter Circuit) { fmt.Fprintf(w, "CIRCUIT(%v)%v %v\n", PrintView(view), SummarizeIndex(matter), cir) case matter.Has("Verb"): - verb := Verb(matter.CircuitAt("Verb")) - addr := Verb(matter.CircuitAt("Resolved")) + verb := cir.Verb(matter.CircuitAt("Verb")) + addr := cir.Verb(matter.CircuitAt("Resolved")) fmt.Fprintf(w, "DIRECTIVE(%v)%v %v/%v\n", PrintView(view), SummarizeIndex(matter), verb, addr) case matter.Has("System"): system := matter.At("System") - fmt.Fprintf(w, "MATERIALIZE(%v)%v %v\n", PrintView(view), SummarizeIndex(matter), String(system)) + fmt.Fprintf(w, "MATERIALIZE(%v)%v %v\n", PrintView(view), SummarizeIndex(matter), cir.String(system)) case matter.Has("Noun"): noun := 
matter.At("Noun") @@ -84,7 +83,7 @@ func PrintMatter(w io.Writer, matter Circuit) { } } -func PrintView(u Circuit) string { +func PrintView(u cir.Circuit) string { var w bytes.Buffer for i, n := range u.SortedNames() { if i > 0 { @@ -95,7 +94,7 @@ func PrintView(u Circuit) string { return w.String() } -func SummarizeIndex(matter Circuit) string { +func SummarizeIndex(matter cir.Circuit) string { x := matter.CircuitAt("Index") var w bytes.Buffer w.WriteString(" Index{ ") diff --git a/circuit/all_test.go b/circuit/all_test.go index f1b3ffe..247d392 100644 --- a/circuit/all_test.go +++ b/circuit/all_test.go @@ -15,13 +15,13 @@ func TestSame(t *testing.T) { if !Same(New().Grow("x", nil), New().Grow("x", nil)) { t.Errorf("same") } - if !Same(New().Grow("x", ""), New().Grow("x", "")) { + if !Same(New().Grow("x", DefaultValve), New().Grow("x", DefaultValve)) { t.Errorf("same") } } func TestVerb(t *testing.T) { - a, b := Circuit(NewLookupVerb("abc", "d", 1)), Circuit(NewLookupVerb("abc", "d", 1)) + a, b := Circuit(NewVerbAddress("@", "abc", "d", 1)), Circuit(NewVerbAddress("@", "abc", "d", 1)) if !Same(a, b) { t.Errorf("verb same") } diff --git a/circuit/circuit.go b/circuit/circuit.go index 1386697..23a0a4c 100644 --- a/circuit/circuit.go +++ b/circuit/circuit.go @@ -7,20 +7,22 @@ package circuit import ( - // "log" "fmt" "sort" - // "math/rand" ) -// Circuit ... +// Circuit represents an Escher circuit. +// See the Escher Handbook for a description of circuits. type Circuit struct { Gate map[Name]Value Flow map[Name]map[Name]Vector // gate -> valve -> opposing gate and valve } +// Super is the super-gates name. +// The super-gate is the gate-view of the current/top-most circuit. 
const Super = "" +// New creates a new circuit without gates nor flows func New() Circuit { return Circuit{ Gate: make(map[Name]Value), @@ -28,23 +30,27 @@ func New() Circuit { } } +// IsNil checks whether the argument circuit is uninitialized func (u Circuit) IsNil() bool { return u.Gate == nil || u.Flow == nil } +// IsEmpty checks whether the argument circuit has no gates nor flows func (u Circuit) IsEmpty() bool { return len(u.Gate) == 0 && len(u.Flow) == 0 } +// SortedLetters returns a sorted list of all gate IDs that are strings func (u Circuit) SortedLetters() []string { x := u.Letters() sort.Strings(x) return x } +// Letters returns a list of all gate IDs that are strings func (u Circuit) Letters() []string { var l []string - for key, _ := range u.Gate { + for key := range u.Gate { if s, ok := key.(string); ok { l = append(l, s) } @@ -52,15 +58,17 @@ func (u Circuit) Letters() []string { return l } +// SortedNumbers returns a sorted list of all gate IDs that are ints func (u Circuit) SortedNumbers() []int { x := u.Numbers() sort.Ints(x) return x } +// Numbers returns a list of all gate IDs that are ints func (u Circuit) Numbers() []int { var l []int - for key, _ := range u.Gate { + for key := range u.Gate { if i, ok := key.(int); ok { l = append(l, i) } @@ -68,24 +76,29 @@ func (u Circuit) Numbers() []int { return l } +// Names returns a list of all gate IDs (whether they are string or int) func (u Circuit) Names() []Name { var r []Name - for n, _ := range u.Gate { + for n := range u.Gate { r = append(r, n) } return r } +// SortedNames returns a sorted list of all gate IDs (whether they are string or int) func (u Circuit) SortedNames() []Name { n := u.Names() SortNames(n) return n } +// Gates returns a map of gate IDs to their values func (u Circuit) Gates() map[Name]Value { return u.Gate } +// Unify creates a most simple string to narrowly identify a circuit +// by its (supplied) name and number of gates func (u Circuit) Unify(name string) string { return 
fmt.Sprintf("%s#%d", name, u.Len()) } diff --git a/circuit/flow.go b/circuit/flow.go index 8dc9926..ac22849 100644 --- a/circuit/flow.go +++ b/circuit/flow.go @@ -11,6 +11,8 @@ import ( "log" ) +// Link connects two different, yet unconnected valves (by vector), +// potentially from the same gate func (u Circuit) Link(x, y Vector) { if x.Gate == y.Gate && x.Valve == y.Valve { panic("self loop") @@ -34,6 +36,7 @@ func (u Circuit) valves(p Name) map[Name]Vector { return u.Flow[p] } +// Unlink removes the link between two Vectors func (u Circuit) Unlink(x, y Vector) { xs, ys := u.Flow[x.Gate], u.Flow[y.Gate] delete(xs, x.Valve) @@ -46,22 +49,30 @@ func (u Circuit) Unlink(x, y Vector) { } } +// Valves returns the list of connected valve-name +// of the gate with the supplied name, +// and their connected vectors func (u Circuit) Valves(gate Name) map[Name]Vector { return u.Flow[gate] } +// ValveNames returns the list of connected valve-names +// of the gate with the supplied name func (u Circuit) ValveNames(gate Name) []Name { var r []Name - for n, _ := range u.Flow[gate] { + for n := range u.Flow[gate] { r = append(r, n) } return r } +// Degree returns the number of connected valves +// of the gate with the supplied name func (u Circuit) Degree(gate Name) int { return len(u.Flow[gate]) } +// View returns a copy of this circuit reduced to the supplied gate func (u Circuit) View(gate Name) Circuit { x := New() for vlv, vec := range u.Flow[gate] { @@ -70,6 +81,7 @@ func (u Circuit) View(gate Name) Circuit { return x } +// Follow returns the vector linked up with the supplied vector func (u Circuit) Follow(v Vector) Vector { return u.Flow[v.Gate][v.Valve] } @@ -77,15 +89,16 @@ func (u Circuit) Follow(v Vector) Vector { func (u Circuit) Flows() (r [][2]Vector) { for xname, xview := range u.Flow { for xvalve, xvec := range xview { - r = append(r, [2]Vector{Vector{xname, xvalve}, xvec}) + r = append(r, [2]Vector{{xname, xvalve}, xvec}) } } return } +// Vol counts the number 
of connected valves within this circuit func (u Circuit) Vol() (vol int) { for _, view := range u.Flow { - for _ = range view { + for range view { vol++ } } diff --git a/circuit/gate.go b/circuit/gate.go index 1b0bb7d..8f15a73 100644 --- a/circuit/gate.go +++ b/circuit/gate.go @@ -11,15 +11,19 @@ import ( "log" ) +// TODO func (u Circuit) OptionAt(name Name) (Value, bool) { v, ok := u.Gate[name] return v, ok } +// At returns the value of the gate with the supplied name func (u Circuit) At(name Name) Value { return u.Gate[name] } +// IntOrZeroAt returns the value of the gate with the supplied name +// if it is an int value, 0 otherwise func (u Circuit) IntOrZeroAt(name Name) int { i, ok := u.OptionAt(name) if !ok { @@ -28,6 +32,7 @@ func (u Circuit) IntOrZeroAt(name Name) int { return i.(int) } +// TODO ... and more below func (u Circuit) NameAt(name Name) Name { return u.At(name).(Name) } @@ -40,6 +45,8 @@ func (u Circuit) FloatAt(name Name) float64 { return u.At(name).(float64) } +// FloatOrZeroAt returns the value of the gate with the supplied name +// if it is a float value, 0.0 otherwise func (u Circuit) FloatOrZeroAt(name Name) float64 { f, ok := u.OptionAt(name) if !ok { @@ -91,11 +98,11 @@ func (u Circuit) StringAt(name Name) string { func (u Circuit) StringOptionAt(name Name) (string, bool) { v, ok := u.OptionAt(name) if !ok { - return "", false + return "", false // "" means just an empty string value here } t, ok := v.(string) if !ok { - return "", false + return "", false // "" means just an empty string value here } return t, true } @@ -266,6 +273,7 @@ func (u Circuit) Exclude(name Name) (forgotten Value) { return } +// Len returns the number of gates in a circuit func (u Circuit) Len() int { return len(u.Gate) } diff --git a/circuit/irreducible.go b/circuit/irreducible.go index b5e55b4..68a584e 100644 --- a/circuit/irreducible.go +++ b/circuit/irreducible.go @@ -6,10 +6,6 @@ package circuit -import ( - // "fmt" -) - // Circuit is an irreducible. 
An irreducible is an object with a Copy and Same methods. func (u Circuit) Copy() Circuit { diff --git a/circuit/print.go b/circuit/print.go index 95bef68..7700090 100644 --- a/circuit/print.go +++ b/circuit/print.go @@ -11,7 +11,7 @@ import ( "fmt" "io" - . "github.com/gocircuit/escher/a" + "github.com/hoijui/escher/a" ) type Format struct { @@ -109,7 +109,7 @@ func String(v Value) string { case int, float64, complex128, bool: return fmt.Sprintf("%v", t) case string: - if IsName(t) { + if a.IsName(t) { return fmt.Sprintf("%v", t) } else { return fmt.Sprintf("%q", t) diff --git a/circuit/reflow.go b/circuit/reflow.go index 099cfc7..7aec83b 100644 --- a/circuit/reflow.go +++ b/circuit/reflow.go @@ -6,11 +6,6 @@ package circuit -import ( - // "fmt" - // "log" -) - func (u Circuit) Reflow(s, t Name) { if _, ok := u.Flow[t]; ok { panic("reflow overwrite") diff --git a/circuit/value.go b/circuit/value.go index 645c4a5..85e122a 100644 --- a/circuit/value.go +++ b/circuit/value.go @@ -13,6 +13,7 @@ import ( // Value is one of: string, int, float64, complex128, Circuit type Value interface{} +// DeepCopy creates a deep copy of the supplied value func DeepCopy(x Value) (y Value) { switch t := x.(type) { case Circuit: @@ -21,6 +22,7 @@ func DeepCopy(x Value) (y Value) { return x } +// Copy creates a shallow copy of the supplied value func Copy(x Value) (y Value) { switch t := x.(type) { case Circuit: @@ -29,6 +31,8 @@ func Copy(x Value) (y Value) { return x } +// Same checks whether the two supplied values are the same. +// See `Value.Same()` func Same(x, y Value) bool { switch t := x.(type) { case Circuit: diff --git a/circuit/vector.go b/circuit/vector.go index 23a9d2c..8a89eb1 100644 --- a/circuit/vector.go +++ b/circuit/vector.go @@ -10,12 +10,13 @@ import ( "fmt" ) -// Vector ... 
+// Vector identifies a specific valve of a gate (== instance of a circuit) type Vector struct { - Gate Name + Gate Name Valve Name } +// String prints a string representation of the vector func (v Vector) String() string { return fmt.Sprintf("%v:%v", v.Gate, v.Valve) } diff --git a/circuit/verb.go b/circuit/verb.go index 918c428..b6c9450 100644 --- a/circuit/verb.go +++ b/circuit/verb.go @@ -12,10 +12,10 @@ import ( "io" "strings" - . "github.com/gocircuit/escher/a" + "github.com/hoijui/escher/a" ) -// DefaultValve +// DefaultValve is the name of the default valve const DefaultValve = "" // Verb is an interpretation of a circuit. @@ -23,6 +23,8 @@ const DefaultValve = "" // The value of the empty-string gate, if present, is expected to be a string and is a ‘verb’ word. type Verb Circuit +// NewAddress returns a verb-view circuit (like an array in other languages) +// of the supplied names in the supplied order. func NewAddress(addr ...Name) Verb { x := New() for i, n := range addr { @@ -31,18 +33,21 @@ func NewAddress(addr ...Name) Verb { return Verb(x) } +// NewVerbAddress returns a verb-view circuit (like an array in other languages) +// with the given name and the supplied names in the supplied order func NewVerbAddress(verb string, addr ...Name) Verb { x := NewAddress(addr...) 
- x.Gate[""] = verb + x.Gate[Super] = verb return x } +// IsVerb returns true if the supplied value is a verb func IsVerb(v Value) bool { u, ok := v.(Circuit) if !ok { return false } - s, ok := u.StringOptionAt("") + s, ok := u.StringOptionAt(Super) return s == "*" || s == "@" } @@ -57,7 +62,7 @@ func (a Verb) Address() (addr []Name) { } func (a Verb) Verb() Value { - return a.Gate[""] + return a.Gate[Super] } func (a Verb) compactible() bool { @@ -88,17 +93,17 @@ func (a Verb) String() string { return a.summarize() } -func (a Verb) summarize() string { - index := Circuit(a).SortedNumbers() +func (verb Verb) summarize() string { + index := Circuit(verb).SortedNumbers() var w bytes.Buffer - if v, ok := a.Gate[""]; ok { + if v, ok := verb.Gate[Super]; ok { w.WriteString(fmt.Sprintf("%v", v)) } for _, i := range index { - x := a.Gate[i] + x := verb.Gate[i] fmt.Fprintf(&w, "%v", x) if i+1 < len(index) { - w.WriteString(RefineSymbolString) + w.WriteString(a.RefineSymbolString) } } return w.String() diff --git a/escher/main.go b/escher/main.go index 07cef05..c233897 100644 --- a/escher/main.go +++ b/escher/main.go @@ -12,29 +12,29 @@ import ( "io" "os" - . "github.com/gocircuit/escher/a" - . "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - . "github.com/gocircuit/escher/faculty" - . 
"github.com/gocircuit/escher/kit/fs" - kio "github.com/gocircuit/escher/kit/io" - "github.com/gocircuit/escher/see" + "github.com/hoijui/escher/a" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + fac "github.com/hoijui/escher/faculty" + "github.com/hoijui/escher/kit/fs" + kio "github.com/hoijui/escher/kit/io" + "github.com/hoijui/escher/see" // Load faculties - _ "github.com/gocircuit/escher/faculty/basic" - "github.com/gocircuit/escher/faculty/circuit" - _ "github.com/gocircuit/escher/faculty/cmplx" - _ "github.com/gocircuit/escher/faculty/escher" - _ "github.com/gocircuit/escher/faculty/http" - _ "github.com/gocircuit/escher/faculty/index" - _ "github.com/gocircuit/escher/faculty/io" - _ "github.com/gocircuit/escher/faculty/math" - _ "github.com/gocircuit/escher/faculty/model" - fos "github.com/gocircuit/escher/faculty/os" - "github.com/gocircuit/escher/faculty/test" - _ "github.com/gocircuit/escher/faculty/text" - _ "github.com/gocircuit/escher/faculty/time" - _ "github.com/gocircuit/escher/faculty/yield" + _ "github.com/hoijui/escher/faculty/basic" + "github.com/hoijui/escher/faculty/circuit" + _ "github.com/hoijui/escher/faculty/cmplx" + _ "github.com/hoijui/escher/faculty/escher" + _ "github.com/hoijui/escher/faculty/http" + _ "github.com/hoijui/escher/faculty/index" + _ "github.com/hoijui/escher/faculty/io" + _ "github.com/hoijui/escher/faculty/math" + _ "github.com/hoijui/escher/faculty/model" + fos "github.com/hoijui/escher/faculty/os" + "github.com/hoijui/escher/faculty/test" + _ "github.com/hoijui/escher/faculty/text" + _ "github.com/hoijui/escher/faculty/time" + _ "github.com/hoijui/escher/faculty/yield" ) // usage: escher [-a dir] [-show] address arguments... 
@@ -65,18 +65,18 @@ func main() { test.Init(*flagSrc) circuit.Init(*flagDiscover) // - index := Root() + index := fac.Root() if *flagSrc != "" { - index.Merge(Load(*flagSrc)) + index.Merge(fs.Load(*flagSrc)) } // run main if flagMain != "" { verb := see.ParseVerb(flagMain) - if Circuit(verb).IsNil() { + if cir.Circuit(verb).IsNil() { fmt.Fprintf(os.Stderr, "verb not recognized\n") os.Exit(1) } - exec(index, Circuit(verb), false) + exec(index, cir.Circuit(verb), false) } // standard loop r := kio.NewChunkReader(os.Stdin) @@ -88,20 +88,20 @@ func main() { break } } - src := NewSrcString(string(chunk)) + src := a.NewSrcString(string(chunk)) for src.Len() > 0 { u := see.SeeChamber(src) - if u == nil || u.(Circuit).Len() == 0 { + if u == nil || u.(cir.Circuit).Len() == 0 { break } fmt.Fprintf(os.Stderr, "MATERIALIZING %v\n", u) - exec(index, u.(Circuit), true) + exec(index, u.(cir.Circuit), true) } } } -func exec(index Index, verb Circuit, showResidue bool) { - residue := MaterializeSystem(Circuit(verb), Circuit(index), New().Grow("Main", New())) +func exec(index be.Index, verb cir.Circuit, showResidue bool) { + residue := be.MaterializeSystem(cir.Circuit(verb), cir.Circuit(index), cir.New().Grow("Main", cir.New())) if showResidue { fmt.Fprintf(os.Stderr, "RESIDUE %v\n\n", residue) } diff --git a/escher/panic.go b/escher/panic.go index 38999cc..b63cabf 100644 --- a/escher/panic.go +++ b/escher/panic.go @@ -25,7 +25,7 @@ func InstallCtrlCPanic() { //defer SavePanicTrace() ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) - for _ = range ch { + for range ch { println("ctrl/c") // prof := pprof.Lookup("goroutine") // prof.WriteTo(os.Stderr, 2) diff --git a/faculty/basic/alternate.go b/faculty/basic/alternate.go index ad9f92d..e1a6129 100644 --- a/faculty/basic/alternate.go +++ b/faculty/basic/alternate.go @@ -7,10 +7,8 @@ package basic import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Alternate struct { @@ -20,14 +18,14 @@ type Alternate struct { // SX -> TX // SY -> TY -func (a *Alternate) Spark(eye *be.Eye, _ Circuit, aux ...interface{}) Value { +func (a *Alternate) Spark(eye *be.Eye, _ cir.Circuit, aux ...interface{}) cir.Value { a.flow = make([]chan struct{}, 2) a.flow[0] = make(chan struct{}, 1) a.flow[1] = make(chan struct{}, 1) return nil } -func (a *Alternate) OverCognize(eye *be.Eye, valve Name, value interface{}) { +func (a *Alternate) OverCognize(eye *be.Eye, valve cir.Name, value interface{}) { switch valve.(string) { case "SX": a.flow[0] <- struct{}{} // obtain token to send diff --git a/faculty/basic/arithmetic.go b/faculty/basic/arithmetic.go index 213777c..340472a 100644 --- a/faculty/basic/arithmetic.go +++ b/faculty/basic/arithmetic.go @@ -7,14 +7,11 @@ package basic import ( - // "fmt" "strconv" - // "sync" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" - // "github.com/gocircuit/escher/kit/plumb" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -25,7 +22,7 @@ func init() { // IntString type IntString struct{} -func (IntString) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (IntString) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return IntString{} } diff --git a/faculty/basic/basic.go b/faculty/basic/basic.go index 1800cd2..cc67ce9 100644 --- a/faculty/basic/basic.go +++ b/faculty/basic/basic.go @@ -9,9 +9,9 @@ package basic import ( // "fmt" - "github.com/gocircuit/escher/be" - // . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + // . 
"github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { diff --git a/faculty/basic/grow.go b/faculty/basic/grow.go index dc3d1ab..6c27c69 100644 --- a/faculty/basic/grow.go +++ b/faculty/basic/grow.go @@ -7,20 +7,19 @@ package basic import ( - // "fmt" "sync" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Grow struct { sync.Mutex - u Circuit + u cir.Circuit } -func (g *Grow) Spark(*be.Eye, Circuit, ...interface{}) Value { - g.u = New() +func (g *Grow) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { + g.u = cir.New() return &Grow{} } @@ -41,7 +40,7 @@ func (g *Grow) CognizeValue(eye *be.Eye, v interface{}) { func (g *Grow) CognizeImg(eye *be.Eye, v interface{}) { g.Lock() defer g.Unlock() - g.u.ReGrow("Img", v.(Circuit)) + g.u.ReGrow("Img", v.(cir.Circuit)) g.fire(eye) } @@ -51,5 +50,5 @@ func (g *Grow) fire(eye *be.Eye) { if g.u.Len() != 3 { return } - eye.Show("", g.u.CircuitAt("Img").Copy().ReGrow(g.u.At("Key"), g.u.At("Value"))) + eye.Show(cir.DefaultValve, g.u.CircuitAt("Img").Copy().ReGrow(g.u.At("Key"), g.u.At("Value"))) } diff --git a/faculty/basic/lens.go b/faculty/basic/lens.go index a93616f..6f305dc 100644 --- a/faculty/basic/lens.go +++ b/faculty/basic/lens.go @@ -7,33 +7,32 @@ package basic import ( - // "fmt" "sync" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Lens struct { - valve []Name + valve []cir.Name sync.Mutex - history Circuit // histories from both valves { ValveOne { … }, ValveTwo { … } } + history cir.Circuit // histories from both valves { ValveOne { … }, ValveTwo { … } } } -func (g *Lens) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { +func (g *Lens) Spark(eye *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { mvg := matter.CircuitAt("View").Gate if len(mvg) < 1 || len(mvg) > 2 { panic("lens can have one or two endpoints") } - g.history = New() - for vlv, _ := range mvg { + g.history = cir.New() + for vlv := range mvg { g.valve = append(g.valve, vlv) - g.history.Grow(vlv, New()) + g.history.Grow(vlv, cir.New()) } return g // return self in residual to expose query interface } -func (g *Lens) OverCognize(eye *be.Eye, valve Name, value interface{}) { +func (g *Lens) OverCognize(eye *be.Eye, valve cir.Name, value interface{}) { g.remember(valve, value) for _, v := range g.valve { if v != valve { @@ -42,15 +41,15 @@ func (g *Lens) OverCognize(eye *be.Eye, valve Name, value interface{}) { } } -func (g *Lens) remember(valve Name, value Value) { +func (g *Lens) remember(valve cir.Name, value cir.Value) { g.Lock() defer g.Unlock() h := g.history.CircuitAt(valve) // valve history circuit - h.Grow(h.Len(), DeepCopy(value)) + h.Grow(h.Len(), cir.DeepCopy(value)) } -func (g *Lens) Peek() Circuit { +func (g *Lens) Peek() cir.Circuit { g.Lock() defer g.Unlock() - return DeepCopy(g.history).(Circuit) + return cir.DeepCopy(g.history).(cir.Circuit) } diff --git a/faculty/basic/oneway.go b/faculty/basic/oneway.go index 3812ede..0fad1a3 100644 --- a/faculty/basic/oneway.go +++ b/faculty/basic/oneway.go @@ -7,17 +7,15 @@ package basic import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type OneWayDoor struct { // :From :To :Door flow chan struct{} } -func (w *OneWayDoor) Spark(eye *be.Eye, _ Circuit, aux ...interface{}) Value { +func (w *OneWayDoor) Spark(eye *be.Eye, _ cir.Circuit, aux ...interface{}) cir.Value { w.flow = make(chan struct{}) return nil } diff --git a/faculty/basic/repeat.go b/faculty/basic/repeat.go index 8bbf92a..5d595d7 100644 --- a/faculty/basic/repeat.go +++ b/faculty/basic/repeat.go @@ -7,21 +7,19 @@ package basic import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Repeat struct{} -func (Repeat) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (Repeat) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } func (Repeat) CognizeValue(eye *be.Eye, value interface{}) { for { - eye.Show(DefaultValve, value) + eye.Show(cir.DefaultValve, value) } } diff --git a/faculty/basic/star.go b/faculty/basic/star.go index 5336b40..b8683e1 100644 --- a/faculty/basic/star.go +++ b/faculty/basic/star.go @@ -10,9 +10,9 @@ import ( "bytes" "fmt" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -22,14 +22,14 @@ func init() { faculty.Register(be.NewMaterializer(&Star{}, StarFunc(show2)), "e", "Show2") } -type StarFunc func(Name, interface{}) +type StarFunc func(cir.Name, interface{}) type Star struct { f StarFunc - view Circuit + view cir.Circuit } -func (s *Star) Spark(_ *be.Eye, matter Circuit, aux ...interface{}) Value { +func (s *Star) Spark(_ *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { s.view = matter.CircuitAt("View") if len(aux) == 1 { s.f = aux[0].(StarFunc) @@ -37,11 +37,11 @@ func (s *Star) Spark(_ *be.Eye, matter Circuit, aux ...interface{}) Value { return nil } -func (s *Star) OverCognize(eye *be.Eye, name Name, value interface{}) { +func (s *Star) OverCognize(eye *be.Eye, name cir.Name, value interface{}) { if s.f != nil { s.f(name, value) } - for gn_, _ := range s.view.Gate { + for gn_ := range s.view.Gate { gn := gn_ if gn == name { continue @@ -50,18 +50,18 @@ func (s *Star) OverCognize(eye *be.Eye, name Name, value interface{}) { } } -func show(name Name, v interface{}) { - fmt.Printf("Showing:%v = %v\n", name, String(v)) +func show(name cir.Name, v interface{}) { + fmt.Printf("Showing:%v = %v\n", name, cir.String(v)) } -func show1(name Name, v interface{}) { +func show1(name cir.Name, v interface{}) { var w bytes.Buffer - Print(&w, Format{"", "\t", 1}, v) + cir.Print(&w, cir.Format{"", "\t", 1}, v) fmt.Printf("Showing:%v = %v\n", name, w.String()) } -func show2(name Name, v interface{}) { +func show2(name cir.Name, v interface{}) { var w bytes.Buffer - Print(&w, Format{"", "\t", 2}, v) + cir.Print(&w, cir.Format{"", "\t", 2}, v) fmt.Printf("Showing:%v = %v\n", name, w.String()) } diff --git a/faculty/basic/switch.go b/faculty/basic/switch.go index b1bcbe1..29471d2 100644 --- a/faculty/basic/switch.go +++ 
b/faculty/basic/switch.go @@ -7,11 +7,9 @@ package basic import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -19,18 +17,18 @@ func init() { } type Switch struct { - view Circuit + view cir.Circuit } -func (s *Switch) Spark(_ *be.Eye, matter Circuit, _ ...interface{}) Value { +func (s *Switch) Spark(_ *be.Eye, matter cir.Circuit, _ ...interface{}) cir.Value { s.view = matter.CircuitAt("View") return nil } func (s *Switch) Cognize(eye *be.Eye, value interface{}) { switch t := value.(type) { - case Circuit: - if IsVerb(t) { + case cir.Circuit: + if cir.IsVerb(t) { eye.Show("Verb", value) return } @@ -60,4 +58,4 @@ func (s *Switch) Cognize(eye *be.Eye, value interface{}) { } } -func (s *Switch) OverCognize(eye *be.Eye, name Name, value interface{}) {} +func (s *Switch) OverCognize(eye *be.Eye, name cir.Name, value interface{}) {} diff --git a/faculty/circuit/circuit.go b/faculty/circuit/circuit.go index 21ba4da..6afd8f5 100644 --- a/faculty/circuit/circuit.go +++ b/faculty/circuit/circuit.go @@ -13,9 +13,9 @@ import ( "strconv" "time" - "github.com/gocircuit/circuit/client" - "github.com/gocircuit/escher/be" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/circuit/client" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) // client *client.Client diff --git a/faculty/circuit/docker.go b/faculty/circuit/docker.go index 9e40ed7..42fced1 100644 --- a/faculty/circuit/docker.go +++ b/faculty/circuit/docker.go @@ -12,10 +12,10 @@ import ( "log" "sync" - dkr "github.com/gocircuit/circuit/client/docker" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/kit/plumb" + dkr "github.com/hoijui/circuit/client/docker" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/kit/plumb" ) // Docker @@ -26,7 +26,7 @@ type Docker struct { spawn chan interface{} // notify loop of spawn memes } -func (p *Docker) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (p *Docker) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { p.spawn = make(chan interface{}) return nil } @@ -46,7 +46,7 @@ func (p *Docker) CognizeCommand(eye *be.Eye, dvalue interface{}) { func (p *Docker) CognizeSpawn(eye *be.Eye, dvalue interface{}) { p.spawn <- dvalue - log.Printf("circuit container spawning (%v)", Linearize(fmt.Sprintf("%v", dvalue))) + log.Printf("circuit container spawning (%v)", cir.Linearize(fmt.Sprintf("%v", dvalue))) } func (p *Docker) CognizeExit(eye *be.Eye, dvalue interface{}) {} @@ -75,7 +75,7 @@ func (p *Docker) CognizeIO(eye *be.Eye, dvalue interface{}) {} // } // func cognizeDockerCommand(v interface{}) *dkr.Run { - img, ok := v.(Circuit) + img, ok := v.(cir.Circuit) if !ok { panic(fmt.Sprintf("non-image sent as circuit container command (%v)", v)) } @@ -114,7 +114,7 @@ func cognizeDockerCommand(v interface{}) *dkr.Run { cmd.Args = append(cmd.Args, args.StringAt(key)) } } - log.Printf("circuit docker command %v", QuickPrint("", "t", -1, img)) + log.Printf("circuit docker command %v", cir.QuickPrint("", "t", -1, img)) return cmd } @@ -127,7 +127,7 @@ type dockerBack struct { func (p *dockerBack) loop() { for { spwn := <-p.spawn - x := New().Grow("Spawn", spwn) + x := cir.New().Grow("Spawn", spwn) if exit := p.spawnDocker(spwn); exit != nil { x.Grow("Exit", 1) p.eye.Show("Exit", x) @@ -135,13 +135,13 @@ func (p *dockerBack) loop() { x.Grow("Exit", 0) p.eye.Show("Exit", x) } - log.Printf("circuit container exit meme sent (%v)", Linearize(fmt.Sprintf("%v", x))) + log.Printf("circuit container exit meme sent (%v)", 
cir.Linearize(fmt.Sprintf("%v", x))) } } func (p *dockerBack) spawnDocker(spwn interface{}) error { // anchor determination - s := spwn.(Circuit) + s := spwn.(cir.Circuit) anchor := program.Client.Walk( []string{ s.StringAt("Server"), // server name @@ -153,19 +153,19 @@ func (p *dockerBack) spawnDocker(spwn interface{}) error { log.Fatalf("container spawn error (%v)", err) } defer anchor.Scrub() // Anchor will be scrubbed before the exit meme is sent out - g := New(). + g := cir.New(). Grow("Spawn", spwn). Grow("Stdin", container.Stdin()). Grow("Stdout", container.Stdout()). Grow("Stderr", container.Stderr()) - log.Printf("circuit docker io (%v)", Linearize(fmt.Sprintf("%v", spwn))) + log.Printf("circuit docker io (%v)", cir.Linearize(fmt.Sprintf("%v", spwn))) p.eye.Show("IO", g) - log.Printf("circuit docker waiting (%v)", Linearize(fmt.Sprintf("%v", spwn))) + log.Printf("circuit docker waiting (%v)", cir.Linearize(fmt.Sprintf("%v", spwn))) stat, err := container.Wait() if err != nil { panic("circuit container wait aborted by user") } - log.Printf("circuit container (%v) exited", Linearize(fmt.Sprintf("%v", spwn))) + log.Printf("circuit container (%v) exited", cir.Linearize(fmt.Sprintf("%v", spwn))) var exit error if stat.State.ExitCode != 0 { exit = errors.New(fmt.Sprintf("circuit container exit code: %d", stat.State.ExitCode)) diff --git a/faculty/circuit/proc.go b/faculty/circuit/proc.go index 3eedf87..ee4b572 100644 --- a/faculty/circuit/proc.go +++ b/faculty/circuit/proc.go @@ -11,9 +11,9 @@ import ( "log" "sync" - "github.com/gocircuit/circuit/client" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/circuit/client" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // Process @@ -22,7 +22,7 @@ type Process struct { spawn chan interface{} // notify loop of spawn memes } -func (p *Process) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (p *Process) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { p.spawn = make(chan interface{}) return nil } @@ -42,7 +42,7 @@ func (p *Process) CognizeCommand(eye *be.Eye, dvalue interface{}) { func (p *Process) CognizeSpawn(eye *be.Eye, dvalue interface{}) { p.spawn <- dvalue - log.Printf("circuit process spawning (%v)", String(dvalue)) + log.Printf("circuit process spawning (%v)", cir.String(dvalue)) } func (p *Process) CognizeExit(eye *be.Eye, dvalue interface{}) {} @@ -60,7 +60,7 @@ func (p *Process) CognizeIO(eye *be.Eye, dvalue interface{}) {} // } // func cognizeProcessCommand(v interface{}) *client.Cmd { - img, ok := v.(Circuit) + img, ok := v.(cir.Circuit) if !ok { panic(fmt.Sprintf("Non-image sent to Process.Command (%v)", v)) } @@ -79,7 +79,7 @@ func cognizeProcessCommand(v interface{}) *client.Cmd { cmd.Args = append(cmd.Args, args.StringAt(key)) } } - log.Printf("circuit process command (%v)", QuickPrint("", "t", -1, img)) + log.Printf("circuit process command (%v)", cir.QuickPrint("", "t", -1, img)) return cmd } @@ -92,7 +92,7 @@ type processBack struct { func (p *processBack) loop() { for { spwn := <-p.spawn - x := New().Grow("Spawn", spwn) + x := cir.New().Grow("Spawn", spwn) if exit := p.spawnProcess(spwn); exit != nil { x.Grow("Exit", 1) p.eye.Show("Exit", x) @@ -100,13 +100,13 @@ func (p *processBack) loop() { x.Grow("Exit", 0) p.eye.Show("Exit", x) } - log.Printf("circuit process exit meme sent (%v)", Linearize(fmt.Sprintf("%v", x))) + log.Printf("circuit process exit meme sent (%v)", cir.Linearize(fmt.Sprintf("%v", x))) } } func (p *processBack) spawnProcess(spwn interface{}) error { // anchor determination - s := 
spwn.(Circuit) + s := spwn.(cir.Circuit) anchor := program.Client.Walk( []string{ s.StringAt("Server"), // server name @@ -118,19 +118,19 @@ func (p *processBack) spawnProcess(spwn interface{}) error { panic("invalid command argument") } defer anchor.Scrub() - g := New(). + g := cir.New(). Grow("Spawn", spwn). Grow("Stdin", proc.Stdin()). Grow("Stdout", proc.Stdout()). Grow("Stderr", proc.Stderr()) - log.Printf("circuit process io (%v)", Linearize(fmt.Sprintf("%v", spwn))) + log.Printf("circuit process io (%v)", cir.Linearize(fmt.Sprintf("%v", spwn))) p.eye.Show("IO", g) - log.Printf("circuit process waiting (%v)", Linearize(fmt.Sprintf("%v", spwn))) + log.Printf("circuit process waiting (%v)", cir.Linearize(fmt.Sprintf("%v", spwn))) stat, err := proc.Wait() if err != nil { panic("process wait aborted by user") } - log.Printf("circuit process (%v) exited", Linearize(fmt.Sprintf("%v", spwn))) + log.Printf("circuit process (%v) exited", cir.Linearize(fmt.Sprintf("%v", spwn))) if stat.Exit != nil { log.Printf("circuit process exit error: %v", stat.Exit) } diff --git a/faculty/cmplx/planar.go b/faculty/cmplx/planar.go index b845953..4992906 100644 --- a/faculty/cmplx/planar.go +++ b/faculty/cmplx/planar.go @@ -7,11 +7,9 @@ package cmplx import ( - // "math/cmplx" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -22,15 +20,15 @@ func init() { // Planar type Planar struct{} -func (Planar) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (Planar) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } func (Planar) CognizeComplex(eye *be.Eye, v interface{}) { - eye.Show("Planar", New().Grow("X", real(v.(complex128))).Grow("Y", imag(v.(complex128)))) + eye.Show("Planar", cir.New().Grow("X", real(v.(complex128))).Grow("Y", imag(v.(complex128)))) } func (Planar) CognizePlanar(eye *be.Eye, v interface{}) { - x := v.(Circuit) + x := v.(cir.Circuit) eye.Show("Complex", complex(x.FloatAt("X"), x.FloatAt("Y"))) } diff --git a/faculty/cmplx/polar.go b/faculty/cmplx/polar.go index 0192a1c..ae44b1f 100644 --- a/faculty/cmplx/polar.go +++ b/faculty/cmplx/polar.go @@ -9,23 +9,23 @@ package cmplx import ( "math/cmplx" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // Polar type Polar struct{} -func (Polar) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (Polar) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } func (Polar) CognizeComplex(eye *be.Eye, v interface{}) { r, theta := cmplx.Polar(v.(complex128)) - eye.Show("Polar", New().Grow("R", r).Grow("Theta", theta)) + eye.Show("Polar", cir.New().Grow("R", r).Grow("Theta", theta)) } func (Polar) CognizePolar(eye *be.Eye, v interface{}) { - x := v.(Circuit) + x := v.(cir.Circuit) eye.Show("Complex", cmplx.Rect(x.FloatAt("R"), x.FloatAt("Theta"))) } diff --git a/faculty/escher/break.go b/faculty/escher/break.go index 5109609..7ee8b5c 100644 --- a/faculty/escher/break.go +++ b/faculty/escher/break.go @@ -7,15 +7,12 @@ package escher import ( - // "fmt" - // "log" - - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Breakpoint struct{ be.Sparkless } -func (Breakpoint) OverCognize(eye *be.Eye, valve Name, value interface{}) { +func (Breakpoint) OverCognize(eye *be.Eye, valve cir.Name, value interface{}) { panic("Escher breakpoint") } diff --git a/faculty/escher/escher.go b/faculty/escher/escher.go index 7616f06..b5c5bae 100644 --- a/faculty/escher/escher.go +++ b/faculty/escher/escher.go @@ -9,9 +9,8 @@ package escher import ( // "fmt" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) func init() { diff --git a/faculty/escher/help.go b/faculty/escher/help.go index 1dec574..078ab5d 100644 --- a/faculty/escher/help.go +++ b/faculty/escher/help.go @@ -11,15 +11,15 @@ import ( "fmt" "os" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Help struct { - index Circuit + index cir.Circuit } -func (h *Help) Spark(_ *be.Eye, matter Circuit, _ ...interface{}) Value { +func (h *Help) Spark(_ *be.Eye, matter cir.Circuit, _ ...interface{}) cir.Value { h.index = matter.CircuitAt("Index") return nil } @@ -30,9 +30,9 @@ func (h *Help) Cognize(eye *be.Eye, v interface{}) { func (h *Help) value(v interface{}) { switch u := v.(type) { - case Circuit: - if IsVerb(u) { - fmt.Fprintf(os.Stderr, "\nThis is a verb constant equal to %v\n\n", Verb(u)) + case cir.Circuit: + if cir.IsVerb(u) { + fmt.Fprintf(os.Stderr, "\nThis is a verb constant equal to %v\n\n", cir.Verb(u)) } else { h.circuit(u) } @@ -49,16 +49,16 @@ func (h *Help) value(v interface{}) { } } -func (h *Help) circuit(u Circuit) { +func (h *Help) circuit(u cir.Circuit) { var w bytes.Buffer fmt.Fprintf(&w, "\nWe are looking at a circuit design \n%v\n\n", u) - valves := u.ValveNames(Super) + valves := u.ValveNames(cir.Super) if len(valves) == 0 { fmt.Fprintf(&w, "The circuit has no super valves.\n\n") } else { fmt.Fprintf(&w, "The circuit has %d super valve(s) ", len(valves)) - SortNames(valves) + cir.SortNames(valves) for _, vn := range valves { fmt.Fprintf(&w, ":%v ", vn) } diff --git a/faculty/escher/index.go b/faculty/escher/index.go index f99731d..56e0f8a 100644 --- a/faculty/escher/index.go +++ b/faculty/escher/index.go @@ -7,20 +7,18 @@ package escher import ( - // "log" - - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // The index gate is a design for a source reflex that returns a copy of the // index contextual to the materialization of this gate. 
type Index struct{} -func (Index) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { +func (Index) Spark(eye *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { index, view := matter.CircuitAt("Index"), matter.CircuitAt("View") go func() { - for vlv, _ := range view.Gate { + for vlv := range view.Gate { eye.Show(vlv, index) } }() @@ -30,4 +28,4 @@ func (Index) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { return nil } -func (Index) OverCognize(*be.Eye, Name, interface{}) {} +func (Index) OverCognize(*be.Eye, cir.Name, interface{}) {} diff --git a/faculty/escher/parse.go b/faculty/escher/parse.go index e901dc2..4aca160 100644 --- a/faculty/escher/parse.go +++ b/faculty/escher/parse.go @@ -9,11 +9,10 @@ package escher import ( // "log" - "github.com/gocircuit/escher/a" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/kit/plumb" - "github.com/gocircuit/escher/see" + "github.com/hoijui/escher/a" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/kit/plumb" + "github.com/hoijui/escher/see" ) type Parse struct{ be.Sparkless } diff --git a/faculty/escher/system.go b/faculty/escher/system.go index 2fc97a8..dd8a72d 100644 --- a/faculty/escher/system.go +++ b/faculty/escher/system.go @@ -10,8 +10,8 @@ import ( // "fmt" // "log" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + . "github.com/hoijui/escher/circuit" ) type System struct { diff --git a/faculty/http/request.go b/faculty/http/request.go index 4308cbf..8f84684 100644 --- a/faculty/http/request.go +++ b/faculty/http/request.go @@ -7,22 +7,21 @@ package http import ( - // "fmt" "net/http" "strings" - . 
"github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // requestCircuit converts an http.Request object into a data circuit representation -func requestCircuit(req *http.Request) Circuit { - x := New() +func requestCircuit(req *http.Request) cir.Circuit { + x := cir.New() // HTTP method x.Gate["Method"] = req.Method // URL path - var nn []Name + var nn []cir.Name parts := strings.Split(req.URL.Path, "/") if len(parts) > 0 && parts[0] == "" { parts = parts[1:] @@ -33,10 +32,10 @@ func requestCircuit(req *http.Request) Circuit { for _, n := range parts { nn = append(nn, n) } - x.Gate["Path"] = NewAddress(nn...) + x.Gate["Path"] = cir.NewAddress(nn...) // URL query - v := New() + v := cir.New() for k, ss := range req.URL.Query() { v.Gate[k] = sliceCircuit(ss) } @@ -45,8 +44,8 @@ func requestCircuit(req *http.Request) Circuit { return x } -func sliceCircuit(ss []string) Circuit { - x := New() +func sliceCircuit(ss []string) cir.Circuit { + x := cir.New() for i, v := range ss { x.Gate[i] = v } diff --git a/faculty/http/response.go b/faculty/http/response.go index cd3cb43..a43ca29 100644 --- a/faculty/http/response.go +++ b/faculty/http/response.go @@ -13,12 +13,11 @@ import ( "io/ioutil" "net/http" - // "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + cir "github.com/hoijui/escher/circuit" ) // cognizeResponse reads the circuit response u and fills in the http header object, returning the status and body. 
-func (s *Server) cognizeResponse(header http.Header, u Circuit) (status int, body io.ReadCloser, ok bool) { +func (s *Server) cognizeResponse(header http.Header, u cir.Circuit) (status int, body io.ReadCloser, ok bool) { // Status if status, ok = u.IntOptionAt("Status"); !ok { @@ -26,7 +25,7 @@ func (s *Server) cognizeResponse(header http.Header, u Circuit) (status int, bod } // Header - var h Circuit + var h cir.Circuit if h, ok = u.CircuitOptionAt("Header"); !ok { return } @@ -39,7 +38,7 @@ func (s *Server) cognizeResponse(header http.Header, u Circuit) (status int, bod } // Body gate should be convertible to string - var v Value + var v cir.Value if v, ok = u.OptionAt("Body"); !ok { return } @@ -70,7 +69,7 @@ func (s *Server) cognizeResponse(header http.Header, u Circuit) (status int, bod return } -func circuitSlice(u Circuit) []string { +func circuitSlice(u cir.Circuit) []string { var ss []string for _, j := range u.SortedNumbers() { ss = append(ss, fmt.Sprintf("%v", u.At(j))) diff --git a/faculty/http/server.go b/faculty/http/server.go index 02e5d6a..311c60e 100644 --- a/faculty/http/server.go +++ b/faculty/http/server.go @@ -11,9 +11,9 @@ import ( "net/http" "sync" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -22,13 +22,13 @@ func init() { type Server struct { eye *be.Eye - matter Circuit + matter cir.Circuit sync.Mutex server *http.Server throttle chan struct{} } -func (s *Server) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { +func (s *Server) Spark(eye *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { s.eye, s.matter = eye, matter const throttle = 50 s.throttle = make(chan struct{}, throttle) @@ -44,7 +44,7 @@ func (s *Server) CognizeStart(eye *be.Eye, value interface{}) { s.Lock() defer s.Unlock() // - u := value.(Circuit) + u := value.(cir.Circuit) if s.server != nil { panic("server running") } @@ -70,7 +70,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { defer func() { ch <- struct{}{} // release throttle token when request/response complete }() - status, body, ok := s.cognizeResponse(w.Header(), v.(Circuit)) + status, body, ok := s.cognizeResponse(w.Header(), v.(cir.Circuit)) if !ok { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte("Escher web server: App error.")) @@ -83,7 +83,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { ) s.eye.Show( "RequestResponse", - New(). + cir.New(). Grow("Request", requestCircuit(req)). 
Grow("Respond", yy), ) diff --git a/faculty/index/index.go b/faculty/index/index.go index 0c833f0..fe0cb6c 100644 --- a/faculty/index/index.go +++ b/faculty/index/index.go @@ -8,8 +8,8 @@ package index import ( - "github.com/gocircuit/escher/be" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) func init() { diff --git a/faculty/index/lookup.go b/faculty/index/lookup.go index 1e62c76..47ff4c9 100644 --- a/faculty/index/lookup.go +++ b/faculty/index/lookup.go @@ -7,25 +7,23 @@ package index import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Lookup struct{} -func (Lookup) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (Lookup) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } func (Lookup) CognizeView(eye *be.Eye, v interface{}) { - u := v.(Circuit) + u := v.(cir.Circuit) x := u.CircuitAt("Index") addr := u.VerbAt("Address") r := be.AsIndex(x).Recall(addr.Address()...) if r == nil { - eye.Show("NotFound", New().Grow("NotFound", Circuit(addr)).Grow("In", x)) + eye.Show("NotFound", cir.New().Grow("NotFound", cir.Circuit(addr)).Grow("In", x)) } else { eye.Show("Found", r) } diff --git a/faculty/index/mirror.go b/faculty/index/mirror.go index 3119abd..d663c75 100644 --- a/faculty/index/mirror.go +++ b/faculty/index/mirror.go @@ -9,8 +9,8 @@ package index import ( "fmt" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // The Mirror gate recursively transforms an input circuit into one wherein @@ -21,29 +21,29 @@ import ( type Mirror struct{ be.Sparkless } func (Mirror) CognizeIndex(eye *be.Eye, v interface{}) { - eye.Show(DefaultValve, MirrorIndex(v.(Circuit), nil)) + eye.Show(cir.DefaultValve, MirrorIndex(v.(cir.Circuit), nil)) } func (Mirror) Cognize(eye *be.Eye, v interface{}) {} -func MirrorIndex(u Circuit, addr []Name) Circuit { - r := New() - for n, v := range Circuit(u).Gate { +func MirrorIndex(u cir.Circuit, addr []cir.Name) cir.Circuit { + r := cir.New() + for n, v := range cir.Circuit(u).Gate { switch t := v.(type) { - case Circuit: + case cir.Circuit: r.Include(n, MirrorIndex(t, append(addr, n))) case be.Materializer: - r.Include(n, be.NewSource(NewAddress(append(addr, n)...))) + r.Include(n, be.NewSource(cir.NewAddress(append(addr, n)...))) case int: - r.Include(n, be.NewSource(NewAddress("int"))) + r.Include(n, be.NewSource(cir.NewAddress("int"))) case float64: - r.Include(n, be.NewSource(NewAddress("float"))) + r.Include(n, be.NewSource(cir.NewAddress("float"))) case complex128: - r.Include(n, be.NewSource(NewAddress("complex"))) + r.Include(n, be.NewSource(cir.NewAddress("complex"))) case string: - r.Include(n, be.NewSource(NewAddress("string"))) + r.Include(n, be.NewSource(cir.NewAddress("string"))) default: - r.Include(n, be.NewSource(NewAddress("go", fmt.Sprintf("%T", t)))) + r.Include(n, be.NewSource(cir.NewAddress("go", fmt.Sprintf("%T", t)))) } } return r diff --git a/faculty/index/yield.go b/faculty/index/yield.go index b04dd44..7e13531 100644 --- a/faculty/index/yield.go +++ b/faculty/index/yield.go @@ -7,30 +7,28 @@ package index import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Yield struct{ be.Sparkless } func (Yield) CognizeIndex(eye *be.Eye, value interface{}) { - yieldIndex(eye, value.(Circuit), nil) + yieldIndex(eye, value.(cir.Circuit), nil) eye.Show("End", value) } -func yieldIndex(eye *be.Eye, x Circuit, path []Name) { +func yieldIndex(eye *be.Eye, x cir.Circuit, path []cir.Name) { for _, n := range x.SortedNames() { switch t := x.At(n).(type) { - case Circuit: + case cir.Circuit: if t.Vol() == 0 { // circuits without flow are treated as indices and recursed into yieldIndex(eye, t, append(path, n)) } else { - eye.Show(DefaultValve, New().Grow("Value", t).Grow("Address", NewAddress(path...))) + eye.Show(cir.DefaultValve, cir.New().Grow("Value", t).Grow("Address", cir.NewAddress(path...))) } default: - eye.Show(DefaultValve, New().Grow("Value", t).Grow("Address", NewAddress(path...))) + eye.Show(cir.DefaultValve, cir.New().Grow("Value", t).Grow("Address", cir.NewAddress(path...))) } } } diff --git a/faculty/io/chunk.go b/faculty/io/chunk.go index 2a5c37b..20636e1 100644 --- a/faculty/io/chunk.go +++ b/faculty/io/chunk.go @@ -9,12 +9,11 @@ package io import ( "io" - // "log" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" - kio "github.com/gocircuit/escher/kit/io" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" + kio "github.com/hoijui/escher/kit/io" ) func init() { @@ -24,7 +23,7 @@ func init() { // Chunk… type Chunk struct{} -func (Chunk) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (Chunk) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } diff --git a/faculty/io/er.go b/faculty/io/er.go index 6edb86b..55f6e7b 100644 --- a/faculty/io/er.go +++ b/faculty/io/er.go @@ -11,9 +11,9 @@ import ( "log" "os" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - kitio "github.com/gocircuit/escher/kit/io" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + kitio "github.com/hoijui/escher/kit/io" ) // Writer is a gate that reads from values sent to it and writes to an underlying writer. @@ -25,15 +25,15 @@ func NewWriterMaterializer(w io.Writer) be.Materializer { return be.NewMaterializer(&Writer{}, w) } -func (x *Writer) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { +func (x *Writer) Spark(eye *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { x.WriteCloser = kitio.SovereignWriter(aux[0].(io.Writer)) - for vlv, _ := range matter.CircuitAt("View").Gate { + for vlv := range matter.CircuitAt("View").Gate { go eye.Show(vlv, x.WriteCloser) } return nil } -func (x *Writer) OverCognize(eye *be.Eye, _ Name, value interface{}) { +func (x *Writer) OverCognize(eye *be.Eye, _ cir.Name, value interface{}) { switch t := value.(type) { case io.Reader: go CopyClose(x.WriteCloser, t, false, true) @@ -53,15 +53,15 @@ func NewReaderMaterializer(r io.Reader) be.Materializer { return be.NewMaterializer(&Reader{}, r) } -func (x *Reader) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { +func (x *Reader) Spark(eye *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { x.ReadCloser = kitio.SovereignReader(aux[0].(io.Reader)) - for vlv, _ := range matter.CircuitAt("View").Gate { + for vlv := range matter.CircuitAt("View").Gate { go eye.Show(vlv, x.ReadCloser) } return nil } -func (x *Reader) OverCognize(_ *be.Eye, _ Name, value interface{}) { +func (x *Reader) OverCognize(_ *be.Eye, _ cir.Name, value interface{}) { switch t := value.(type) { case io.Writer: go CopyClose(t, x.ReadCloser, true, false) diff --git a/faculty/io/sourcefile.go b/faculty/io/sourcefile.go index 203500e..7e70d31 100644 --- a/faculty/io/sourcefile.go +++ b/faculty/io/sourcefile.go @@ -10,9 +10,8 @@ import ( "log" "os" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - // "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) func NewSourceFile(name string) be.Materializer { @@ -21,7 +20,7 @@ func NewSourceFile(name string) be.Materializer { type SourceFile struct{} -func (SourceFile) Spark(eye *be.Eye, _ Circuit, aux ...interface{}) Value { +func (SourceFile) Spark(eye *be.Eye, _ cir.Circuit, aux ...interface{}) cir.Value { go func() { name := aux[0].(string) file, err := os.Open(name) @@ -29,7 +28,7 @@ func (SourceFile) Spark(eye *be.Eye, _ Circuit, aux ...interface{}) Value { log.Printf("Problem opening file %q (%v)", name, err) panic("open file") } - eye.Show(DefaultValve, file) + eye.Show(cir.DefaultValve, file) }() return nil } diff --git a/faculty/io/writefile.go b/faculty/io/writefile.go index 956408e..3c75d4c 100644 --- a/faculty/io/writefile.go +++ b/faculty/io/writefile.go @@ -10,11 +10,10 @@ import ( "bytes" "io" "io/ioutil" - // "log" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -26,7 +25,7 @@ type WriteFile struct { named chan struct{} } -func (h *WriteFile) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (h *WriteFile) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { h.named = make(chan struct{}) return &WriteFile{} } diff --git a/faculty/math/math.go b/faculty/math/math.go index 688d023..87e9932 100644 --- a/faculty/math/math.go +++ b/faculty/math/math.go @@ -7,8 +7,8 @@ package math import ( - "github.com/gocircuit/escher/be" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) func init() { diff --git a/faculty/math/sum.go b/faculty/math/sum.go index 8e14152..5b270be 100644 --- a/faculty/math/sum.go +++ b/faculty/math/sum.go @@ -9,10 +9,8 @@ package math import ( "sync" - // "github.com/gocircuit/escher/faculty" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - // "github.com/gocircuit/escher/kit/plumb" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // IntSum @@ -21,7 +19,7 @@ type IntSum struct { x, y, sum int } -func (s *IntSum) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (s *IntSum) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return &IntSum{} } diff --git a/faculty/model/ignore.go b/faculty/model/ignore.go index b305aa0..bb67cb5 100644 --- a/faculty/model/ignore.go +++ b/faculty/model/ignore.go @@ -7,11 +7,9 @@ package model import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -20,16 +18,16 @@ func init() { type IgnoreValves struct{} -func (IgnoreValves) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (IgnoreValves) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } func (IgnoreValves) CognizeCircuit(eye *be.Eye, v interface{}) { - u := v.(Circuit).Copy() + u := v.(cir.Circuit).Copy() n := u.Unify("ignoreValves") - u.Gate[n] = NewVerbAddress("*", "Ignore") - u.Reflow(Super, n) - eye.Show(DefaultValve, u) + u.Gate[n] = cir.NewVerbAddress("*", "Ignore") + u.Reflow(cir.Super, n) + eye.Show(cir.DefaultValve, u) } func (IgnoreValves) Cognize(eye *be.Eye, v interface{}) {} diff --git a/faculty/model/io.go b/faculty/model/io.go index 44bc83c..2ec259d 100644 --- a/faculty/model/io.go +++ b/faculty/model/io.go @@ -7,15 +7,13 @@ package model import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type IO struct{} -func (IO) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (IO) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return IO{} } @@ -26,5 +24,5 @@ func (IO) Cognize(eye *be.Eye, v interface{}) { func (IO) CognizeIn(eye *be.Eye, v interface{}) {} func (IO) CognizeOut(eye *be.Eye, v interface{}) { - eye.Show(DefaultValve, v) + eye.Show(cir.DefaultValve, v) } diff --git a/faculty/model/model.go b/faculty/model/model.go index cb6966b..8b8b21b 100644 --- a/faculty/model/model.go +++ b/faculty/model/model.go @@ -8,8 +8,8 @@ package model import ( - "github.com/gocircuit/escher/be" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) func init() { diff --git a/faculty/os/os.go b/faculty/os/os.go index be375b8..4d0bc6d 100644 --- a/faculty/os/os.go +++ b/faculty/os/os.go @@ -12,10 +12,10 @@ import ( "os/exec" "path" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" - fio "github.com/gocircuit/escher/faculty/io" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" + fio "github.com/hoijui/escher/faculty/io" ) func Init(arg []string) { @@ -31,8 +31,8 @@ func Init(arg []string) { faculty.Register(fio.NewWriterMaterializer(os.Stderr), "os", "Stderr") } -func argCircuit(arg []string) Circuit { - r := New() +func argCircuit(arg []string) cir.Circuit { + r := cir.New() for i, a := range arg { r.Include(i, a) } @@ -42,7 +42,7 @@ func argCircuit(arg []string) Circuit { // Exit type Exit struct{ be.Sparkless } -func (Exit) OverCognize(eye *be.Eye, name Name, value interface{}) { +func (Exit) OverCognize(eye *be.Eye, name cir.Name, value interface{}) { switch t := value.(type) { case int: os.Exit(t) @@ -54,7 +54,7 @@ func (Exit) OverCognize(eye *be.Eye, name Name, value interface{}) { // Fatal type Fatal struct{ be.Sparkless } -func (Fatal) OverCognize(eye *be.Eye, name Name, value interface{}) { +func (Fatal) OverCognize(eye *be.Eye, name cir.Name, value interface{}) { log.Fatalf("%v", value) } @@ -66,7 +66,7 @@ func (LookPath) CognizeName(eye *be.Eye, value interface{}) { if err != nil { log.Fatalf("no file path to %s", value.(string)) } - eye.Show(DefaultValve, p) + eye.Show(cir.DefaultValve, p) } func (LookPath) Cognize(eye *be.Eye, value interface{}) {} @@ -75,12 +75,12 @@ func (LookPath) Cognize(eye *be.Eye, value interface{}) {} type Join struct{ be.Sparkless } func (Join) CognizeView(eye *be.Eye, v interface{}) { - u := v.(Circuit) + u := v.(cir.Circuit) var s []string for _, n := range u.SortedNames() { s = append(s, u.Gate[n].(string)) } - eye.Show(DefaultValve, path.Join(s...)) + eye.Show(cir.DefaultValve, path.Join(s...)) } func (Join) Cognize(*be.Eye, interface{}) {} diff --git a/faculty/os/proc.go b/faculty/os/proc.go index f7af70a..19f62e4 100644 --- a/faculty/os/proc.go +++ 
b/faculty/os/proc.go @@ -12,16 +12,16 @@ import ( "log" "os/exec" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - kio "github.com/gocircuit/escher/kit/io" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + kio "github.com/hoijui/escher/kit/io" ) // Process type Process struct{ be.Sparkless } func (Process) CognizeCommand(eye *be.Eye, dvalue interface{}) { - x := New() + x := cir.New() if exit := spawnProcess(eye, cognizeCommand(dvalue)); exit != nil { x.Grow("Exit", 1) eye.Show("Exit", x) @@ -56,7 +56,7 @@ func spawnProcess(eye *be.Eye, cmd *exec.Cmd) (err error) { stdin = kio.RunOnCloseWriter(stdin, func() { stdClose <- struct{}{} }) stdout = kio.RunOnCloseReader(stdout, func() { stdClose <- struct{}{} }) stderr = kio.RunOnCloseReader(stderr, func() { stdClose <- struct{}{} }) - g := New(). + g := cir.New(). Grow("Stdin", stdin). Grow("Stdout", stdout). Grow("Stderr", stderr) @@ -88,7 +88,7 @@ func (Process) CognizeIO(*be.Eye, interface{}) {} // } // func cognizeCommand(v interface{}) *exec.Cmd { - img, ok := v.(Circuit) + img, ok := v.(cir.Circuit) if !ok { panic(fmt.Sprintf("Non-image sent to Process.Command (%v)", v)) } diff --git a/faculty/root.go b/faculty/root.go index 5d39e9c..91745d9 100644 --- a/faculty/root.go +++ b/faculty/root.go @@ -9,20 +9,20 @@ package faculty import ( "sync" - . "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) var lk sync.Mutex -var root = NewIndex() +var root = be.NewIndex() -func Root() Index { +func Root() be.Index { lk.Lock() defer lk.Unlock() return root } -func Register(v Materializer, addr ...Name) { +func Register(v be.Materializer, addr ...cir.Name) { lk.Lock() defer lk.Unlock() root.Memorize(v, addr...) 
diff --git a/faculty/test/exec.go b/faculty/test/exec.go index a6badd0..f02b1e6 100644 --- a/faculty/test/exec.go +++ b/faculty/test/exec.go @@ -11,8 +11,8 @@ import ( "os" "os/exec" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // Exec receives values from FilterAll and executes the included test circuits @@ -20,10 +20,10 @@ import ( type Exec struct{ be.Sparkless } func (Exec) CognizeIn(eye *be.Eye, v interface{}) { - x := v.(Circuit) + x := v.(cir.Circuit) // - addr := Verb(x.CircuitAt("Address").Copy()) - addr.Gate[""] = "*" + addr := cir.Verb(x.CircuitAt("Address").Copy()) + addr.Gate[cir.Super] = "*" cmd := exec.Command(os.Args[0], "-src", srcDir, addr.String()) var success bool @@ -34,8 +34,8 @@ func (Exec) CognizeIn(eye *be.Eye, v interface{}) { fmt.Printf("+ Test %v (ok)\n", addr) success = true } - r := New(). - Grow("Verb", Circuit(addr)). + r := cir.New(). + Grow("Verb", cir.Circuit(addr)). Grow("Result", success) eye.Show("Out", r) } diff --git a/faculty/test/filter.go b/faculty/test/filter.go index 77ba6fe..a94aa48 100644 --- a/faculty/test/filter.go +++ b/faculty/test/filter.go @@ -11,15 +11,15 @@ import ( "strings" "unicode" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // type Filter struct{ be.Sparkless } func (Filter) CognizeIn(eye *be.Eye, v interface{}) { - x := v.(Circuit) + x := v.(cir.Circuit) // name_, view := x.NameAt("Name"), x.CircuitAt("View") name, ok := name_.(string) @@ -33,7 +33,7 @@ func (Filter) CognizeIn(eye *be.Eye, v interface{}) { if len(sfx) == 0 || !unicode.IsUpper(rune(sfx[0])) { return } - y := New(). + y := cir.New(). Grow("Address", x.CircuitAt("Address")). Grow("Name", name). 
Grow("View", view) diff --git a/faculty/test/match.go b/faculty/test/match.go index baa7284..765442f 100644 --- a/faculty/test/match.go +++ b/faculty/test/match.go @@ -9,8 +9,8 @@ package test import ( "log" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) // TODO: Make sure matching works when opposing streams come at different speeds @@ -18,13 +18,13 @@ import ( // type Match struct { - name []Name + name []cir.Name flow []chan interface{} } -func (m *Match) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { - for vlv, _ := range matter.CircuitAt("View").Gate { - if vlv == DefaultValve { +func (m *Match) Spark(eye *be.Eye, matter cir.Circuit, aux ...interface{}) cir.Value { + for vlv := range matter.CircuitAt("View").Gate { + if vlv == cir.DefaultValve { continue } m.name = append(m.name, vlv) @@ -36,11 +36,11 @@ func (m *Match) Spark(eye *be.Eye, matter Circuit, aux ...interface{}) Value { return nil } -func (m *Match) OverCognize(eye *be.Eye, name Name, v interface{}) { +func (m *Match) OverCognize(eye *be.Eye, name cir.Name, v interface{}) { // compute valve index var i int for j, n := range m.name { - if Same(n, name) { + if cir.Same(n, name) { i = j break } @@ -48,10 +48,10 @@ func (m *Match) OverCognize(eye *be.Eye, name Name, v interface{}) { // match select { case u := <-m.flow[1-i]: // if the opposing channel is ready - if !Same(u, v) { + if !cir.Same(u, v) { log.Fatalf("mismatch %v vs %v: %v vs %v\n", m.name[1-i], name, u, v) } - eye.Show(DefaultValve, v) // emit the matched object + eye.Show(cir.DefaultValve, v) // emit the matched object default: // otherwise, offer our value m.flow[i] <- v } diff --git a/faculty/test/testing.go b/faculty/test/testing.go index 78d0e72..2693f88 100644 --- a/faculty/test/testing.go +++ b/faculty/test/testing.go @@ -7,9 +7,9 @@ package test import ( - // . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/be" - "github.com/gocircuit/escher/faculty" + // . "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) func Init(srcdir string) { diff --git a/faculty/text/form.go b/faculty/text/form.go index 603d384..0667912 100644 --- a/faculty/text/form.go +++ b/faculty/text/form.go @@ -10,14 +10,14 @@ import ( "bytes" "text/template" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type Form struct{ be.Sparkless } func (Form) CognizeIn(eye *be.Eye, v interface{}) { - td := v.(Circuit) + td := v.(cir.Circuit) t, err := template.New("").Parse(td.StringAt("Form")) if err != nil { panic(err) diff --git a/faculty/text/merge.go b/faculty/text/merge.go index 03d0d07..f80b5e5 100644 --- a/faculty/text/merge.go +++ b/faculty/text/merge.go @@ -10,11 +10,10 @@ package text import ( "bytes" "io" - // "log" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" ) func init() { @@ -28,7 +27,7 @@ type Merge struct{ be.Sparkless } func (Merge) CognizeIn(eye *be.Eye, v interface{}) { var w bytes.Buffer - x := v.(Circuit) + x := v.(cir.Circuit) for _, name := range x.SortedLetters() { w.WriteString(flatten(x.StringAt(name))) } diff --git a/faculty/time/delay.go b/faculty/time/delay.go index b744c5c..719b748 100644 --- a/faculty/time/delay.go +++ b/faculty/time/delay.go @@ -10,10 +10,10 @@ import ( "sync" "time" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/faculty" - "github.com/gocircuit/escher/kit/plumb" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/faculty" + "github.com/hoijui/escher/kit/plumb" ) func init() { @@ -27,7 +27,7 @@ type Delay struct { dur time.Duration } -func (t *Delay) Spark(*be.Eye, Circuit, ...interface{}) Value { +func (t *Delay) Spark(*be.Eye, cir.Circuit, ...interface{}) cir.Value { return nil } diff --git a/faculty/time/ticker.go b/faculty/time/ticker.go index c310561..c9a3569 100644 --- a/faculty/time/ticker.go +++ b/faculty/time/ticker.go @@ -9,9 +9,9 @@ package time import ( "time" - "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - "github.com/gocircuit/escher/kit/plumb" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + "github.com/hoijui/escher/kit/plumb" ) // Ticker @@ -19,7 +19,7 @@ type Ticker struct { ctl chan time.Duration } -func (t *Ticker) Spark(eye *be.Eye, _ Circuit, _ ...interface{}) Value { +func (t *Ticker) Spark(eye *be.Eye, _ cir.Circuit, _ ...interface{}) cir.Value { t.ctl = make(chan time.Duration) go func() { var start time.Time @@ -37,7 +37,7 @@ func (t *Ticker) Spark(eye *be.Eye, _ Circuit, _ ...interface{}) Value { ch = tkr.C } case t := <-ch: - eye.Show(DefaultValve, int(t.Sub(start))) + eye.Show(cir.DefaultValve, int(t.Sub(start))) } } }() diff --git a/faculty/yield/depthfirst.go b/faculty/yield/depthfirst.go index ded9e86..28add94 100644 --- a/faculty/yield/depthfirst.go +++ b/faculty/yield/depthfirst.go @@ -7,10 +7,8 @@ package yield import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) type DepthFirst struct{ be.Sparkless } @@ -23,8 +21,8 @@ func (DepthFirst) CognizeFrame(eye *be.Eye, v interface{}) {} func (DepthFirst) CognizeEnd(eye *be.Eye, v interface{}) {} -func depthFirst(eye *be.Eye, walk []Name, v interface{}) { - x, ok := v.(Circuit) +func depthFirst(eye *be.Eye, walk []cir.Name, v interface{}) { + x, ok := v.(cir.Circuit) if !ok { return } @@ -36,13 +34,13 @@ func depthFirst(eye *be.Eye, walk []Name, v interface{}) { } } - var nm Name = "" // The root circuit is shown with the empty name + var nm cir.Name = "" // The root circuit is shown with the empty name if len(walk) > 0 { nm = walk[len(walk)-1] } - frame := New(). - Grow("Address", Circuit(NewAddress(walk...))). + frame := cir.New(). + Grow("Address", cir.Circuit(cir.NewAddress(walk...))). Grow("Name", nm). Grow("View", x) diff --git a/faculty/yield/flows.go b/faculty/yield/flows.go index 0de7793..4ceaf23 100644 --- a/faculty/yield/flows.go +++ b/faculty/yield/flows.go @@ -7,10 +7,10 @@ package yield import ( - // "fmt" + //"fmt" - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) /* @@ -31,28 +31,28 @@ import ( */ type Flows struct{ be.Sparkless } -func sanitizeValue(u Circuit, name Name) Value { +func sanitizeValue(u cir.Circuit, name cir.Name) cir.Value { value, ok := u.Gate[name] if !ok { - return NewAddress("missing") + return cir.NewAddress("missing") } if value == nil { - return NewAddress("nil") + return cir.NewAddress("nil") } return value } func (Flows) Cognize(eye *be.Eye, value interface{}) { - u := value.(Circuit) + u := value.(cir.Circuit) for xname, xview := range u.Flow { for xvalve, xvec := range xview { yname, yvalve := xvec.Gate, xvec.Valve // xvalue, yvalue := sanitizeValue(u, xname), sanitizeValue(u, yname) // - frame := New() - xy := New().Grow("Name", xname).Grow("Value", xvalue).Grow("Valve", xvalve) - yx := New().Grow("Name", xname).Grow("Value", yvalue).Grow("Valve", yvalve) + frame := cir.New() + xy := cir.New().Grow("Name", xname).Grow("Value", xvalue).Grow("Valve", xvalve) + yx := cir.New().Grow("Name", xname).Grow("Value", yvalue).Grow("Valve", yvalve) frame.Grow(0, xy).Grow(1, yx) eye.Show("Frame", frame) } diff --git a/faculty/yield/gates.go b/faculty/yield/gates.go index ec64de8..523c960 100644 --- a/faculty/yield/gates.go +++ b/faculty/yield/gates.go @@ -7,10 +7,8 @@ package yield import ( - // "fmt" - - "github.com/gocircuit/escher/be" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" ) /* @@ -24,9 +22,9 @@ import ( type Gates struct{ be.Sparkless } func (Gates) Cognize(eye *be.Eye, value interface{}) { - u := value.(Circuit) - for name, _ := range u.SortedNames() { - frame := New() + u := value.(cir.Circuit) + for name := range u.SortedNames() { + frame := cir.New() frame.Grow("Name", name).Grow("Value", u.At(name)) eye.Show("Frame", frame) } diff --git a/faculty/yield/yield.go b/faculty/yield/yield.go index d48898a..41e5494 100644 --- a/faculty/yield/yield.go +++ b/faculty/yield/yield.go @@ -9,8 +9,8 @@ package yield import ( // "fmt" - "github.com/gocircuit/escher/be" - "github.com/gocircuit/escher/faculty" + "github.com/hoijui/escher/be" + "github.com/hoijui/escher/faculty" ) func init() { diff --git a/kit/browser/srv.go b/kit/browser/srv.go index d627170..2129080 100644 --- a/kit/browser/srv.go +++ b/kit/browser/srv.go @@ -11,7 +11,7 @@ import ( "path" "sync" - "github.com/petar/maymounkov.io/code.google.com/p/go.net/websocket" + "golang.org/x/net/websocket" ) // Server is a static file and websocket server. 
diff --git a/kit/browser/ssn.go b/kit/browser/ssn.go index eafb0d1..68094d1 100644 --- a/kit/browser/ssn.go +++ b/kit/browser/ssn.go @@ -14,7 +14,7 @@ import ( "strconv" "sync" - "github.com/petar/maymounkov.io/code.google.com/p/go.net/websocket" + "golang.org/x/net/websocket" ) // Session is a websocket session abstraction diff --git a/kit/code.google.com/p/snappy-go/.gitignore b/kit/code.google.com/p/snappy-go/.gitignore deleted file mode 100644 index 831f3a3..0000000 --- a/kit/code.google.com/p/snappy-go/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.git.genus -.hg.genus diff --git a/kit/code.google.com/p/snappy-go/.hgignore b/kit/code.google.com/p/snappy-go/.hgignore deleted file mode 100644 index d804706..0000000 --- a/kit/code.google.com/p/snappy-go/.hgignore +++ /dev/null @@ -1,30 +0,0 @@ -syntax:glob -.DS_Store -.git -.gitignore -*.[568ao] -*.ao -*.so -*.pyc -._* -.nfs.* -[568a].out -*~ -*.orig -*.rej -*.exe -.*.swp -core -*.cgo*.go -*.cgo*.c -_cgo_* -_obj -_test -_testmain.go -build.out -snappy/testdata -test.out -y.tab.[ch] - -syntax:regexp -^.*/core.[0-9]*$ diff --git a/kit/code.google.com/p/snappy-go/AUTHORS b/kit/code.google.com/p/snappy-go/AUTHORS deleted file mode 100644 index 8ddb5b7..0000000 --- a/kit/code.google.com/p/snappy-go/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Google Inc. 
-Jan Mercl <0xjnml@gmail.com> diff --git a/kit/code.google.com/p/snappy-go/CONTRIBUTORS b/kit/code.google.com/p/snappy-go/CONTRIBUTORS deleted file mode 100644 index 50b69c8..0000000 --- a/kit/code.google.com/p/snappy-go/CONTRIBUTORS +++ /dev/null @@ -1,34 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Russ Cox diff --git a/kit/code.google.com/p/snappy-go/LICENSE b/kit/code.google.com/p/snappy-go/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/kit/code.google.com/p/snappy-go/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/kit/code.google.com/p/snappy-go/README b/kit/code.google.com/p/snappy-go/README deleted file mode 100644 index 3cf8be1..0000000 --- a/kit/code.google.com/p/snappy-go/README +++ /dev/null @@ -1,11 +0,0 @@ -This is a Snappy library for the Go programming language. - -To download and install from source: -$ go get code.google.com/p/snappy-go/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. 
- -Contributions should follow the same procedure as for the Go project: -http://golang.org/doc/contribute.html - diff --git a/kit/code.google.com/p/snappy-go/lib/codereview/codereview.cfg b/kit/code.google.com/p/snappy-go/lib/codereview/codereview.cfg deleted file mode 100644 index 93b55c0..0000000 --- a/kit/code.google.com/p/snappy-go/lib/codereview/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -defaultcc: golang-dev@googlegroups.com diff --git a/kit/code.google.com/p/snappy-go/snappy/decode.go b/kit/code.google.com/p/snappy-go/snappy/decode.go deleted file mode 100644 index d93c1b9..0000000 --- a/kit/code.google.com/p/snappy-go/snappy/decode.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" -) - -// ErrCorrupt reports that the input is invalid. -var ErrCorrupt = errors.New("snappy: corrupt input") - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n == 0 { - return 0, 0, ErrCorrupt - } - if uint64(int(v)) != v { - return 0, 0, errors.New("snappy: decoded block is too large") - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s += 1 - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} diff --git a/kit/code.google.com/p/snappy-go/snappy/encode.go b/kit/code.google.com/p/snappy-go/snappy/encode.go deleted file mode 100644 index b2371db..0000000 --- a/kit/code.google.com/p/snappy-go/snappy/encode.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. 
-func Encode(dst, src []byte) ([]byte, error) { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d], nil - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. 
- if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d], nil -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} diff --git a/kit/code.google.com/p/snappy-go/snappy/snappy.go b/kit/code.google.com/p/snappy-go/snappy/snappy.go deleted file mode 100644 index 2f1b790..0000000 --- a/kit/code.google.com/p/snappy-go/snappy/snappy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. 
-// -// The C++ snappy implementation is at http://code.google.com/p/snappy/ -package snappy - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) diff --git a/kit/code.google.com/p/snappy-go/snappy/snappy_test.go b/kit/code.google.com/p/snappy-go/snappy/snappy_test.go deleted file mode 100644 index 7ba8392..0000000 --- a/kit/code.google.com/p/snappy-go/snappy/snappy_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - -func roundtrip(b, ebuf, dbuf []byte) error { - e, err := Encode(ebuf, b) - if err != nil { - return fmt.Errorf("encoding error: %v", err) - } - d, err := Decode(dbuf, e) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rand.Seed(27354294) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i, _ := range b { - b[i] = uint8(rand.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i, _ := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded, err := Encode(nil, src) - if err != nil { - b.Fatal(err) - } - // Bandwidth is in amount of uncompressed data. 
- b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b *testing.B, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Fatalf("failed reading %s: %s", filename, err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. -func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. - data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. -// The label field is unused in snappy-go. 
-var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "house.jpg"}, - {"pdf", "mapreduce-osdi-1.pdf"}, - {"html4", "html_x_4"}, - {"cp", "cp.html"}, - {"c", "fields.c"}, - {"lsp", "grammar.lsp"}, - {"xls", "kennedy.xls"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"bin", "ptt5"}, - {"sum", "sum"}, - {"man", "xargs.1"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" - -func downloadTestdata(basename string) (errRet error) { - filename := filepath.Join("testdata", basename) - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - resp, err := http.Get(baseURL + basename) - if err != nil { - return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) - } - defer resp.Body.Close() - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to write %s: %s", filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - filename := filepath.Join("testdata", testFiles[n].filename) - if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { - if !*download { - b.Fatal("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. 
- if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { - b.Fatalf("failed to create testdata: %s", err) - } - for _, tf := range testFiles { - if err := downloadTestdata(tf.filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - } - } - data := readFile(b, filename) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. -func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) } -func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) } -func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) } -func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) } -func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) } -func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, 
false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } -func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) } -func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) } -func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) } -func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) } -func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) } -func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) } diff --git a/kit/fs/fs.go b/kit/fs/fs.go index 22b6725..dfbf4db 100644 --- a/kit/fs/fs.go +++ b/kit/fs/fs.go @@ -8,20 +8,19 @@ package fs import ( - // "fmt" "io/ioutil" "log" "os" "path" - . "github.com/gocircuit/escher/a" - . "github.com/gocircuit/escher/be" - . "github.com/gocircuit/escher/circuit" - fio "github.com/gocircuit/escher/faculty/io" - "github.com/gocircuit/escher/see" + "github.com/hoijui/escher/a" + "github.com/hoijui/escher/be" + cir "github.com/hoijui/escher/circuit" + fio "github.com/hoijui/escher/faculty/io" + "github.com/hoijui/escher/see" ) -func Load(filedir string) Index { +func Load(filedir string) be.Index { fi, err := os.Stat(filedir) if err != nil { log.Fatalf("cannot read source file %s (%v)", filedir, err) @@ -33,15 +32,15 @@ func Load(filedir string) Index { } // loadDirectory ... 
-func loadDirectory(dir string) Index { +func loadDirectory(dir string) be.Index { d, err := os.Open(dir) if err != nil { log.Fatalln(err) } defer d.Close() // - x := NewIndex() - x.Memorize(New().Grow("Dir", dir), Source{}) + x := be.NewIndex() + x.Memorize(cir.New().Grow("Dir", dir), a.Source{}) // fileInfos, err := d.Readdir(0) if err != nil { @@ -50,7 +49,7 @@ func loadDirectory(dir string) Index { for _, fileInfo := range fileInfos { filePath := path.Join(dir, fileInfo.Name()) if fileInfo.IsDir() { // directory - x.Memorize(loadDirectory(filePath), fileInfo.Name()) // Index can memorize Indexs recursively + x.Memorize(loadDirectory(filePath), fileInfo.Name()) // Index can memorize Indices recursively continue } if path.Ext(fileInfo.Name()) != ".escher" { // file @@ -63,13 +62,13 @@ func loadDirectory(dir string) Index { } // loadFile ... -func loadFile(dir, file string) Index { +func loadFile(dir, file string) be.Index { text, err := ioutil.ReadFile(file) if err != nil { log.Fatalf("Problem reading source file %s (%v)", file, err) } - x := NewIndex() - src := NewSrcString(string(text)) + x := be.NewIndex() + src := a.NewSrcString(string(text)) for { see.Space(src) n_, u_ := see.SeePeer(src) @@ -77,8 +76,8 @@ func loadFile(dir, file string) Index { break } n := n_.(string) // n is a string - if u, ok := u_.(Circuit); ok { - u.Include(Source{}, New().Grow("Dir", dir).Grow("File", file)) + if u, ok := u_.(cir.Circuit); ok { + u.Include(a.Source{}, cir.New().Grow("Dir", dir).Grow("File", file)) } x.Memorize(u_, n) } diff --git a/kit/github.com/ChimeraCoder/anaconda/.gitignore b/kit/github.com/ChimeraCoder/anaconda/.gitignore deleted file mode 100644 index e607e0e..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.git.genus -*.swp -*.swo -*.swn -conf.sh -*.patch diff --git a/kit/github.com/ChimeraCoder/anaconda/COPYING b/kit/github.com/ChimeraCoder/anaconda/COPYING deleted file mode 120000 index 7a694c9..0000000 --- 
a/kit/github.com/ChimeraCoder/anaconda/COPYING +++ /dev/null @@ -1 +0,0 @@ -LICENSE \ No newline at end of file diff --git a/kit/github.com/ChimeraCoder/anaconda/LICENSE b/kit/github.com/ChimeraCoder/anaconda/LICENSE deleted file mode 100644 index bb01fc7..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2013 Aditya Mukerjee, Quotidian Ventures - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE diff --git a/kit/github.com/ChimeraCoder/anaconda/README b/kit/github.com/ChimeraCoder/anaconda/README deleted file mode 100644 index 7e6e6b4..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/README +++ /dev/null @@ -1,99 +0,0 @@ -Anaconda -==================== - -[![GoDoc](https://godoc.org/github.com/gocircuit/escher/kit/github.com/gocircuit/escher/kit/github.com/ChimeraCoder/anaconda?status.png)](https://godoc.org/github.com/gocircuit/escher/kit/github.com/gocircuit/escher/kit/github.com/ChimeraCoder/anaconda) - -Anaconda is a simple, transparent Go package for accessing version 1.1 of the Twitter API. - -Successful API queries return native Go structs that can be used immediately, with no need for type assertions. - - - -Examples -------------- - -### Authentication - -If you already have the access token (and secret) for your user (Twitter provides this for your own account on the developer portal), creating the client is simple: - -````go -anaconda.SetConsumerKey("your-consumer-key") -anaconda.SetConsumerSecret("your-consumer-secret") -api := anaconda.NewTwitterApi("your-access-token", "your-access-token-secret") -```` - -### Queries - -Queries are conducted using a pointer to an authenticated `TwitterApi` struct. In v1.1 of Twitter's API, all requests should be authenticated. - -````go -searchResult, _ := api.GetSearch("golang", nil) -for _ , tweet := range searchResult { - fmt.Println(tweet.Text) -} -```` -Certain endpoints allow separate optional parameter; if desired, these can be passed as the final parameter. 
- -````go -//Perhaps we want 30 values instead of the default 15 -v := url.Values{} -v.Set("count", "30") -result, err := api.GetSearch("golang", v) -```` - -(Remember that `url.Values` is equivalent to a `map[string][]string`, if you find that more convenient notation when specifying values). Otherwise, `nil` suffices. - - - -Endpoints ------------- - -Anaconda implements most of the endpoints defined in the [Twitter API documentation](https://dev.twitter.com/docs/api/1.1). For clarity, in most cases, the function name is simply the name of the HTTP method and the endpoint (e.g., the endpoint `GET /friendships/incoming` is provided by the function `GetFriendshipsIncoming`). - -In a few cases, a shortened form has been chosen to make life easier (for example, retweeting is simply the function `Retweet`) - - - -Error Handling, Rate Limiting, and Throttling ---------------------------------- - -###Error Handling - -Twitter errors are returned as an `ApiError`, which satisfies the `error` interface and can be treated as a vanilla `error`. However, it also contains the additional information returned by the Twitter API that may be useful in deciding how to proceed after encountering an error. - - -If you make queries too quickly, you may bump against Twitter's [rate limits](https://dev.twitter.com/docs/rate-limiting/1.1). If this happens, `anaconda` automatically retries the query when the rate limit resets, using the `X-Rate-Limit-Reset` header that Twitter provides to determine how long to wait. - -In other words, users of the `anaconda` library should not need to handle rate limiting errors themselves; this is handled seamlessly behind-the-scenes. If an error is returned by a function, another form of error must have occurred (which can be checked by using the fields provided by the `ApiError` struct). - - -(If desired, this feature can be turned off by calling `ReturnRateLimitError(true)`.) 
- - -###Throttling - -Anaconda now supports automatic client-side throttling of queries to avoid hitting the Twitter rate-limit. - -This is currently *off* by default; however, it may be turned on by default in future versions of the library, as the implementation is improved. - - -To set a delay between queries, use the `SetDelay` method: - -````go - api.SetDelay(10 * time.Second) -```` - -Delays are set specific to each `TwitterApi` struct, so queries that use different users' access credentials are completely independent. - - -To turn off automatic throttling, set the delay to `0`: - -````go - api.SetDelay(0 * time.Second) -```` - - - -License ------------ -Anaconda is free software licensed under the MIT/X11 license. Details provided in the LICENSE file. diff --git a/kit/github.com/ChimeraCoder/anaconda/README.md b/kit/github.com/ChimeraCoder/anaconda/README.md deleted file mode 120000 index 100b938..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/README.md +++ /dev/null @@ -1 +0,0 @@ -README \ No newline at end of file diff --git a/kit/github.com/ChimeraCoder/anaconda/account.go b/kit/github.com/ChimeraCoder/anaconda/account.go deleted file mode 100644 index 756be04..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/account.go +++ /dev/null @@ -1,23 +0,0 @@ -package anaconda - -import ( - "net/url" -) - -// Verify the credentials by making a very small request -func (a TwitterApi) VerifyCredentials() (ok bool, err error) { - v := cleanValues(nil) - v.Set("include_entities", "false") - v.Set("skip_status", "true") - - _, err = a.GetSelf(v) - return err == nil, err -} - -// Get the user object for the authenticated user. 
Requests /account/verify_credentials -func (a TwitterApi) GetSelf(v url.Values) (u User, err error) { - v = cleanValues(v) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/account/verify_credentials.json", v, &u, _GET, response_ch} - return u, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/directmessage.go b/kit/github.com/ChimeraCoder/anaconda/directmessage.go deleted file mode 100644 index ef14c11..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/directmessage.go +++ /dev/null @@ -1,15 +0,0 @@ -package anaconda - -type DirectMessage struct { - CreatedAt string `json:"created_at"` - Entities Entities `json:"entities"` - Id int64 `json:"id"` - IdStr string `json:"id_str"` - Recipient User `json:"recipient"` - RecipientId int64 `json:"recipient_id"` - RecipientScreenName string `json:"recipient_screen_name"` - Sender User `json:"sender"` - SenderId int64 `json:"sender_id"` - SenderScreenName string `json:"sender_screen_name"` - Text string `json:"text"` -} diff --git a/kit/github.com/ChimeraCoder/anaconda/directmessages.go b/kit/github.com/ChimeraCoder/anaconda/directmessages.go deleted file mode 100644 index 0fd9490..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/directmessages.go +++ /dev/null @@ -1,23 +0,0 @@ -package anaconda - -import ( - "net/url" -) - -func (a TwitterApi) GetDirectMessages(v url.Values) (messages []DirectMessage, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/direct_messages.json", v, &messages, _GET, response_ch} - return messages, (<-response_ch).err -} - -func (a TwitterApi) GetDirectMessagesSent(v url.Values) (messages []DirectMessage, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/direct_messages_sent.json", v, &messages, _GET, response_ch} - return messages, (<-response_ch).err -} - -func (a TwitterApi) GetDirectMessagesShow(v url.Values) (messages []DirectMessage, err error) { - response_ch := make(chan 
response) - a.queryQueue <- query{BaseUrl + "/direct_messages/show.json", v, &messages, _GET, response_ch} - return messages, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/errors.go b/kit/github.com/ChimeraCoder/anaconda/errors.go deleted file mode 100644 index 929ea96..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/errors.go +++ /dev/null @@ -1,111 +0,0 @@ -package anaconda - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -const ( - //Error code defintions match the Twitter documentation - //https://dev.twitter.com/docs/error-codes-responses - TwitterErrorCouldNotAuthenticate = 32 - TwitterErrorDoesNotExist = 34 - TwitterErrorAccountSuspended = 64 - TwitterErrorApi1Deprecation = 68 //This should never be needed - TwitterErrorRateLimitExceeded = 88 - TwitterErrorInvalidToken = 89 - TwitterErrorOverCapacity = 130 - TwitterErrorInternalError = 131 - TwitterErrorCouldNotAuthenticateYou = 135 - TwitterErrorStatusIsADuplicate = 187 - TwitterErrorBadAuthenticationData = 215 - TwitterErrorUserMustVerifyLogin = 231 -) - -type ApiError struct { - StatusCode int - Header http.Header - Body string - Decoded TwitterErrorResponse - URL *url.URL -} - -func newApiError(resp *http.Response) *ApiError { - // TODO don't ignore this error - // TODO don't use ReadAll - p, _ := ioutil.ReadAll(resp.Body) - - var twitterErrorResp TwitterErrorResponse - _ = json.Unmarshal(p, &twitterErrorResp) - return &ApiError{ - StatusCode: resp.StatusCode, - Header: resp.Header, - Body: string(p), - Decoded: twitterErrorResp, - URL: resp.Request.URL, - } -} - -// ApiError supports the error interface -func (aerr ApiError) Error() string { - return fmt.Sprintf("Get %s returned status %d, %s", aerr.URL, aerr.StatusCode, aerr.Body) -} - -// Check to see if an error is a Rate Limiting error. If so, find the next available window in the header. 
-// Use like so: -// -// if aerr, ok := err.(*ApiError); ok { -// if isRateLimitError, nextWindow := aerr.RateLimitCheck(); isRateLimitError { -// <-time.After(nextWindow.Sub(time.Now())) -// } -// } -// -func (aerr *ApiError) RateLimitCheck() (isRateLimitError bool, nextWindow time.Time) { - // TODO check for error code 130, which also signifies a rate limit - if aerr.StatusCode == 429 { - if reset := aerr.Header.Get("X-Rate-Limit-Reset"); reset != "" { - if resetUnix, err := strconv.ParseInt(reset, 10, 64); err == nil { - resetTime := time.Unix(resetUnix, 0) - // Reject any time greater than an hour away - if resetTime.Sub(time.Now()) > time.Hour { - return true, time.Now().Add(15 * time.Minute) - } - - return true, resetTime - } - } - } - - return false, time.Time{} -} - -//TwitterErrorResponse has an array of Twitter error messages -//It satisfies the "error" interface -//For the most part, Twitter seems to return only a single error message -//Currently, we assume that this always contains exactly one error message -type TwitterErrorResponse struct { - Errors []TwitterError `json:"errors"` -} - -func (tr TwitterErrorResponse) First() error { - return tr.Errors[0] -} - -func (tr TwitterErrorResponse) Error() string { - return tr.Errors[0].Message -} - -//TwitterError represents a single Twitter error messages/code pair -type TwitterError struct { - Message string `json:"message"` - Code int `json:"code"` -} - -func (te TwitterError) Error() string { - return te.Message -} diff --git a/kit/github.com/ChimeraCoder/anaconda/example_test.go b/kit/github.com/ChimeraCoder/anaconda/example_test.go deleted file mode 100644 index df6d8e4..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/example_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package anaconda_test - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/ChimeraCoder/anaconda" - "time" -) - -// Initialize an client library for a given user. 
-// This only needs to be done *once* per user -func ExampleTwitterApi_InitializeClient() { - anaconda.SetConsumerKey("your-consumer-key") - anaconda.SetConsumerSecret("your-consumer-secret") - api := anaconda.NewTwitterApi(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) - fmt.Println(*api.Credentials) -} - -func ExampleTwitterApi_GetSearch() { - - anaconda.SetConsumerKey("your-consumer-key") - anaconda.SetConsumerSecret("your-consumer-secret") - api := anaconda.NewTwitterApi("your-access-token", "your-access-token-secret") - search_result, err := api.GetSearch("golang", nil) - if err != nil { - panic(err) - } - for _, tweet := range search_result { - fmt.Print(tweet.Text) - } -} - -// Throttling queries can easily be handled in the background, automatically -func ExampleTwitterApi_Throttling() { - api := anaconda.NewTwitterApi("your-access-token", "your-access-token-secret") - api.EnableThrottling(10*time.Second, 5) - - // These queries will execute in order - // with appropriate delays inserted only if necessary - golangTweets, err := api.GetSearch("golang", nil) - anacondaTweets, err2 := api.GetSearch("anaconda", nil) - - if err != nil { - panic(err) - } - if err2 != nil { - panic(err) - } - - fmt.Println(golangTweets) - fmt.Println(anacondaTweets) -} - -// Fetch a list of all followers without any need for managing cursors -// (Each page is automatically fetched when the previous one is read) -func ExampleTwitterApi_GetFollowersListAll() { - pages := api.GetFollowersListAll(nil) - for page := range pages { - //Print the current page of followers - fmt.Println(page.Followers) - } -} diff --git a/kit/github.com/ChimeraCoder/anaconda/favorites.go b/kit/github.com/ChimeraCoder/anaconda/favorites.go deleted file mode 100644 index 66b33bb..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/favorites.go +++ /dev/null @@ -1,11 +0,0 @@ -package anaconda - -import ( - "net/url" -) - -func (a TwitterApi) GetFavorites(v url.Values) (favorites []Tweet, err error) { - response_ch := 
make(chan response) - a.queryQueue <- query{BaseUrl + "/favorites/list.json", v, &favorites, _GET, response_ch} - return favorites, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/friends_followers.go b/kit/github.com/ChimeraCoder/anaconda/friends_followers.go deleted file mode 100644 index 5ee66f9..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/friends_followers.go +++ /dev/null @@ -1,124 +0,0 @@ -package anaconda - -import ( - "net/url" -) - -type Cursor struct { - Previous_cursor int64 - Previous_cursor_str string - - Ids []int64 - - Next_cursor int64 - Next_cursor_str string -} - -type UserCursor struct { - Previous_cursor int64 - Previous_cursor_str string - Next_cursor int64 - Next_cursor_str string - Users []User -} - -type Friendship struct { - Name string - Id_str string - Id int64 - Connections []string - Screen_name string -} - -type FollowersPage struct { - Followers []User - Error error -} - -//GetFriendshipsNoRetweets s a collection of user_ids that the currently authenticated user does not want to receive retweets from. 
-//It does not currently support the stringify_ids parameter -func (a TwitterApi) GetFriendshipsNoRetweets() (ids []int64, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friendships/no_retweets/ids.json", nil, &ids, _GET, response_ch} - return ids, (<-response_ch).err -} - -func (a TwitterApi) GetFollowersIds(v url.Values) (c Cursor, err error) { - err = a.apiGet(BaseUrl+"/followers/ids.json", v, &c) - return -} - -func (a TwitterApi) GetFriendsIds(v url.Values) (c Cursor, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friends/ids.json", v, &c, _GET, response_ch} - return c, (<-response_ch).err -} - -func (a TwitterApi) GetFriendshipsLookup(v url.Values) (friendships []Friendship, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friendships/lookup.json", v, &friendships, _GET, response_ch} - return friendships, (<-response_ch).err -} - -func (a TwitterApi) GetFriendshipsIncoming(v url.Values) (c Cursor, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friendships/incoming.json", v, &c, _GET, response_ch} - return c, (<-response_ch).err -} - -func (a TwitterApi) GetFriendshipsOutgoing(v url.Values) (c Cursor, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friendships/outgoing.json", v, &c, _GET, response_ch} - return c, (<-response_ch).err -} - -func (a TwitterApi) GetFollowersList(v url.Values) (c UserCursor, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/followers/list.json", v, &c, _GET, response_ch} - return c, (<-response_ch).err -} - -func (a TwitterApi) GetFriendsList(v url.Values) (c UserCursor, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friends/list.json", v, &c, _GET, response_ch} - return c, (<-response_ch).err -} - -// Like GetFollowersList, but returns a channel instead of a cursor and 
pre-fetches the remaining results -// This channel is closed once all values have been fetched -func (a TwitterApi) GetFollowersListAll(v url.Values) (result chan FollowersPage) { - - result = make(chan FollowersPage) - - if v == nil { - v = url.Values{} - } - go func(a TwitterApi, v url.Values, result chan FollowersPage) { - // Cursor defaults to the first page ("-1") - next_cursor := "-1" - for { - v.Set("cursor", next_cursor) - c, err := a.GetFollowersList(v) - - // throttledQuery() handles all rate-limiting errors - // if GetFollowersList() returns an error, it must be a different kind of error - - result <- FollowersPage{c.Users, err} - - next_cursor = c.Next_cursor_str - if next_cursor == "0" { - close(result) - break - } - } - }(a, v, result) - return result -} - -// Like GetFriendsIds, but returns a channel instead of a cursor and pre-fetches the remaining results -// This channel is closed once all values have been fetched -func (a TwitterApi) GetFriendsIdsAll(v url.Values) (c Cursor, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/friends/ids.json", v, &c, _GET, response_ch} - return c, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/oembed.go b/kit/github.com/ChimeraCoder/anaconda/oembed.go deleted file mode 100644 index 012d8c1..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/oembed.go +++ /dev/null @@ -1,49 +0,0 @@ -package anaconda - -import ( - "net/http" - "net/url" - "strconv" -) - -type OEmbed struct { - Type string - Width int - Cache_age string - Height int - Author_url string - Html string - Version string - Provider_name string - Provider_url string - Url string - Author_name string -} - -// No authorization on this endpoint. Its the only one. -func (a TwitterApi) GetOEmbed(v url.Values) (o OEmbed, err error) { - resp, err := http.Get(BaseUrlV1 + "/statuses/oembed.json?" 
+ v.Encode()) - if err != nil { - return - } - defer resp.Body.Close() - - err = decodeResponse(resp, &o) - return -} - -// Calls GetOEmbed with the corresponding id. Convenience wrapper for GetOEmbed() -func (a TwitterApi) GetOEmbedId(id int64, v url.Values) (o OEmbed, err error) { - if v == nil { - v = url.Values{} - } - v.Set("id", strconv.FormatInt(id, 10)) - resp, err := http.Get(BaseUrlV1 + "/statuses/oembed.json?" + v.Encode()) - if err != nil { - return - } - defer resp.Body.Close() - - err = decodeResponse(resp, &o) - return -} diff --git a/kit/github.com/ChimeraCoder/anaconda/oembed_test.go b/kit/github.com/ChimeraCoder/anaconda/oembed_test.go deleted file mode 100644 index 20829fe..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/oembed_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package anaconda - -import ( - "net/url" - "reflect" - "testing" -) - -func TestOEmbed(t *testing.T) { - // It is the only one that can be tested without auth - api := NewTwitterApi("", "") - o, err := api.GetOEmbed(url.Values{"id": []string{"99530515043983360"}}) - if err != nil { - t.Error(err) - } - - if !reflect.DeepEqual(o, expectedOEmbed) { - t.Error("Actual OEmbed differs from expected", o) - } -} - -var expectedOEmbed OEmbed = OEmbed{ - Cache_age: "3153600000", - Url: "https://twitter.com/twitter/statuses/99530515043983360", - Height: 0, - Provider_url: "https://twitter.com", - Provider_name: "Twitter", - Author_name: "Twitter", - Version: "1.0", - Author_url: "https://twitter.com/twitter", - Type: "rich", - Html: "\u003Cblockquote class=\"twitter-tweet\"\u003E\u003Cp\u003ECool! 
\u201C\u003Ca href=\"https://twitter.com/tw1tt3rart\"\u003E@tw1tt3rart\u003C/a\u003E: \u003Ca href=\"https://twitter.com/search?q=%23TWITTERART&src=hash\"\u003E#TWITTERART\u003C/a\u003E \u2571\u2571\u2571\u2571\u2571\u2571\u2571\u2571 \u2571\u2571\u256D\u2501\u2501\u2501\u2501\u256E\u2571\u2571\u256D\u2501\u2501\u2501\u2501\u256E \u2571\u2571\u2503\u2587\u2506\u2506\u2587\u2503\u2571\u256D\u252B\u24E6\u24D4\u24D4\u24DA\u2503 \u2571\u2571\u2503\u25BD\u25BD\u25BD\u25BD\u2503\u2501\u256F\u2503\u2661\u24D4\u24DD\u24D3\u2503 \u2571\u256D\u252B\u25B3\u25B3\u25B3\u25B3\u2523\u256E\u2571\u2570\u2501\u2501\u2501\u2501\u256F \u2571\u2503\u2503\u2506\u2506\u2506\u2506\u2503\u2503\u2571\u2571\u2571\u2571\u2571\u2571 \u2571\u2517\u252B\u2506\u250F\u2513\u2506\u2523\u251B\u2571\u2571\u2571\u2571\u2571\u201D\u003C/p\u003E— Twitter (@twitter) \u003Ca href=\"https://twitter.com/twitter/statuses/99530515043983360\"\u003EAugust 5, 2011\u003C/a\u003E\u003C/blockquote\u003E\n\u003Cscript async src=\"//platform.twitter.com/widgets.js\" charset=\"utf-8\"\u003E\u003C/script\u003E", - Width: 550, -} diff --git a/kit/github.com/ChimeraCoder/anaconda/place.go b/kit/github.com/ChimeraCoder/anaconda/place.go deleted file mode 100644 index 86d3bfd..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/place.go +++ /dev/null @@ -1,35 +0,0 @@ -package anaconda - -type Place struct { - Attributes map[string]string `json:"attributes"` - BoundingBox struct { - Coordinates [][][]float64 `json:"coordinates"` - Type string `json:"type"` - } `json:"bounding_box"` - ContainedWithin []struct { - Attributes map[string]string `json:"attributes"` - BoundingBox struct { - Coordinates [][][]float64 `json:"coordinates"` - Type string `json:"type"` - } `json:"bounding_box"` - Country string `json:"country"` - CountryCode string `json:"country_code"` - FullName string `json:"full_name"` - ID string `json:"id"` - Name string `json:"name"` - PlaceType string `json:"place_type"` - URL string `json:"url"` - } 
`json:"contained_within"` - Country string `json:"country"` - CountryCode string `json:"country_code"` - FullName string `json:"full_name"` - Geometry struct { - Coordinates [][][]float64 `json:"coordinates"` - Type string `json:"type"` - } `json:"geometry"` - ID string `json:"id"` - Name string `json:"name"` - PlaceType string `json:"place_type"` - Polylines []string `json:"polylines"` - URL string `json:"url"` -} diff --git a/kit/github.com/ChimeraCoder/anaconda/search.go b/kit/github.com/ChimeraCoder/anaconda/search.go deleted file mode 100644 index 4be525b..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/search.go +++ /dev/null @@ -1,26 +0,0 @@ -package anaconda - -import ( - "net/url" -) - -type searchResponse struct { - Statuses []Tweet -} - -func (a TwitterApi) GetSearch(queryString string, v url.Values) (timeline []Tweet, err error) { - var sr searchResponse - - v = cleanValues(v) - v.Set("q", queryString) - - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/search/tweets.json", v, &sr, _GET, response_ch} - - // We have to read from the response channel before assigning to timeline - // Otherwise this will happen before the responses have been written - resp := <-response_ch - err = resp.err - timeline = sr.Statuses - return timeline, err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/timeline.go b/kit/github.com/ChimeraCoder/anaconda/timeline.go deleted file mode 100644 index 3a9ab5c..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/timeline.go +++ /dev/null @@ -1,41 +0,0 @@ -package anaconda - -import ( - "net/url" -) - -// GetHomeTimeline returns the most recent tweets and retweets posted by the user -// and the users that they follow. 
-// https://dev.twitter.com/docs/api/1.1/get/statuses/home_timeline -// By default, include_entities is set to "true" -func (a TwitterApi) GetHomeTimeline(v url.Values) (timeline []Tweet, err error) { - if v == nil { - v = url.Values{} - } - - if val := v.Get("include_entities"); val == "" { - v.Set("include_entities", "true") - } - - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/statuses/home_timeline.json", v, &timeline, _GET, response_ch} - return timeline, (<-response_ch).err -} - -func (a TwitterApi) GetUserTimeline(v url.Values) (timeline []Tweet, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/statuses/user_timeline.json", v, &timeline, _GET, response_ch} - return timeline, (<-response_ch).err -} - -func (a TwitterApi) GetMentionsTimeline(v url.Values) (timeline []Tweet, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/statuses/mentions_timeline.json", v, &timeline, _GET, response_ch} - return timeline, (<-response_ch).err -} - -func (a TwitterApi) GetRetweetsOfMe(v url.Values) (tweets []Tweet, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/statuses/retweets_of_me.json", v, &tweets, _GET, response_ch} - return tweets, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/tweet.go b/kit/github.com/ChimeraCoder/anaconda/tweet.go deleted file mode 100644 index 3626345..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/tweet.go +++ /dev/null @@ -1,36 +0,0 @@ -package anaconda - -import ( - "time" -) - -type Tweet struct { - Contributors []int64 `json:"contributors"` - Coordinates interface{} `json:"coordinates"` - CreatedAt string `json:"created_at"` - Entities Entities `json:"entities"` - FavoriteCount int `json:"favorite_count"` - Favorited bool `json:"favorited"` - Geo interface{} `json:"geo"` - Id int64 `json:"id"` - IdStr string `json:"id_str"` - InReplyToScreenName string 
`json:"in_reply_to_screen_name"` - InReplyToStatusID int64 `json:"in_reply_to_status_id"` - InReplyToStatusIdStr string `json:"in_reply_to_status_id_str"` - InReplyToUserID int64 `json:"in_reply_to_user_id"` - InReplyToUserIdStr string `json:"in_reply_to_user_id_str"` - Place Place `json:"place"` - PossiblySensitive bool `json:"possibly_sensitive"` - RetweetCount int `json:"retweet_count"` - Retweeted bool `json:"retweeted"` - RetweetedStatus *Tweet `json:"retweeted_status"` - Source string `json:"source"` - Text string `json:"text"` - Truncated bool `json:"truncated"` - User User `json:"user"` -} - -// CreatedAtTime is a convenience wrapper that returns the Created_at time, parsed as a time.Time struct -func (t Tweet) CreatedAtTime() (time.Time, error) { - return time.Parse(time.RubyDate, t.CreatedAt) -} diff --git a/kit/github.com/ChimeraCoder/anaconda/tweets.go b/kit/github.com/ChimeraCoder/anaconda/tweets.go deleted file mode 100644 index 0e05dfc..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/tweets.go +++ /dev/null @@ -1,65 +0,0 @@ -package anaconda - -import ( - "fmt" - "net/url" - "strconv" -) - -func (a TwitterApi) GetTweet(id int64, v url.Values) (tweet Tweet, err error) { - v = cleanValues(v) - v.Set("id", strconv.FormatInt(id, 10)) - - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/statuses/show.json", v, &tweet, _GET, response_ch} - return tweet, (<-response_ch).err -} - -func (a TwitterApi) GetRetweets(id int64, v url.Values) (tweets []Tweet, err error) { - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + fmt.Sprintf("/statuses/retweets/%d.json", id), v, &tweets, _GET, response_ch} - return tweets, (<-response_ch).err -} - -//PostTweet will create a tweet with the specified status message -func (a TwitterApi) PostTweet(status string, v url.Values) (tweet Tweet, err error) { - v = cleanValues(v) - v.Set("status", status) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + 
"/statuses/update.json", v, &tweet, _POST, response_ch} - return tweet, (<-response_ch).err -} - -//DeleteTweet will destroy (delete) the status (tweet) with the specified ID, assuming that the authenticated user is the author of the status (tweet). -//If trimUser is set to true, only the user's Id will be provided in the user object returned. -func (a TwitterApi) DeleteTweet(id int64, trimUser bool) (tweet Tweet, err error) { - v := url.Values{} - if trimUser { - v.Set("trim_user", "t") - } - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + fmt.Sprintf("/statuses/destroy/%d.json", id), v, &tweet, _POST, response_ch} - return tweet, (<-response_ch).err -} - -//Retweet will retweet the status (tweet) with the specified ID. -//trimUser functions as in DeleteTweet -func (a TwitterApi) Retweet(id int64, trimUser bool) (rt Tweet, err error) { - v := url.Values{} - if trimUser { - v.Set("trim_user", "t") - } - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + fmt.Sprintf("/statuses/retweet/%d.json", id), v, &rt, _POST, response_ch} - return rt, (<-response_ch).err -} - -// Favorite will favorite the status (tweet) with the specified ID. -// https://dev.twitter.com/docs/api/1.1/post/favorites/create -func (a TwitterApi) Favorite(id int64) (rt Tweet, err error) { - v := url.Values{} - v.Set("id", fmt.Sprint(id)) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + fmt.Sprintf("/favorites/create.json"), v, &rt, _POST, response_ch} - return rt, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/anaconda/twitter.go b/kit/github.com/ChimeraCoder/anaconda/twitter.go deleted file mode 100644 index 649579a..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/twitter.go +++ /dev/null @@ -1,262 +0,0 @@ -//Package anaconda provides structs and functions for accessing version 1.1 -//of the Twitter API. 
-// -//Successful API queries return native Go structs that can be used immediately, -//with no need for type assertions. -// -//Authentication -// -//If you already have the access token (and secret) for your user (Twitter provides this for your own account on the developer portal), creating the client is simple: -// -// anaconda.SetConsumerKey("your-consumer-key") -// anaconda.SetConsumerSecret("your-consumer-secret") -// api := anaconda.NewTwitterApi("your-access-token", "your-access-token-secret") -// -// -//Queries -// -//Executing queries on an authenticated TwitterApi struct is simple. -// -// searchResult, _ := api.GetSearch("golang", nil) -// for _ , tweet := range searchResult { -// fmt.Print(tweet.Text) -// } -// -//Certain endpoints allow separate optional parameter; if desired, these can be passed as the final parameter. -// -// v := url.Values{} -// v.Set("count", "30") -// result, err := api.GetSearch("golang", v) -// -// -//Endpoints -// -//Anaconda implements most of the endpoints defined in the Twitter API documentation: https://dev.twitter.com/docs/api/1.1. -//For clarity, in most cases, the function name is simply the name of the HTTP method and the endpoint (e.g., the endpoint `GET /friendships/incoming` is provided by the function `GetFriendshipsIncoming`). -// -//In a few cases, a shortened form has been chosen to make life easier (for example, retweeting is simply the function `Retweet`) -// -//More detailed information about the behavior of each particular endpoint can be found at the official Twitter API documentation. 
-package anaconda - -import ( - "encoding/json" - "fmt" - "github.com/gocircuit/escher/kit/github.com/ChimeraCoder/tokenbucket" - "github.com/garyburd/go-oauth/oauth" - "io/ioutil" - "net/http" - "net/url" - "time" -) - -const ( - _GET = iota - _POST = iota - BaseUrlV1 = "https://api.twitter.com/1" - BaseUrl = "https://api.twitter.com/1.1" -) - -var oauthClient = oauth.Client{ - TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token", - ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authenticate", - TokenRequestURI: "https://api.twitter.com/oauth/access_token", -} - -type TwitterApi struct { - Credentials *oauth.Credentials - queryQueue chan query - bucket *tokenbucket.Bucket - returnRateLimitError bool - HttpClient *http.Client -} - -type query struct { - url string - form url.Values - data interface{} - method int - response_ch chan response -} - -type response struct { - data interface{} - err error -} - -const DEFAULT_DELAY = 0 * time.Second -const DEFAULT_CAPACITY = 5 - -//NewTwitterApi takes an user-specific access token and secret and returns a TwitterApi struct for that user. -//The TwitterApi struct can be used for accessing any of the endpoints available. 
-func NewTwitterApi(access_token string, access_token_secret string) *TwitterApi { - //TODO figure out how much to buffer this channel - //A non-buffered channel will cause blocking when multiple queries are made at the same time - queue := make(chan query) - c := &TwitterApi{&oauth.Credentials{Token: access_token, Secret: access_token_secret}, queue, nil, false, http.DefaultClient} - go c.throttledQuery() - return c -} - -//SetConsumerKey will set the application-specific consumer_key used in the initial OAuth process -//This key is listed on https://dev.twitter.com/apps/YOUR_APP_ID/show -func SetConsumerKey(consumer_key string) { - oauthClient.Credentials.Token = consumer_key -} - -//SetConsumerSecret will set the application-specific secret used in the initial OAuth process -//This secret is listed on https://dev.twitter.com/apps/YOUR_APP_ID/show -func SetConsumerSecret(consumer_secret string) { - oauthClient.Credentials.Secret = consumer_secret -} - -// ReturnRateLimitError specifies behavior when the Twitter API returns a rate-limit error. -// If set to true, the query will fail and return the error instead of automatically queuing and -// retrying the query when the rate limit expires -func (c *TwitterApi) ReturnRateLimitError(b bool) { - c.returnRateLimitError = b -} - -// Enable query throttling using the tokenbucket algorithm -func (c *TwitterApi) EnableThrottling(rate time.Duration, bufferSize int64) { - c.bucket = tokenbucket.NewBucket(rate, bufferSize) -} - -// Disable query throttling -func (c *TwitterApi) DisableThrottling() { - c.bucket = nil -} - -// SetDelay will set the delay between throttled queries -// To turn of throttling, set it to 0 seconds -func (c *TwitterApi) SetDelay(t time.Duration) { - c.bucket.SetRate(t) -} - -func (c *TwitterApi) GetDelay() time.Duration { - return c.bucket.GetRate() -} - -//AuthorizationURL generates the authorization URL for the first part of the OAuth handshake. -//Redirect the user to this URL. 
-//This assumes that the consumer key has already been set (using SetConsumerKey). -func AuthorizationURL(callback string) (string, *oauth.Credentials, error) { - tempCred, err := oauthClient.RequestTemporaryCredentials(http.DefaultClient, callback, nil) - if err != nil { - return "", nil, err - } - return oauthClient.AuthorizationURL(tempCred, nil), tempCred, nil -} - -func GetCredentials(tempCred *oauth.Credentials, verifier string) (*oauth.Credentials, url.Values, error) { - return oauthClient.RequestToken(http.DefaultClient, tempCred, verifier) -} - -func cleanValues(v url.Values) url.Values { - if v == nil { - return url.Values{} - } - return v -} - -// apiGet issues a GET request to the Twitter API and decodes the response JSON to data. -func (c TwitterApi) apiGet(urlStr string, form url.Values, data interface{}) error { - resp, err := oauthClient.Get(c.HttpClient, c.Credentials, urlStr, form) - if err != nil { - return err - } - defer resp.Body.Close() - return decodeResponse(resp, data) -} - -// apiPost issues a POST request to the Twitter API and decodes the response JSON to data. -func (c TwitterApi) apiPost(urlStr string, form url.Values, data interface{}) error { - resp, err := oauthClient.Post(c.HttpClient, c.Credentials, urlStr, form) - if err != nil { - return err - } - defer resp.Body.Close() - return decodeResponse(resp, data) -} - -// decodeResponse decodes the JSON response from the Twitter API. 
-func decodeResponse(resp *http.Response, data interface{}) error { - if resp.StatusCode != 200 { - return newApiError(resp) - } - return json.NewDecoder(resp.Body).Decode(data) -} - -func NewApiError(resp *http.Response) *ApiError { - body, _ := ioutil.ReadAll(resp.Body) - - return &ApiError{ - StatusCode: resp.StatusCode, - Header: resp.Header, - Body: string(body), - URL: resp.Request.URL, - } -} - -//query executes a query to the specified url, sending the values specified by form, and decodes the response JSON to data -//method can be either _GET or _POST -func (c TwitterApi) execQuery(urlStr string, form url.Values, data interface{}, method int) error { - switch method { - case _GET: - return c.apiGet(urlStr, form, data) - case _POST: - return c.apiPost(urlStr, form, data) - default: - return fmt.Errorf("HTTP method not yet supported") - } -} - -// throttledQuery executes queries and automatically throttles them according to SECONDS_PER_QUERY -// It is the only function that reads from the queryQueue for a particular *TwitterApi struct - -func (c *TwitterApi) throttledQuery() { - for q := range c.queryQueue { - url := q.url - form := q.form - data := q.data //This is where the actual response will be written - method := q.method - - response_ch := q.response_ch - - if c.bucket != nil { - <-c.bucket.SpendToken(1) - } - - err := c.execQuery(url, form, data, method) - - // Check if Twitter returned a rate-limiting error - if err != nil { - if apiErr, ok := err.(*ApiError); ok { - if isRateLimitError, nextWindow := apiErr.RateLimitCheck(); isRateLimitError && !c.returnRateLimitError { - // If this is a rate-limiting error, re-add the job to the queue - // TODO it really should preserve order - go func() { - c.queryQueue <- q - }() - - delay := nextWindow.Sub(time.Now()) - <-time.After(delay) - - // Drain the bucket (start over fresh) - if c.bucket != nil { - c.bucket.Drain() - } - - continue - } - } - } - - response_ch <- response{data, err} - } -} - -// Close 
query queue -func (c *TwitterApi) Close() { - close(c.queryQueue) -} diff --git a/kit/github.com/ChimeraCoder/anaconda/twitter_entities.go b/kit/github.com/ChimeraCoder/anaconda/twitter_entities.go deleted file mode 100644 index 29a62a0..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/twitter_entities.go +++ /dev/null @@ -1,46 +0,0 @@ -package anaconda - -type Entities struct { - Hashtags []struct { - Indices []int - Text string - } - Urls []struct { - Indices []int - Url string - Display_url string - Expanded_url string - } - User_mentions []struct { - Name string - Indices []int - Screen_name string - Id int64 - Id_str string - } - Media []struct { - Id int64 - Id_str string - Media_url string - Media_url_https string - Url string - Display_url string - Expanded_url string - Sizes MediaSizes - Type string - Indices []int - } -} - -type MediaSizes struct { - Medium MediaSize - Thumb MediaSize - Small MediaSize - Large MediaSize -} - -type MediaSize struct { - W int - H int - Resize string -} diff --git a/kit/github.com/ChimeraCoder/anaconda/twitter_test.go b/kit/github.com/ChimeraCoder/anaconda/twitter_test.go deleted file mode 100644 index 358eda7..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/twitter_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package anaconda_test - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/ChimeraCoder/anaconda" - "net/url" - "os" - "reflect" - "testing" - "time" -) - -var CONSUMER_KEY = os.Getenv("CONSUMER_KEY") -var CONSUMER_SECRET = os.Getenv("CONSUMER_SECRET") -var ACCESS_TOKEN = os.Getenv("ACCESS_TOKEN") -var ACCESS_TOKEN_SECRET = os.Getenv("ACCESS_TOKEN_SECRET") - -var api *anaconda.TwitterApi - -func init() { - // Initialize api so it can be used even when invidual tests are run in isolation - anaconda.SetConsumerKey(CONSUMER_KEY) - anaconda.SetConsumerSecret(CONSUMER_SECRET) - api = anaconda.NewTwitterApi(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) -} - -// Test_TwitterCredentials tests that non-empty Twitter 
credentials are set -// Without this, all following tests will fail -func Test_TwitterCredentials(t *testing.T) { - if CONSUMER_KEY == "" || CONSUMER_SECRET == "" || ACCESS_TOKEN == "" || ACCESS_TOKEN_SECRET == "" { - t.Errorf("Credentials are invalid: at least one is empty") - } -} - -// Test that creating a TwitterApi client creates a client with non-empty OAuth credentials -func Test_TwitterApi_NewTwitterApi(t *testing.T) { - anaconda.SetConsumerKey(CONSUMER_KEY) - anaconda.SetConsumerSecret(CONSUMER_SECRET) - api = anaconda.NewTwitterApi(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) - - if api.Credentials == nil { - t.Errorf("Twitter Api client has empty (nil) credentials") - } -} - -// Test that the GetSearch function actually works and returns non-empty results -func Test_TwitterApi_GetSearch(t *testing.T) { - search_result, err := api.GetSearch("golang", nil) - if err != nil { - t.Errorf("GetSearch yielded error %s", err.Error()) - panic(err) - } - - // Unless something is seriously wrong, there should be at least two tweets - if len(search_result) < 2 { - t.Errorf("Expected 2 or more tweets, and found %d", len(search_result)) - } - - // Check that at least one tweet is non-empty - for _, tweet := range search_result { - if tweet.Text != "" { - return - } - fmt.Print(tweet.Text) - } - - t.Errorf("All %d tweets had empty text", len(search_result)) -} - -// Test that a valid user can be fetched -// and that unmarshalling works properly -func Test_GetUser(t *testing.T) { - const username = "chimeracoder" - - users, err := api.GetUsersLookup(username, nil) - if err != nil { - t.Errorf("GetUsersLookup returned error: %s", err.Error()) - } - - if len(users) != 1 { - t.Errorf("Expected one user and received %d", len(users)) - } - - // If all attributes are equal to the zero value for that type, - // then the original value was not valid - if reflect.DeepEqual(users[0], anaconda.User{}) { - t.Errorf("Received %#v", users[0]) - } -} - -func Test_GetFavorites(t *testing.T) { - v 
:= url.Values{} - v.Set("screen_name", "chimeracoder") - favorites, err := api.GetFavorites(v) - if err != nil { - t.Errorf("GetFavorites returned error: %s", err.Error()) - } - - if len(favorites) == 0 { - t.Errorf("GetFavorites returned no favorites") - } - - if reflect.DeepEqual(favorites[0], anaconda.Tweet{}) { - t.Errorf("GetFavorites returned %d favorites and the first one was empty", len(favorites)) - } -} - -// Test that a valid tweet can be fetched properly -// and that unmarshalling of tweet works without error -func Test_GetTweet(t *testing.T) { - const tweetId = 303777106620452864 - const tweetText = `golang-syd is in session. Dave Symonds is now talking about API design and protobufs. #golang http://t.co/eSq3ROwu` - - tweet, err := api.GetTweet(tweetId, nil) - if err != nil { - t.Errorf("GetTweet returned error: %s", err.Error()) - } - - if tweet.Text != tweetText { - t.Errorf("Tweet %d contained incorrect text. Received: %s", tweetId, tweetText) - } - - // Check the entities - expectedEntities := anaconda.Entities{Hashtags: []struct { - Indices []int - Text string - }{struct { - Indices []int - Text string - }{Indices: []int{86, 93}, Text: "golang"}}, Urls: []struct { - Indices []int - Url string - Display_url string - Expanded_url string - }{}, User_mentions: []struct { - Name string - Indices []int - Screen_name string - Id int64 - Id_str string - }{}, Media: []struct { - Id int64 - Id_str string - Media_url string - Media_url_https string - Url string - Display_url string - Expanded_url string - Sizes anaconda.MediaSizes - Type string - Indices []int - }{struct { - Id int64 - Id_str string - Media_url string - Media_url_https string - Url string - Display_url string - Expanded_url string - Sizes anaconda.MediaSizes - Type string - Indices []int - }{Id: 303777106628841472, Id_str: "303777106628841472", Media_url: "http://pbs.twimg.com/media/BDc7q0OCEAAoe2C.jpg", Media_url_https: "https://pbs.twimg.com/media/BDc7q0OCEAAoe2C.jpg", Url: 
"http://t.co/eSq3ROwu", Display_url: "pic.twitter.com/eSq3ROwu", Expanded_url: "http://twitter.com/golang/status/303777106620452864/photo/1", Sizes: anaconda.MediaSizes{Medium: anaconda.MediaSize{W: 600, H: 450, Resize: "fit"}, Thumb: anaconda.MediaSize{W: 150, H: 150, Resize: "crop"}, Small: anaconda.MediaSize{W: 340, H: 255, Resize: "fit"}, Large: anaconda.MediaSize{W: 1024, H: 768, Resize: "fit"}}, Type: "photo", Indices: []int{94, 114}}}} - if !reflect.DeepEqual(tweet.Entities, expectedEntities) { - t.Errorf("Tweet entities differ") - } - -} - -// This assumes that the current user has at least two pages' worth of followers -func Test_GetFollowersListAll(t *testing.T) { - result := api.GetFollowersListAll(nil) - i := 0 - - for page := range result { - if i == 2 { - return - } - - if page.Error != nil { - t.Errorf("Receved error from GetFollowersListAll: %s", page.Error) - } - - if page.Followers == nil || len(page.Followers) == 0 { - t.Errorf("Received invalid value for page %d of followers: %v", i, page.Followers) - } - i++ - } -} - -// Test that setting the delay actually changes the stored delay value -func Test_TwitterApi_SetDelay(t *testing.T) { - const OLD_DELAY = 1 * time.Second - const NEW_DELAY = 20 * time.Second - api.EnableThrottling(OLD_DELAY, 4) - - delay := api.GetDelay() - if delay != OLD_DELAY { - t.Errorf("Expected initial delay to be the default delay (%s)", anaconda.DEFAULT_DELAY.String()) - } - - api.SetDelay(NEW_DELAY) - - if newDelay := api.GetDelay(); newDelay != NEW_DELAY { - t.Errorf("Attempted to set delay to %s, but delay is now %s (original delay: %s)", NEW_DELAY, newDelay, delay) - } -} - -func Test_TwitterApi_TwitterErrorDoesNotExist(t *testing.T) { - - // Try fetching a tweet that no longer exists (was deleted) - const DELETED_TWEET_ID = 404409873170841600 - - tweet, err := api.GetTweet(DELETED_TWEET_ID, nil) - if err == nil { - t.Errorf("Expected an error when fetching tweet with id %d but got none - tweet object is %+v", 
DELETED_TWEET_ID, tweet) - } - - apiErr, ok := err.(*anaconda.ApiError) - if !ok { - t.Errorf("Expected an *anaconda.ApiError, and received error message %s, (%+v)", err.Error(), err) - } - - terr, ok := apiErr.Decoded.First().(anaconda.TwitterError) - - if !ok { - t.Errorf("TwitterErrorResponse.First() should return value of type TwitterError, not %s", reflect.TypeOf(apiErr.Decoded.First())) - } - - if code := terr.Code; code != anaconda.TwitterErrorDoesNotExist { - if code == anaconda.TwitterErrorRateLimitExceeded { - t.Errorf("Rate limit exceeded during testing - received error code %d instead of %d", anaconda.TwitterErrorRateLimitExceeded, anaconda.TwitterErrorDoesNotExist) - } else { - - t.Errorf("Expected Twitter to return error code %d, and instead received error code %d", anaconda.TwitterErrorDoesNotExist, code) - } - } -} - -// Test that the client can be used to throttle to an arbitrary duration -func Test_TwitterApi_Throttling(t *testing.T) { - const MIN_DELAY = 15 * time.Second - - api.EnableThrottling(MIN_DELAY, 5) - oldDelay := api.GetDelay() - api.SetDelay(MIN_DELAY) - - now := time.Now() - _, err := api.GetSearch("golang", nil) - if err != nil { - t.Errorf("GetSearch yielded error %s", err.Error()) - } - _, err = api.GetSearch("anaconda", nil) - if err != nil { - t.Errorf("GetSearch yielded error %s", err.Error()) - } - after := time.Now() - - if difference := after.Sub(now); difference < MIN_DELAY { - t.Errorf("Expected delay of at least %d. 
Actual delay: %s", MIN_DELAY.String(), difference.String()) - } - - // Reset the delay to its previous value - api.SetDelay(oldDelay) -} diff --git a/kit/github.com/ChimeraCoder/anaconda/twitter_user.go b/kit/github.com/ChimeraCoder/anaconda/twitter_user.go deleted file mode 100644 index c172840..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/twitter_user.go +++ /dev/null @@ -1,43 +0,0 @@ -package anaconda - -type User struct { - ContributorsEnabled bool `json:"contributors_enabled"` - CreatedAt string `json:"created_at"` - DefaultProfile bool `json:"default_profile"` - DefaultProfileImage bool `json:"default_profile_image"` - Description string `json:"description"` - FavouritesCount int `json:"favourites_count"` - FollowRequestSent bool `json:"follow_request_sent"` - FollowersCount int `json:"followers_count"` - Following bool `json:"following"` - FriendsCount int `json:"friends_count"` - GeoEnabled bool `json:"geo_enabled"` - Id int64 `json:"id"` - IdStr string `json:"id_str"` - IsTranslator bool `json:"is_translator"` - Lang string `json:"lang"` - ListedCount int64 `json:"listed_count"` - Location string `json:"location"` - Name string `json:"name"` - Notifications bool `json:"notifications"` - ProfileBackgroundColor string `json:"profile_background_color"` - ProfileBackgroundImageURL string `json:"profile_background_image_url"` - ProfileBackgroundImageUrlHttps string `json:"profile_background_image_url_https"` - ProfileBackgroundTile bool `json:"profile_background_tile"` - ProfileImageURL string `json:"profile_image_url"` - ProfileImageUrlHttps string `json:"profile_image_url_https"` - ProfileLinkColor string `json:"profile_link_color"` - ProfileSidebarBorderColor string `json:"profile_sidebar_border_color"` - ProfileSidebarFillColor string `json:"profile_sidebar_fill_color"` - ProfileTextColor string `json:"profile_text_color"` - ProfileUseBackgroundImage bool `json:"profile_use_background_image"` - Protected bool `json:"protected"` - ScreenName string 
`json:"screen_name"` - ShowAllInlineMedia bool `json:"show_all_inline_media"` - Status *Tweet `json:"status"` // Only included if the user is a friend - StatusesCount int64 `json:"statuses_count"` - TimeZone string `json:"time_zone"` - URL string `json:"url"` - UtcOffset int `json:"utc_offset"` - Verified bool `json:"verified"` -} diff --git a/kit/github.com/ChimeraCoder/anaconda/users.go b/kit/github.com/ChimeraCoder/anaconda/users.go deleted file mode 100644 index 3b5be50..0000000 --- a/kit/github.com/ChimeraCoder/anaconda/users.go +++ /dev/null @@ -1,56 +0,0 @@ -package anaconda - -import ( - "net/url" - "strconv" -) - -func (a TwitterApi) GetUsersLookup(usernames string, v url.Values) (u []User, err error) { - v = cleanValues(v) - v.Set("screen_name", usernames) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/users/lookup.json", v, &u, _GET, response_ch} - return u, (<-response_ch).err -} - -func (a TwitterApi) GetUsersLookupByIds(ids []int64, v url.Values) (u []User, err error) { - var pids string - for w, i := range ids { - //pids += strconv.Itoa(i) - pids += strconv.FormatInt(i, 10) - if w != len(ids)-1 { - pids += "," - } - } - v = cleanValues(v) - v.Set("user_id", pids) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/users/lookup.json", v, &u, _GET, response_ch} - return u, (<-response_ch).err -} - -func (a TwitterApi) GetUsersShow(username string, v url.Values) (u User, err error) { - v = cleanValues(v) - v.Set("screen_name", username) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/users/show.json", v, &u, _GET, response_ch} - return u, (<-response_ch).err -} - -func (a TwitterApi) GetUsersShowById(id int64, v url.Values) (u User, err error) { - v = cleanValues(v) - v.Set("user_id", strconv.FormatInt(id, 10)) - response_ch := make(chan response) - a.queryQueue <- query{BaseUrl + "/users/show.json", v, &u, _GET, response_ch} - return u, (<-response_ch).err -} - -func (a TwitterApi) 
GetUserSearch(searchTerm string, v url.Values) (u []User, err error) { - v = cleanValues(v) - v.Set("q", searchTerm) - // Set other values before calling this method: - // page, count, include_entities - response_ch := make(chan response) - a.queryQueue <- query{"http://api.twitter.com/1.1/users/search.json", v, &u, _GET, response_ch} - return u, (<-response_ch).err -} diff --git a/kit/github.com/ChimeraCoder/tokenbucket/.gitignore b/kit/github.com/ChimeraCoder/tokenbucket/.gitignore deleted file mode 100644 index 5824d3d..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.git.genus -*.swp -*.swo -*.swn -conf.sh diff --git a/kit/github.com/ChimeraCoder/tokenbucket/COPYING b/kit/github.com/ChimeraCoder/tokenbucket/COPYING deleted file mode 100644 index 65c5ca8..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/COPYING +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. 
- - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. 
A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. 
- - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/kit/github.com/ChimeraCoder/tokenbucket/LICENSE b/kit/github.com/ChimeraCoder/tokenbucket/LICENSE deleted file mode 120000 index d24842f..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/LICENSE +++ /dev/null @@ -1 +0,0 @@ -COPYING \ No newline at end of file diff --git a/kit/github.com/ChimeraCoder/tokenbucket/README b/kit/github.com/ChimeraCoder/tokenbucket/README deleted file mode 100644 index 73811de..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/README +++ /dev/null @@ -1,48 +0,0 @@ -[![GoDoc](http://godoc.org/github.com/gocircuit/escher/kit/github.com/gocircuit/escher/kit/github.com/ChimeraCoder/tokenbucket?status.png)](http://godoc.org/github.com/gocircuit/escher/kit/github.com/gocircuit/escher/kit/github.com/ChimeraCoder/tokenbucket) - -tokenbucket -==================== - -This package provides an implementation of [Token bucket](https://en.wikipedia.org/wiki/Token_bucket) scheduling in Go. It is useful for implementing rate-limiting, traffic shaping, or other sorts of scheduling that depend on bandwidth constraints. 
- - -Example ------------- - - -To create a new bucket, specify a capacity (how many tokens can be stored "in the bank"), and a rate (how often a new token is added). - -````go - - // Create a new bucket - // Allow a new action every 5 seconds, with a maximum of 3 "in the bank" - bucket := tokenbucket.NewBucket(3, 5 * time.Second) -```` - -This bucket should be shared between any functions that share the same constraints. (These functions may or may not run in separate goroutines). - - -Anytime a regulated action is performed, spend a token. - -````go - // To perform a regulated action, we must spend a token - // RegulatedAction will not be performed until the bucket contains enough tokens - <-bucket.SpendToken(1) - RegulatedAction() -```` - -`SpendToken` returns immediately. Reading from the channel that it returns will block until the action has "permission" to continue (ie, until there are enough tokens in the bucket). - - -(The channel that `SpendToken` returns is of type `error`. For now, the value will always be `nil`, so it can be ignored.) - - - -####License - -`tokenbucket` is free software provided under version 3 of the LGPL license. - - -Software that uses `tokenbucket` may be released under *any* license, as long as the source code for `tokenbucket` (including any modifications) are made available under the LGPLv3 license. - -You do not need to release the rest of the software under the LGPL, or any free/open-source license, for that matter (though we would encourage you to do so!). 
diff --git a/kit/github.com/ChimeraCoder/tokenbucket/README.md b/kit/github.com/ChimeraCoder/tokenbucket/README.md deleted file mode 120000 index 100b938..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/README.md +++ /dev/null @@ -1 +0,0 @@ -README \ No newline at end of file diff --git a/kit/github.com/ChimeraCoder/tokenbucket/tokenbucket.go b/kit/github.com/ChimeraCoder/tokenbucket/tokenbucket.go deleted file mode 100644 index 215e7a7..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/tokenbucket.go +++ /dev/null @@ -1,86 +0,0 @@ -package tokenbucket - -import ( - "sync" - "time" -) - -type Bucket struct { - capacity int64 - tokens chan struct{} - rate time.Duration // Add a token to the bucket every 1/r units of time - rateMutex sync.Mutex -} - -func NewBucket(rate time.Duration, capacity int64) *Bucket { - - //A bucket is simply a channel with a buffer representing the maximum size - tokens := make(chan struct{}, capacity) - - b := &Bucket{capacity, tokens, rate, sync.Mutex{}} - - //Set off a function that will continuously add tokens to the bucket - go func(b *Bucket) { - ticker := time.NewTicker(rate) - for _ = range ticker.C { - b.tokens <- struct{}{} - } - }(b) - - return b -} - -func (b *Bucket) GetRate() time.Duration { - b.rateMutex.Lock() - tmp := b.rate - b.rateMutex.Unlock() - return tmp -} - -func (b *Bucket) SetRate(rate time.Duration) { - b.rateMutex.Lock() - b.rate = rate - b.rateMutex.Unlock() -} - -//AddTokens manually adds n tokens to the bucket -func (b *Bucket) AddToken(n int64) { -} - -func (b *Bucket) withdrawTokens(n int64) error { - for i := int64(0); i < n; i++ { - <-b.tokens - } - return nil -} - -func (b *Bucket) SpendToken(n int64) <-chan error { - // Default to spending a single token - if n < 0 { - n = 1 - } - - c := make(chan error) - go func(b *Bucket, n int64, c chan error) { - c <- b.withdrawTokens(n) - close(c) - return - }(b, n, c) - - return c -} - -// Drain will empty all tokens in the bucket -// If the tokens are 
being added too quickly (if the rate is too fast) -// this will never drain -func (b *Bucket) Drain() error{ - // TODO replace this with a more solid approach (such as replacing the channel altogether) - for { - select { - case _ = <-b.tokens: - continue - default: - return nil - } - } -} diff --git a/kit/github.com/ChimeraCoder/tokenbucket/tokenbucket_test.go b/kit/github.com/ChimeraCoder/tokenbucket/tokenbucket_test.go deleted file mode 100644 index a253fa9..0000000 --- a/kit/github.com/ChimeraCoder/tokenbucket/tokenbucket_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package tokenbucket_test - -import ( - "github.com/gocircuit/escher/kit/github.com/ChimeraCoder/tokenbucket" - "testing" - "time" -) - -func Example_BucketUse() { - // Allow a new action every 5 seconds, with a maximum of 3 "in the bank" - bucket := tokenbucket.NewBucket(5*time.Second, 3) - - // To perform a regulated action, we must spend a token - // RegulatedAction will not be performed until the bucket contains enough tokens - <-bucket.SpendToken(1) - RegulatedAction() -} - -// RegulatedAction represents some function that is rate-limited, monitored, -// or otherwise regulated -func RegulatedAction() { - // Some expensive action goes on here -} - -// Test that a bucket that is full does not block execution -func Test_BucketBuffering(t *testing.T) { - // Create a bucket with capacity 3, that adds tokens every 4 seconds - const RATE = 4 * time.Second - const CAPACITY = 3 - const ERROR = 500 * time.Millisecond - b := tokenbucket.NewBucket(RATE, CAPACITY) - - // Allow the bucket enough time to fill to capacity - time.Sleep(CAPACITY * RATE) - - // Check that we can empty the bucket without wasting any time - before := time.Now() - <-b.SpendToken(1) - <-b.SpendToken(1) - <-b.SpendToken(1) - after := time.Now() - - if diff := after.Sub(before); diff > RATE { - t.Errorf("Waited %d seconds, though this should have been nearly instantaneous", diff) - } -} - -// Test that a bucket that is empty blocks execution 
for the correct amount of time -func Test_BucketCreation(t *testing.T) { - // Create a bucket with capacity 3, that adds tokens every 4 seconds - const RATE = 4 * time.Second - const CAPACITY = 3 - const ERROR = 500 * time.Millisecond - const EXPECTED_DURATION = RATE * CAPACITY - - b := tokenbucket.NewBucket(RATE, CAPACITY) - - // Ensure that the bucket is empty - <-b.SpendToken(1) - <-b.SpendToken(1) - <-b.SpendToken(1) - <-b.SpendToken(1) - - // Spending three times on an empty bucket should take 12 seconds - // (Take the average across three, due to imprecision/scheduling) - before := time.Now() - <-b.SpendToken(1) - <-b.SpendToken(1) - <-b.SpendToken(1) - after := time.Now() - - lower := EXPECTED_DURATION - ERROR - upper := EXPECTED_DURATION + ERROR - if diff := after.Sub(before); diff < lower || diff > upper { - t.Errorf("Waited %s seconds, though really should have waited between %s and %s", diff.String(), lower.String(), upper.String()) - } -} diff --git a/kit/github.com/onsi/ginkgo/.gitignore b/kit/github.com/onsi/ginkgo/.gitignore deleted file mode 100644 index 7086ff0..0000000 --- a/kit/github.com/onsi/ginkgo/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.DS_Store -TODO -tmp/**/* -*.coverprofile -.git.genus diff --git a/kit/github.com/onsi/ginkgo/.travis.yml b/kit/github.com/onsi/ginkgo/.travis.yml deleted file mode 100644 index 988b7b2..0000000 --- a/kit/github.com/onsi/ginkgo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - 1.3 - -install: - - go get -v ./... 
- - go get code.google.com/p/go.tools/cmd/cover - - go get github.com/onsi/gomega - - go install github.com/onsi/ginkgo/ginkgo - - export PATH=$PATH:$HOME/gopath/bin - -script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race diff --git a/kit/github.com/onsi/ginkgo/CHANGELOG.md b/kit/github.com/onsi/ginkgo/CHANGELOG.md deleted file mode 100644 index 27c687a..0000000 --- a/kit/github.com/onsi/ginkgo/CHANGELOG.md +++ /dev/null @@ -1,80 +0,0 @@ -## HEAD - -Improvements: - -- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does. - -## 1.1.0 (8/2/2014) - -No changes, just dropping the beta. - -## 1.1.0-beta (7/22/2014) -New Features: - -- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag. -- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, evne when they pass. This allows CI systems to detect accidental commits of focused test suites. -- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes. -- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command. -- `ginkgo --failFast` aborts the test suite after the first failure. -- `ginkgo generate file_1 file_2` can take multiple file arguments. -- Ginkgo now summarizes any spec failures that occured at the end of the test run. -- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed. - -Improvements: - -- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped. -- `ginkgo --untilItFails` no longer recompiles between attempts. 
-- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed. - -Bug Fixes: - -- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`. -- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic - -## 1.0.0 (5/24/2014) -New Features: - -- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode` - -Improvements: - -- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor. -- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified. - -Bug Fixes: - -- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s. -- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail. -- Fix all remaining race conditions in Ginkgo's test suite. - -## 1.0.0-beta (4/14/2014) -Breaking changes: - -- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead -- Modified the Reporter interface -- `watch` is now a subcommand, not a flag. - -DSL changes: - -- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites. -- `AfterSuite` is triggered on interrupt (`^C`) as well as exit. -- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes. - -CLI changes: - -- `watch` is now a subcommand, not a flag -- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. 
Refreshing this list can be done by running `ginkgo nodot` -- Additional arguments can be passed to specs. Pass them after the `--` separator -- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp. -- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs. - -Misc: - -- Start using semantic versioning -- Start maintaining changelog - -Major refactor: - -- Pull out Ginkgo's internal to `internal` -- Rename `example` everywhere to `spec` -- Much more! diff --git a/kit/github.com/onsi/ginkgo/MIT.LICENSE b/kit/github.com/onsi/ginkgo/MIT.LICENSE deleted file mode 100644 index 941ee5b..0000000 --- a/kit/github.com/onsi/ginkgo/MIT.LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Onsi Fakhouri - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/kit/github.com/onsi/ginkgo/README.md b/kit/github.com/onsi/ginkgo/README.md deleted file mode 100644 index b41018a..0000000 --- a/kit/github.com/onsi/ginkgo/README.md +++ /dev/null @@ -1,109 +0,0 @@ -![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) - -[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo) - -Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! - -To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). - -## Feature List - -- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite) - -- Structure your BDD-style tests expressively: - - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) - - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown - - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions - - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). - - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. 
- -- A comprehensive test runner that lets you: - - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs) - - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line - - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. - - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs) - -- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests) - -- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. - -- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: - - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime - - `ginkgo -cover` runs your tests using Golang's code coverage tool - - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package - - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression - - `ginkgo -r` runs all tests suites under the current directory - - `ginkgo -v` prints out identifying information for each tests just before it runs - - And much more: run `ginkgo help` for details! - - The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test` - -- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. 
Run tests immediately as you develop! - -- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details. - -- A modular architecture that lets you easily: - - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). - - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo - -## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library - -Learn more about Gomega [here](http://onsi.github.io/gomega/) - -## Set Me Up! - -You'll need Golang v1.2+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!) - -```bash - -go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI -go get github.com/onsi/gomega # fetches the matcher library - -cd path/to/package/you/want/to/test - -ginkgo bootstrap # set up a new ginkgo suite -ginkgo generate # will create a sample test file. edit this file and add your tests then... - -go test # to run your tests - -ginkgo # also runs your tests - -``` - -## I'm new to Go: What are my testing options? - -Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set. - -With that said, it's great to know what your options are :) - -### What Golang gives you out of the box - -Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. 
- -### Matcher libraries for Golang's XUnit style tests - -A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: - -- [testify](https://github.com/stretchr/testify) -- [gocheck](http://labix.org/gocheck) - -You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) - -### BDD style testing frameworks - -There are a handful of BDD-style testing frameworks written for Golang. Here are a few: - -- [Ginkgo](https://github.com/onsi/ginkgo) ;) -- [GoConvey](https://github.com/smartystreets/goconvey) -- [Goblin](https://github.com/franela/goblin) -- [Mao](https://github.com/azer/mao) -- [Zen](https://github.com/pranavraja/zen) - -Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries. - -Go explore! - -## License - -Ginkgo is MIT-Licensed \ No newline at end of file diff --git a/kit/github.com/onsi/ginkgo/config/config.go b/kit/github.com/onsi/ginkgo/config/config.go deleted file mode 100644 index e9d9358..0000000 --- a/kit/github.com/onsi/ginkgo/config/config.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Ginkgo accepts a number of configuration options. 
- -These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) - -You can also learn more via - - ginkgo help - -or (I kid you not): - - go test -asdf -*/ -package config - -import ( - "flag" - "time" - - "fmt" -) - -const VERSION = "1.1.0" - -type GinkgoConfigType struct { - RandomSeed int64 - RandomizeAllSpecs bool - FocusString string - SkipString string - SkipMeasurements bool - FailOnPending bool - FailFast bool - - ParallelNode int - ParallelTotal int - SyncHost string - StreamHost string -} - -var GinkgoConfig = GinkgoConfigType{} - -type DefaultReporterConfigType struct { - NoColor bool - SlowSpecThreshold float64 - NoisyPendings bool - Succinct bool - Verbose bool - FullTrace bool -} - -var DefaultReporterConfig = DefaultReporterConfigType{} - -func processPrefix(prefix string) string { - if prefix != "" { - prefix = prefix + "." - } - return prefix -} - -func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { - prefix = processPrefix(prefix) - flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") - flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe/Context groups.") - flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") - flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") - flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") - flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") - flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") - - if includeParallelFlags { - flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") - flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. 
For running specs in parallel.") - flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") - flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") - } - - flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") - flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).") - flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") - flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") - flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") - flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") -} - -func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { - prefix = processPrefix(prefix) - result := make([]string, 0) - - if ginkgo.RandomSeed > 0 { - result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) - } - - if ginkgo.RandomizeAllSpecs { - result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) - } - - if ginkgo.SkipMeasurements { - result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) - } - - if ginkgo.FailOnPending { - result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) - } - - if ginkgo.FailFast { - result = append(result, fmt.Sprintf("--%sfailFast", prefix)) - } - - if 
ginkgo.FocusString != "" { - result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) - } - - if ginkgo.SkipString != "" { - result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) - } - - if ginkgo.ParallelNode != 0 { - result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) - } - - if ginkgo.ParallelTotal != 0 { - result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) - } - - if ginkgo.StreamHost != "" { - result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) - } - - if ginkgo.SyncHost != "" { - result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost)) - } - - if reporter.NoColor { - result = append(result, fmt.Sprintf("--%snoColor", prefix)) - } - - if reporter.SlowSpecThreshold > 0 { - result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) - } - - if !reporter.NoisyPendings { - result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) - } - - if reporter.Verbose { - result = append(result, fmt.Sprintf("--%sv", prefix)) - } - - if reporter.Succinct { - result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) - } - - if reporter.FullTrace { - result = append(result, fmt.Sprintf("--%strace", prefix)) - } - - return result -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go b/kit/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go deleted file mode 100644 index 383137d..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go +++ /dev/null @@ -1,126 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "os" - "path/filepath" - "strings" - "text/template" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/nodot" -) - -func BuildBootstrapCommand() *Command { - var noDot bool - flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError) - flagSet.BoolVar(&noDot, 
"nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega") - - return &Command{ - Name: "bootstrap", - FlagSet: flagSet, - UsageCommand: "ginkgo bootstrap ", - Usage: []string{ - "Bootstrap a test suite for the current package", - "Accepts the following flags:", - }, - Command: func(args []string, additionalArgs []string) { - generateBootstrap(noDot) - }, - } -} - -var bootstrapText = `package {{.Package}}_test - -import ( - {{.GinkgoImport}} - {{.GomegaImport}} - - "testing" -) - -func Test{{.FormattedPackage}}(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "{{.FormattedPackage}} Suite") -} -` - -type bootstrapData struct { - Package string - FormattedPackage string - GinkgoImport string - GomegaImport string -} - -func getPackage() string { - workingDir, err := os.Getwd() - if err != nil { - complainAndQuit("Could not find package: " + err.Error()) - } - packageName := filepath.Base(workingDir) - return strings.Replace(packageName, "-", "_", -1) -} - -func fileExists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } - if os.IsNotExist(err) { - return false - } - return false -} - -func generateBootstrap(noDot bool) { - packageName := getPackage() - formattedPackage := strings.Replace(strings.Title(strings.Replace(packageName, "_", " ", -1)), " ", "", -1) - data := bootstrapData{ - Package: packageName, - FormattedPackage: formattedPackage, - GinkgoImport: `. "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`, - GomegaImport: `. 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega"`, - } - - if noDot { - data.GinkgoImport = `"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"` - data.GomegaImport = `"github.com/gocircuit/escher/kit/github.com/onsi/gomega"` - } - - targetFile := fmt.Sprintf("%s_suite_test.go", packageName) - if fileExists(targetFile) { - fmt.Printf("%s already exists.\n\n", targetFile) - os.Exit(1) - } else { - fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) - } - - f, err := os.Create(targetFile) - if err != nil { - complainAndQuit("Could not create file: " + err.Error()) - panic(err.Error()) - } - defer f.Close() - - bootstrapTemplate, err := template.New("bootstrap").Parse(bootstrapText) - if err != nil { - panic(err.Error()) - } - - buf := &bytes.Buffer{} - bootstrapTemplate.Execute(buf, data) - - if noDot { - contents, err := nodot.ApplyNoDot(buf.Bytes()) - if err != nil { - complainAndQuit("Failed to import nodot declarations: " + err.Error()) - } - fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot") - buf = bytes.NewBuffer(contents) - } - - buf.WriteTo(f) - - goFmt(targetFile) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go b/kit/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go deleted file mode 100644 index 02e2b3b..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go +++ /dev/null @@ -1,123 +0,0 @@ -package convert - -import ( - "fmt" - "go/ast" - "strings" - "unicode" -) - -/* - * Creates a func init() node - */ -func createVarUnderscoreBlock() *ast.ValueSpec { - valueSpec := &ast.ValueSpec{} - object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0} - ident := &ast.Ident{Name: "_", Obj: object} - valueSpec.Names = append(valueSpec.Names, ident) - return valueSpec -} - -/* - * Creates a Describe("Testing with ginkgo", func() { }) node - */ -func createDescribeBlock() 
*ast.CallExpr { - blockStatement := &ast.BlockStmt{List: []ast.Stmt{}} - - fieldList := &ast.FieldList{} - funcType := &ast.FuncType{Params: fieldList} - funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement} - basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""} - describeIdent := &ast.Ident{Name: "Describe"} - return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}} -} - -/* - * Convenience function to return the name of the *testing.T param - * for a Test function that will be rewritten. This is useful because - * we will want to replace the usage of this named *testing.T inside the - * body of the function with a GinktoT. - */ -func namedTestingTArg(node *ast.FuncDecl) string { - return node.Type.Params.List[0].Names[0].Name // *exhale* -} - -/* - * Convenience function to return the block statement node for a Describe statement - */ -func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt { - var funcLit *ast.FuncLit - var found = false - - for _, node := range desc.Args { - switch node := node.(type) { - case *ast.FuncLit: - found = true - funcLit = node - break - } - } - - if !found { - panic("Error finding ast.FuncLit inside describe statement. 
Somebody done goofed.") - } - - return funcLit.Body -} - -/* convenience function for creating an It("TestNameHere") - * with all the body of the test function inside the anonymous - * func passed to It() - */ -func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt { - blockStatement := &ast.BlockStmt{List: testFunc.Body.List} - fieldList := &ast.FieldList{} - funcType := &ast.FuncType{Params: fieldList} - funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement} - - testName := rewriteTestName(testFunc.Name.Name) - basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)} - itBlockIdent := &ast.Ident{Name: "It"} - callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}} - return &ast.ExprStmt{X: callExpr} -} - -/* -* rewrite test names to be human readable -* eg: rewrites "TestSomethingAmazing" as "something amazing" - */ -func rewriteTestName(testName string) string { - nameComponents := []string{} - currentString := "" - indexOfTest := strings.Index(testName, "Test") - if indexOfTest != 0 { - return testName - } - - testName = strings.Replace(testName, "Test", "", 1) - first, rest := testName[0], testName[1:] - testName = string(unicode.ToLower(rune(first))) + rest - - for _, rune := range testName { - if unicode.IsUpper(rune) { - nameComponents = append(nameComponents, currentString) - currentString = string(unicode.ToLower(rune)) - } else { - currentString += string(rune) - } - } - - return strings.Join(append(nameComponents, currentString), " ") -} - -func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr { - return &ast.CallExpr{ - Lparen: ident.NamePos + 1, - Rparen: ident.NamePos + 2, - Fun: &ast.Ident{Name: "GinkgoT"}, - } -} - -func newGinkgoTInterface() *ast.Ident { - return &ast.Ident{Name: "GinkgoTInterface"} -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert/import.go b/kit/github.com/onsi/ginkgo/ginkgo/convert/import.go deleted file mode 100644 index 10bf96a..0000000 --- 
a/kit/github.com/onsi/ginkgo/ginkgo/convert/import.go +++ /dev/null @@ -1,91 +0,0 @@ -package convert - -import ( - "errors" - "fmt" - "go/ast" -) - -/* - * Given the root node of an AST, returns the node containing the - * import statements for the file. - */ -func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) { - for _, declaration := range rootNode.Decls { - decl, ok := declaration.(*ast.GenDecl) - if !ok || len(decl.Specs) == 0 { - continue - } - - _, ok = decl.Specs[0].(*ast.ImportSpec) - if ok { - imports = decl - return - } - } - - err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode)) - return -} - -/* - * Removes "testing" import, if present - */ -func removeTestingImport(rootNode *ast.File) { - importDecl, err := importsForRootNode(rootNode) - if err != nil { - panic(err.Error()) - } - - var index int - for i, importSpec := range importDecl.Specs { - importSpec := importSpec.(*ast.ImportSpec) - if importSpec.Path.Value == "\"testing\"" { - index = i - break - } - } - - importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...) 
-} - -/* - * Adds import statements for onsi/ginkgo, if missing - */ -func addGinkgoImports(rootNode *ast.File) { - importDecl, err := importsForRootNode(rootNode) - if err != nil { - panic(err.Error()) - } - - if len(importDecl.Specs) == 0 { - // TODO: might need to create a import decl here - panic("unimplemented : expected to find an imports block") - } - - needsGinkgo := true - for _, importSpec := range importDecl.Specs { - importSpec, ok := importSpec.(*ast.ImportSpec) - if !ok { - continue - } - - if importSpec.Path.Value == "\"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo\"" { - needsGinkgo = false - } - } - - if needsGinkgo { - importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo\"")) - } -} - -/* - * convenience function to create an import statement - */ -func createImport(name, path string) *ast.ImportSpec { - return &ast.ImportSpec{ - Name: &ast.Ident{Name: name}, - Path: &ast.BasicLit{Kind: 9, Value: path}, - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go b/kit/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go deleted file mode 100644 index ed09c46..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go +++ /dev/null @@ -1,127 +0,0 @@ -package convert - -import ( - "fmt" - "go/build" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" -) - -/* - * RewritePackage takes a name (eg: my-package/tools), finds its test files using - * Go's build package, and then rewrites them. A ginkgo test suite file will - * also be added for this package, and all of its child packages. 
- */ -func RewritePackage(packageName string) { - pkg, err := packageWithName(packageName) - if err != nil { - panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error())) - } - - for _, filename := range findTestsInPackage(pkg) { - rewriteTestsInFile(filename) - } - return -} - -/* - * Given a package, findTestsInPackage reads the test files in the directory, - * and then recurses on each child package, returning a slice of all test files - * found in this process. - */ -func findTestsInPackage(pkg *build.Package) (testfiles []string) { - for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) { - testfiles = append(testfiles, filepath.Join(pkg.Dir, file)) - } - - dirFiles, err := ioutil.ReadDir(pkg.Dir) - if err != nil { - panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error())) - } - - re := regexp.MustCompile(`^[._]`) - - for _, file := range dirFiles { - if !file.IsDir() { - continue - } - - if re.Match([]byte(file.Name())) { - continue - } - - packageName := filepath.Join(pkg.ImportPath, file.Name()) - subPackage, err := packageWithName(packageName) - if err != nil { - panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error())) - } - - testfiles = append(testfiles, findTestsInPackage(subPackage)...) 
- } - - addGinkgoSuiteForPackage(pkg) - goFmtPackage(pkg) - return -} - -/* - * Shells out to `ginkgo bootstrap` to create a test suite file - */ -func addGinkgoSuiteForPackage(pkg *build.Package) { - originalDir, err := os.Getwd() - if err != nil { - panic(err) - } - - suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go") - - _, err = os.Stat(suite_test_file) - if err == nil { - return // test file already exists, this should be a no-op - } - - err = os.Chdir(pkg.Dir) - if err != nil { - panic(err) - } - - output, err := exec.Command("ginkgo", "bootstrap").Output() - - if err != nil { - panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error())) - } - - err = os.Chdir(originalDir) - if err != nil { - panic(err) - } -} - -/* - * Shells out to `go fmt` to format the package - */ -func goFmtPackage(pkg *build.Package) { - output, err := exec.Command("go", "fmt", pkg.ImportPath).Output() - - if err != nil { - fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error()) - } -} - -/* - * Attempts to return a package with its test files already read. 
- * The ImportMode arg to build.Import lets you specify if you want go to read the - * buildable go files inside the package, but it fails if the package has no go files - */ -func packageWithName(name string) (pkg *build.Package, err error) { - pkg, err = build.Default.Import(name, ".", build.ImportMode(0)) - if err == nil { - return - } - - pkg, err = build.Default.Import(name, ".", build.ImportMode(1)) - return -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go b/kit/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go deleted file mode 100644 index b33595c..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go +++ /dev/null @@ -1,56 +0,0 @@ -package convert - -import ( - "go/ast" - "regexp" -) - -/* - * Given a root node, walks its top level statements and returns - * points to function nodes to rewrite as It statements. - * These functions, according to Go testing convention, must be named - * TestWithCamelCasedName and receive a single *testing.T argument. 
- */ -func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) { - testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+") - - ast.Inspect(rootNode, func(node ast.Node) bool { - if node == nil { - return false - } - - switch node := node.(type) { - case *ast.FuncDecl: - matches := testNameRegexp.MatchString(node.Name.Name) - - if matches && receivesTestingT(node) { - testsToRewrite = append(testsToRewrite, node) - } - } - - return true - }) - - return -} - -/* - * convenience function that looks at args to a function and determines if its - * params include an argument of type *testing.T - */ -func receivesTestingT(node *ast.FuncDecl) bool { - if len(node.Type.Params.List) != 1 { - return false - } - - base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr) - if !ok { - return false - } - - intermediate := base.X.(*ast.SelectorExpr) - isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing" - isTestingT := intermediate.Sel.Name == "T" - - return isTestingPackage && isTestingT -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go b/kit/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go deleted file mode 100644 index 4b001a7..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go +++ /dev/null @@ -1,163 +0,0 @@ -package convert - -import ( - "bytes" - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "os" -) - -/* - * Given a file path, rewrites any tests in the Ginkgo format. - * First, we parse the AST, and update the imports declaration. - * Then, we walk the first child elements in the file, returning tests to rewrite. - * A top level init func is declared, with a single Describe func inside. - * Then the test functions to rewrite are inserted as It statements inside the Describe. - * Finally we walk the rest of the file, replacing other usages of *testing.T - * Once that is complete, we write the AST back out again to its file. 
- */ -func rewriteTestsInFile(pathToFile string) { - fileSet := token.NewFileSet() - rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0) - if err != nil { - panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error())) - } - - addGinkgoImports(rootNode) - removeTestingImport(rootNode) - - varUnderscoreBlock := createVarUnderscoreBlock() - describeBlock := createDescribeBlock() - varUnderscoreBlock.Values = []ast.Expr{describeBlock} - - for _, testFunc := range findTestFuncs(rootNode) { - rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock) - } - - underscoreDecl := &ast.GenDecl{ - Tok: 85, // gah, magick numbers are needed to make this work - TokPos: 14, // this tricks Go into writing "var _ = Describe" - Specs: []ast.Spec{varUnderscoreBlock}, - } - - imports := rootNode.Decls[0] - tail := rootNode.Decls[1:] - rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...) - rewriteOtherFuncsToUseGinkgoT(rootNode.Decls) - walkNodesInRootNodeReplacingTestingT(rootNode) - - var buffer bytes.Buffer - if err = format.Node(&buffer, fileSet, rootNode); err != nil { - panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error())) - } - - fileInfo, err := os.Stat(pathToFile) - if err != nil { - panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile)) - } - - ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode()) - return -} - -/* - * Given a test func named TestDoesSomethingNeat, rewrites it as - * It("does something neat", func() { __test_body_here__ }) and adds it - * to the Describe's list of statements - */ -func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) { - var funcIndex int = -1 - for index, child := range rootNode.Decls { - if child == testFunc { - funcIndex = index - break - } - } - - if funcIndex < 0 { - panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name)) - } - - var block 
*ast.BlockStmt = blockStatementFromDescribe(describe) - block.List = append(block.List, createItStatementForTestFunc(testFunc)) - replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc)) - - // remove the old test func from the root node's declarations - rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...) - return -} - -/* - * walks nodes inside of a test func's statements and replaces the usage of - * it's named *testing.T param with GinkgoT's - */ -func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) { - ast.Inspect(statementsBlock, func(node ast.Node) bool { - if node == nil { - return false - } - - keyValueExpr, ok := node.(*ast.KeyValueExpr) - if ok { - replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT) - return true - } - - funcLiteral, ok := node.(*ast.FuncLit) - if ok { - replaceTypeDeclTestingTsInFuncLiteral(funcLiteral) - return true - } - - callExpr, ok := node.(*ast.CallExpr) - if !ok { - return true - } - replaceTestingTsInArgsLists(callExpr, testingT) - - funCall, ok := callExpr.Fun.(*ast.SelectorExpr) - if ok { - replaceTestingTsMethodCalls(funCall, testingT) - } - - return true - }) -} - -/* - * rewrite t.Fail() or any other *testing.T method by replacing with T().Fail() - * This function receives a selector expression (eg: t.Fail()) and - * the name of the *testing.T param from the function declaration. 
Rewrites the - * selector expression in place if the target was a *testing.T - */ -func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) { - ident, ok := selectorExpr.X.(*ast.Ident) - if !ok { - return - } - - if ident.Name == testingT { - selectorExpr.X = newGinkgoTFromIdent(ident) - } -} - -/* - * replaces usages of a named *testing.T param inside of a call expression - * with a new GinkgoT object - */ -func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) { - for index, arg := range callExpr.Args { - ident, ok := arg.(*ast.Ident) - if !ok { - continue - } - - if ident.Name == testingT { - callExpr.Args[index] = newGinkgoTFromIdent(ident) - } - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go b/kit/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go deleted file mode 100644 index 418cdc4..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go +++ /dev/null @@ -1,130 +0,0 @@ -package convert - -import ( - "go/ast" -) - -/* - * Rewrites any other top level funcs that receive a *testing.T param - */ -func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) { - for _, decl := range declarations { - decl, ok := decl.(*ast.FuncDecl) - if !ok { - continue - } - - for _, param := range decl.Type.Params.List { - starExpr, ok := param.Type.(*ast.StarExpr) - if !ok { - continue - } - - selectorExpr, ok := starExpr.X.(*ast.SelectorExpr) - if !ok { - continue - } - - xIdent, ok := selectorExpr.X.(*ast.Ident) - if !ok || xIdent.Name != "testing" { - continue - } - - if selectorExpr.Sel.Name != "T" { - continue - } - - param.Type = newGinkgoTInterface() - } - } -} - -/* - * Walks all of the nodes in the file, replacing *testing.T in struct - * and func literal nodes. 
eg: - * type foo struct { *testing.T } - * var bar = func(t *testing.T) { } - */ -func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) { - ast.Inspect(rootNode, func(node ast.Node) bool { - if node == nil { - return false - } - - switch node := node.(type) { - case *ast.StructType: - replaceTestingTsInStructType(node) - case *ast.FuncLit: - replaceTypeDeclTestingTsInFuncLiteral(node) - } - - return true - }) -} - -/* - * replaces named *testing.T inside a composite literal - */ -func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) { - ident, ok := kve.Value.(*ast.Ident) - if !ok { - return - } - - if ident.Name == testingT { - kve.Value = newGinkgoTFromIdent(ident) - } -} - -/* - * replaces *testing.T params in a func literal with GinkgoT - */ -func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) { - for _, arg := range functionLiteral.Type.Params.List { - starExpr, ok := arg.Type.(*ast.StarExpr) - if !ok { - continue - } - - selectorExpr, ok := starExpr.X.(*ast.SelectorExpr) - if !ok { - continue - } - - target, ok := selectorExpr.X.(*ast.Ident) - if !ok { - continue - } - - if target.Name == "testing" && selectorExpr.Sel.Name == "T" { - arg.Type = newGinkgoTInterface() - } - } -} - -/* - * Replaces *testing.T types inside of a struct declaration with a GinkgoT - * eg: type foo struct { *testing.T } - */ -func replaceTestingTsInStructType(structType *ast.StructType) { - for _, field := range structType.Fields.List { - starExpr, ok := field.Type.(*ast.StarExpr) - if !ok { - continue - } - - selectorExpr, ok := starExpr.X.(*ast.SelectorExpr) - if !ok { - continue - } - - xIdent, ok := selectorExpr.X.(*ast.Ident) - if !ok { - continue - } - - if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" { - field.Type = newGinkgoTInterface() - } - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/convert_command.go b/kit/github.com/onsi/ginkgo/ginkgo/convert_command.go deleted file mode 100644 index 
8564b0e..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/convert_command.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/convert" - "os" -) - -func BuildConvertCommand() *Command { - return &Command{ - Name: "convert", - FlagSet: flag.NewFlagSet("convert", flag.ExitOnError), - UsageCommand: "ginkgo convert /path/to/package", - Usage: []string{ - "Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test", - }, - Command: convertPackage, - } -} - -func convertPackage(args []string, additionalArgs []string) { - if len(args) != 1 { - println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package")) - os.Exit(1) - } - - defer func() { - err := recover() - if err != nil { - switch err := err.(type) { - case error: - println(err.Error()) - case string: - println(err) - default: - println(fmt.Sprintf("unexpected error: %#v", err)) - } - os.Exit(1) - } - }() - - convert.RewritePackage(args[0]) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/generate_command.go b/kit/github.com/onsi/ginkgo/ginkgo/generate_command.go deleted file mode 100644 index aa21322..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/generate_command.go +++ /dev/null @@ -1,132 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "path/filepath" - "strings" - "text/template" -) - -func BuildGenerateCommand() *Command { - var noDot bool - flagSet := flag.NewFlagSet("generate", flag.ExitOnError) - flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . 
import ginkgo and gomega") - - return &Command{ - Name: "generate", - FlagSet: flagSet, - UsageCommand: "ginkgo generate ", - Usage: []string{ - "Generate a test file named filename_test.go", - "If the optional argument is omitted, a file named after the package in the current directory will be created.", - "Accepts the following flags:", - }, - Command: func(args []string, additionalArgs []string) { - generateSpec(args, noDot) - }, - } -} - -var specText = `package {{.Package}}_test - -import ( - . "{{.PackageImportPath}}" - - {{if .IncludeImports}}. "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"{{end}} - {{if .IncludeImports}}. "github.com/gocircuit/escher/kit/github.com/onsi/gomega"{{end}} -) - -var _ = Describe("{{.Subject}}", func() { - -}) -` - -type specData struct { - Package string - Subject string - PackageImportPath string - IncludeImports bool -} - -func generateSpec(args []string, noDot bool) { - if len(args) == 0 { - err := generateSpecForSubject("", noDot) - if err != nil { - fmt.Println(err.Error()) - fmt.Println("") - os.Exit(1) - } - fmt.Println("") - return - } - - var failed bool - for _, arg := range args { - err := generateSpecForSubject(arg, noDot) - if err != nil { - failed = true - fmt.Println(err.Error()) - } - } - fmt.Println("") - if failed { - os.Exit(1) - } -} - -func generateSpecForSubject(subject string, noDot bool) error { - packageName := getPackage() - if subject == "" { - subject = packageName - } else { - subject = strings.Split(subject, ".go")[0] - subject = strings.Split(subject, "_test")[0] - } - - formattedSubject := strings.Replace(strings.Title(strings.Replace(subject, "_", " ", -1)), " ", "", -1) - - data := specData{ - Package: packageName, - Subject: formattedSubject, - PackageImportPath: getPackageImportPath(), - IncludeImports: !noDot, - } - - targetFile := fmt.Sprintf("%s_test.go", subject) - if fileExists(targetFile) { - return fmt.Errorf("%s already exists.", targetFile) - } else { - 
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) - } - - f, err := os.Create(targetFile) - if err != nil { - return err - } - defer f.Close() - - specTemplate, err := template.New("spec").Parse(specText) - if err != nil { - return err - } - - specTemplate.Execute(f, data) - goFmt(targetFile) - return nil -} - -func getPackageImportPath() string { - workingDir, err := os.Getwd() - if err != nil { - panic(err.Error()) - } - sep := string(filepath.Separator) - paths := strings.Split(workingDir, sep+"src"+sep) - if len(paths) == 1 { - fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") - return "UNKNOWN_PACKAGE_PATH" - } - return filepath.ToSlash(paths[len(paths)-1]) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/help_command.go b/kit/github.com/onsi/ginkgo/ginkgo/help_command.go deleted file mode 100644 index 6f24d07..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/help_command.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "flag" - "fmt" -) - -func BuildHelpCommand() *Command { - return &Command{ - Name: "help", - FlagSet: flag.NewFlagSet("help", flag.ExitOnError), - UsageCommand: "ginkgo help ", - Usage: []string{ - "Print usage information. 
If a command is passed in, print usage information just for that command.", - }, - Command: printHelp, - } -} - -func printHelp(args []string, additionalArgs []string) { - if len(args) == 0 { - usage() - } else { - command, found := commandMatching(args[0]) - if !found { - complainAndQuit(fmt.Sprintf("Unkown command: %s", args[0])) - } - - usageForCommand(command, true) - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/interrupt_handler.go b/kit/github.com/onsi/ginkgo/ginkgo/interrupt_handler.go deleted file mode 100644 index 81567a4..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/interrupt_handler.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "os" - "os/signal" - "sync" -) - -type InterruptHandler struct { - interruptCount int - lock *sync.Mutex - C chan bool -} - -func NewInterruptHandler() *InterruptHandler { - h := &InterruptHandler{ - lock: &sync.Mutex{}, - C: make(chan bool, 0), - } - - go h.handleInterrupt() - - return h -} - -func (h *InterruptHandler) WasInterrupted() bool { - h.lock.Lock() - defer h.lock.Unlock() - - return h.interruptCount > 0 -} - -func (h *InterruptHandler) handleInterrupt() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - - <-c - signal.Stop(c) - - h.lock.Lock() - h.interruptCount++ - if h.interruptCount == 1 { - close(h.C) - } else if h.interruptCount > 5 { - os.Exit(1) - } - h.lock.Unlock() - - go h.handleInterrupt() -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/main.go b/kit/github.com/onsi/ginkgo/ginkgo/main.go deleted file mode 100644 index d92e9c0..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/main.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -The Ginkgo CLI - -The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) - -You can also learn more by running: - - ginkgo help - -Here are some of the more commonly used commands: - -To install: - - go install github.com/onsi/ginkgo/ginkgo - -To run tests: - - ginkgo - -To run tests in all subdirectories: - - ginkgo 
-r - -To run tests in particular packages: - - ginkgo /path/to/package /path/to/another/package - -To pass arguments/flags to your tests: - - ginkgo -- - -To run tests in parallel - - ginkgo -p - -this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with: - - ginkgo -nodes=N - -(note that you don't need to provide -p in this case). - -By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes. -An alternative is to have the parallel nodes run and stream interleaved output back. This useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output: - - ginkgo -nodes=N -stream=true - -On windows, the default value for stream is true. - -By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can: - - ginkgo -keepGoing - -To monitor packages and rerun tests when changes occur: - - ginkgo watch <-r> - -passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them. -`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages -that depend on X are not rerun. - -[OSX only] To receive (desktop) notifications when a test run completes: - - ginkgo -notify - -this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier` - -Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails. 
You can do this with: - - ginkgo -untilItFails - -To bootstrap a test suite: - - ginkgo bootstrap - -To generate a test file: - - ginkgo generate - -To bootstrap/generate test files without using "." imports: - - ginkgo bootstrap --nodot - ginkgo generate --nodot - -this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run - - ginkgo nodot - -to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added. - -To convert an existing XUnit style test suite to a Ginkgo-style test suite: - - ginkgo convert . - -To unfocus tests: - - ginkgo unfocus - -or - - ginkgo blur - -To print out Ginkgo's version: - - ginkgo version - -To get more help: - - ginkgo help -*/ -package main - -import ( - "flag" - "fmt" - "os" - "os/exec" - "strings" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" -) - -const greenColor = "\x1b[32m" -const redColor = "\x1b[91m" -const defaultStyle = "\x1b[0m" -const lightGrayColor = "\x1b[37m" - -type Command struct { - Name string - AltName string - FlagSet *flag.FlagSet - Usage []string - UsageCommand string - Command func(args []string, additionalArgs []string) - SuppressFlagDocumentation bool - FlagDocSubstitute []string -} - -func (c *Command) Matches(name string) bool { - return c.Name == name || (c.AltName != "" && c.AltName == name) -} - -func (c *Command) Run(args []string, additionalArgs []string) { - c.FlagSet.Parse(args) - c.Command(c.FlagSet.Args(), additionalArgs) -} - -var DefaultCommand *Command -var Commands []*Command - -func init() { - DefaultCommand = BuildRunCommand() - Commands = append(Commands, BuildWatchCommand()) - Commands = append(Commands, BuildBootstrapCommand()) - Commands = append(Commands, BuildGenerateCommand()) - Commands = append(Commands, 
BuildNodotCommand()) - Commands = append(Commands, BuildConvertCommand()) - Commands = append(Commands, BuildUnfocusCommand()) - Commands = append(Commands, BuildVersionCommand()) - Commands = append(Commands, BuildHelpCommand()) -} - -func main() { - args := []string{} - additionalArgs := []string{} - - foundDelimiter := false - - for _, arg := range os.Args[1:] { - if !foundDelimiter { - if arg == "--" { - foundDelimiter = true - continue - } - } - - if foundDelimiter { - additionalArgs = append(additionalArgs, arg) - } else { - args = append(args, arg) - } - } - - if len(args) > 0 { - commandToRun, found := commandMatching(args[0]) - if found { - commandToRun.Run(args[1:], additionalArgs) - return - } - } - - DefaultCommand.Run(args, additionalArgs) -} - -func commandMatching(name string) (*Command, bool) { - for _, command := range Commands { - if command.Matches(name) { - return command, true - } - } - return nil, false -} - -func usage() { - fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION) - usageForCommand(DefaultCommand, false) - for _, command := range Commands { - fmt.Fprintf(os.Stderr, "\n") - usageForCommand(command, false) - } -} - -func usageForCommand(command *Command, longForm bool) { - fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand))) - fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n")) - if command.SuppressFlagDocumentation && !longForm { - fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n ")) - } else { - command.FlagSet.PrintDefaults() - } -} - -func complainAndQuit(complaint string) { - fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint) - os.Exit(1) -} - -func findSuites(args []string, recurse bool, skipPackage string) ([]testsuite.TestSuite, []string) { - suites := []testsuite.TestSuite{} - - if len(args) > 0 { - for _, arg := range args { - suites = append(suites, testsuite.SuitesInDir(arg, recurse)...) 
- } - } else { - suites = testsuite.SuitesInDir(".", recurse) - } - - skippedPackages := []string{} - if skipPackage != "" { - skipFilters := strings.Split(skipPackage, ",") - filteredSuites := []testsuite.TestSuite{} - for _, suite := range suites { - skip := false - for _, skipFilter := range skipFilters { - if strings.Contains(suite.Path, skipFilter) { - skip = true - break - } - } - if skip { - skippedPackages = append(skippedPackages, suite.Path) - } else { - filteredSuites = append(filteredSuites, suite) - } - } - suites = filteredSuites - } - - return suites, skippedPackages -} - -func goFmt(path string) { - err := exec.Command("go", "fmt", path).Run() - if err != nil { - complainAndQuit("Could not fmt: " + err.Error()) - } -} - -func pluralizedWord(singular, plural string, count int) string { - if count == 1 { - return singular - } - return plural -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go deleted file mode 100644 index 1eda0f8..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go +++ /dev/null @@ -1,194 +0,0 @@ -package nodot - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "path/filepath" - "strings" -) - -func ApplyNoDot(data []byte) ([]byte, error) { - sections, err := generateNodotSections() - if err != nil { - return nil, err - } - - for _, section := range sections { - data = section.createOrUpdateIn(data) - } - - return data, nil -} - -type nodotSection struct { - name string - pkg string - declarations []string - types []string -} - -func (s nodotSection) createOrUpdateIn(data []byte) []byte { - renames := map[string]string{} - - contents := string(data) - - lines := strings.Split(contents, "\n") - - comment := "// Declarations for " + s.name - - newLines := []string{} - for _, line := range lines { - if line == comment { - continue - } - - words := strings.Split(line, " ") - lastWord := words[len(words)-1] - - if 
s.containsDeclarationOrType(lastWord) { - renames[lastWord] = words[1] - continue - } - - newLines = append(newLines, line) - } - - if len(newLines[len(newLines)-1]) > 0 { - newLines = append(newLines, "") - } - - newLines = append(newLines, comment) - - for _, typ := range s.types { - name, ok := renames[s.prefix(typ)] - if !ok { - name = typ - } - newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ))) - } - - for _, decl := range s.declarations { - name, ok := renames[s.prefix(decl)] - if !ok { - name = decl - } - newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl))) - } - - newLines = append(newLines, "") - - newContents := strings.Join(newLines, "\n") - - return []byte(newContents) -} - -func (s nodotSection) prefix(declOrType string) string { - return s.pkg + "." + declOrType -} - -func (s nodotSection) containsDeclarationOrType(word string) bool { - for _, declaration := range s.declarations { - if s.prefix(declaration) == word { - return true - } - } - - for _, typ := range s.types { - if s.prefix(typ) == word { - return true - } - } - - return false -} - -func generateNodotSections() ([]nodotSection, error) { - sections := []nodotSection{} - - declarations, err := getExportedDeclerationsForPackage("github.com/gocircuit/escher/kit/github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC") - if err != nil { - return nil, err - } - sections = append(sections, nodotSection{ - name: "Ginkgo DSL", - pkg: "ginkgo", - declarations: declarations, - types: []string{"Done", "Benchmarker"}, - }) - - declarations, err = getExportedDeclerationsForPackage("github.com/gocircuit/escher/kit/github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION") - if err != nil { - return nil, err - } - sections = append(sections, nodotSection{ - name: "Gomega DSL", - pkg: "gomega", - declarations: declarations, - }) - - declarations, err = 
getExportedDeclerationsForPackage("github.com/gocircuit/escher/kit/github.com/onsi/gomega", "matchers.go") - if err != nil { - return nil, err - } - sections = append(sections, nodotSection{ - name: "Gomega Matchers", - pkg: "gomega", - declarations: declarations, - }) - - return sections, nil -} - -func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) { - pkg, err := build.Import(pkgPath, ".", 0) - if err != nil { - return []string{}, err - } - - declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename)) - if err != nil { - return []string{}, err - } - - blacklistLookup := map[string]bool{} - for _, declaration := range blacklist { - blacklistLookup[declaration] = true - } - - filteredDeclarations := []string{} - for _, declaration := range declarations { - if blacklistLookup[declaration] { - continue - } - filteredDeclarations = append(filteredDeclarations, declaration) - } - - return filteredDeclarations, nil -} - -func getExportedDeclarationsForFile(path string) ([]string, error) { - fset := token.NewFileSet() - tree, err := parser.ParseFile(fset, path, nil, 0) - if err != nil { - return []string{}, err - } - - declarations := []string{} - ast.FileExports(tree) - for _, decl := range tree.Decls { - switch x := decl.(type) { - case *ast.GenDecl: - switch s := x.Specs[0].(type) { - case *ast.ValueSpec: - declarations = append(declarations, s.Names[0].Name) - } - case *ast.FuncDecl: - declarations = append(declarations, x.Name.Name) - } - } - - return declarations, nil -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go b/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go deleted file mode 100644 index ec902d6..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot_suite_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package nodot_test - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestNodot(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Nodot Suite") -} - -// Declarations for Ginkgo DSL -type Done ginkgo.Done -type Benchmarker ginkgo.Benchmarker - -var GinkgoWriter = ginkgo.GinkgoWriter -var GinkgoParallelNode = ginkgo.GinkgoParallelNode -var GinkgoT = ginkgo.GinkgoT -var CurrentGinkgoTestDescription = ginkgo.CurrentGinkgoTestDescription -var RunSpecs = ginkgo.RunSpecs -var RunSpecsWithDefaultAndCustomReporters = ginkgo.RunSpecsWithDefaultAndCustomReporters -var RunSpecsWithCustomReporters = ginkgo.RunSpecsWithCustomReporters -var Fail = ginkgo.Fail -var GinkgoRecover = ginkgo.GinkgoRecover -var Describe = ginkgo.Describe -var FDescribe = ginkgo.FDescribe -var PDescribe = ginkgo.PDescribe -var XDescribe = ginkgo.XDescribe -var Context = ginkgo.Context -var FContext = ginkgo.FContext -var PContext = ginkgo.PContext -var XContext = ginkgo.XContext -var It = ginkgo.It -var FIt = ginkgo.FIt -var PIt = ginkgo.PIt -var XIt = ginkgo.XIt -var Measure = ginkgo.Measure -var FMeasure = ginkgo.FMeasure -var PMeasure = ginkgo.PMeasure -var XMeasure = ginkgo.XMeasure -var BeforeSuite = ginkgo.BeforeSuite -var AfterSuite = ginkgo.AfterSuite -var SynchronizedBeforeSuite = ginkgo.SynchronizedBeforeSuite -var SynchronizedAfterSuite = ginkgo.SynchronizedAfterSuite -var BeforeEach = ginkgo.BeforeEach -var JustBeforeEach = ginkgo.JustBeforeEach -var AfterEach = ginkgo.AfterEach - -// Declarations for Gomega DSL -var RegisterFailHandler = gomega.RegisterFailHandler -var RegisterTestingT = gomega.RegisterTestingT -var InterceptGomegaFailures = gomega.InterceptGomegaFailures -var Ω = gomega.Ω -var Expect = gomega.Expect -var ExpectWithOffset = gomega.ExpectWithOffset -var Eventually = gomega.Eventually -var EventuallyWithOffset = gomega.EventuallyWithOffset -var Consistently = gomega.Consistently -var ConsistentlyWithOffset = gomega.ConsistentlyWithOffset -var 
SetDefaultEventuallyTimeout = gomega.SetDefaultEventuallyTimeout -var SetDefaultEventuallyPollingInterval = gomega.SetDefaultEventuallyPollingInterval -var SetDefaultConsistentlyDuration = gomega.SetDefaultConsistentlyDuration -var SetDefaultConsistentlyPollingInterval = gomega.SetDefaultConsistentlyPollingInterval - -// Declarations for Gomega Matchers -var Equal = gomega.Equal -var BeEquivalentTo = gomega.BeEquivalentTo -var BeNil = gomega.BeNil -var BeTrue = gomega.BeTrue -var BeFalse = gomega.BeFalse -var HaveOccurred = gomega.HaveOccurred -var MatchError = gomega.MatchError -var BeClosed = gomega.BeClosed -var Receive = gomega.Receive -var MatchRegexp = gomega.MatchRegexp -var ContainSubstring = gomega.ContainSubstring -var MatchJSON = gomega.MatchJSON -var BeEmpty = gomega.BeEmpty -var HaveLen = gomega.HaveLen -var BeZero = gomega.BeZero -var ContainElement = gomega.ContainElement -var ConsistOf = gomega.ConsistOf -var HaveKey = gomega.HaveKey -var HaveKeyWithValue = gomega.HaveKeyWithValue -var BeNumerically = gomega.BeNumerically -var BeTemporally = gomega.BeTemporally -var BeAssignableToTypeOf = gomega.BeAssignableToTypeOf -var Panic = gomega.Panic diff --git a/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go b/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go deleted file mode 100644 index fab06cb..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/nodot/nodot_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package nodot_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/nodot" - "strings" -) - -var _ = Describe("ApplyNoDot", func() { - var result string - - apply := func(input string) string { - output, err := ApplyNoDot([]byte(input)) - Ω(err).ShouldNot(HaveOccurred()) - return string(output) - } - - Context("when no declarations have been imported yet", func() { - BeforeEach(func() { - result = apply("") - }) - - It("should add headings for the various declarations", func() { - Ω(result).Should(ContainSubstring("// Declarations for Ginkgo DSL")) - Ω(result).Should(ContainSubstring("// Declarations for Gomega DSL")) - Ω(result).Should(ContainSubstring("// Declarations for Gomega Matchers")) - }) - - It("should import Ginkgo's declarations", func() { - Ω(result).Should(ContainSubstring("var It = ginkgo.It")) - Ω(result).Should(ContainSubstring("var XDescribe = ginkgo.XDescribe")) - }) - - It("should import Ginkgo's types", func() { - Ω(result).Should(ContainSubstring("type Done ginkgo.Done")) - Ω(result).Should(ContainSubstring("type Benchmarker ginkgo.Benchmarker")) - Ω(strings.Count(result, "type ")).Should(Equal(2)) - }) - - It("should import Gomega's DSL and matchers", func() { - Ω(result).Should(ContainSubstring("var Ω = gomega.Ω")) - Ω(result).Should(ContainSubstring("var ContainSubstring = gomega.ContainSubstring")) - Ω(result).Should(ContainSubstring("var Equal = gomega.Equal")) - }) - - It("should not import blacklisted things", func() { - Ω(result).ShouldNot(ContainSubstring("GINKGO_VERSION")) - Ω(result).ShouldNot(ContainSubstring("GINKGO_PANIC")) - Ω(result).ShouldNot(ContainSubstring("GOMEGA_VERSION")) - }) - }) - - It("should be idempotent (module empty lines - go fmt can fix those for us)", func() { - first := apply("") - second := apply(first) - first = strings.Trim(first, "\n") - second = strings.Trim(second, "\n") - Ω(first).Should(Equal(second)) - }) - - It("should not mess with other things in the input", func() { - result = apply("var MyThing = 
SomethingThatsMine") - Ω(result).Should(ContainSubstring("var MyThing = SomethingThatsMine")) - }) - - Context("when the user has redefined a name", func() { - It("should honor the redefinition", func() { - result = apply(` -var _ = gomega.Ω -var When = ginkgo.It - `) - - Ω(result).Should(ContainSubstring("var _ = gomega.Ω")) - Ω(result).ShouldNot(ContainSubstring("var Ω = gomega.Ω")) - - Ω(result).Should(ContainSubstring("var When = ginkgo.It")) - Ω(result).ShouldNot(ContainSubstring("var It = ginkgo.It")) - - Ω(result).Should(ContainSubstring("var Context = ginkgo.Context")) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/kit/github.com/onsi/ginkgo/ginkgo/nodot_command.go deleted file mode 100644 index 9fe43af..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/nodot_command.go +++ /dev/null @@ -1,74 +0,0 @@ -package main - -import ( - "bufio" - "flag" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/nodot" - "io/ioutil" - "os" - "path/filepath" - "regexp" -) - -func BuildNodotCommand() *Command { - return &Command{ - Name: "nodot", - FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError), - UsageCommand: "ginkgo nodot", - Usage: []string{ - "Update the nodot declarations in your test suite", - "Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.", - "If you've renamed a declaration, that name will be honored and not overwritten.", - }, - Command: updateNodot, - } -} - -func updateNodot(args []string, additionalArgs []string) { - suiteFile, perm := findSuiteFile() - - data, err := ioutil.ReadFile(suiteFile) - if err != nil { - complainAndQuit("Failed to update nodot declarations: " + err.Error()) - } - - content, err := nodot.ApplyNoDot(data) - if err != nil { - complainAndQuit("Failed to update nodot declarations: " + err.Error()) - } - ioutil.WriteFile(suiteFile, content, perm) - - goFmt(suiteFile) -} - -func findSuiteFile() (string, os.FileMode) { - workingDir, err 
:= os.Getwd() - if err != nil { - complainAndQuit("Could not find suite file for nodot: " + err.Error()) - } - - files, err := ioutil.ReadDir(workingDir) - if err != nil { - complainAndQuit("Could not find suite file for nodot: " + err.Error()) - } - - re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`) - - for _, file := range files { - if file.IsDir() { - continue - } - path := filepath.Join(workingDir, file.Name()) - f, err := os.Open(path) - if err != nil { - complainAndQuit("Could not find suite file for nodot: " + err.Error()) - } - if re.MatchReader(bufio.NewReader(f)) { - return path, file.Mode() - } - } - - complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.") - - return "", 0 -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/notifications.go b/kit/github.com/onsi/ginkgo/ginkgo/notifications.go deleted file mode 100644 index e3d3d80..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/notifications.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "fmt" - "os" - "os/exec" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type Notifier struct { - commandFlags *RunAndWatchCommandFlags -} - -func NewNotifier(commandFlags *RunAndWatchCommandFlags) *Notifier { - return &Notifier{ - commandFlags: commandFlags, - } -} - -func (n *Notifier) VerifyNotificationsAreAvailable() { - if n.commandFlags.Notify { - _, err := exec.LookPath("terminal-notifier") - if err != nil { - fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed. 
- -To remedy this: - - brew install terminal-notifier - -To learn more about terminal-notifier: - - https://github.com/alloy/terminal-notifier -`) - os.Exit(1) - } - } -} - -func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) { - if suitePassed { - n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName)) - } else { - n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName)) - } -} - -func (n *Notifier) SendNotification(title string, subtitle string) { - args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"} - - terminal := os.Getenv("TERM_PROGRAM") - if terminal == "iTerm.app" { - args = append(args, "-activate", "com.googlecode.iterm2") - } else if terminal == "Apple_Terminal" { - args = append(args, "-activate", "com.apple.Terminal") - } - - if n.commandFlags.Notify { - exec.Command("terminal-notifier", args...).Run() - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/run_and_watch_command_flags.go b/kit/github.com/onsi/ginkgo/ginkgo/run_and_watch_command_flags.go deleted file mode 100644 index 68733ce..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/run_and_watch_command_flags.go +++ /dev/null @@ -1,98 +0,0 @@ -package main - -import ( - "flag" - "runtime" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" -) - -type RunAndWatchCommandFlags struct { - NumCPU int - ParallelStream bool - Recurse bool - Race bool - Cover bool - Notify bool - SkipPackage string - Tags string - AutoNodes bool - - //only for run command - KeepGoing bool - UntilItFails bool - RandomizeSuites bool - - //only for watch command - Depth int - - FlagSet *flag.FlagSet -} - -func NewRunCommandFlags(flagSet *flag.FlagSet) *RunAndWatchCommandFlags { - c := &RunAndWatchCommandFlags{ - FlagSet: flagSet, - } - c.flags(false) - return c -} - -func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunAndWatchCommandFlags { - c := 
&RunAndWatchCommandFlags{ - FlagSet: flagSet, - } - c.flags(true) - return c -} - -func (c *RunAndWatchCommandFlags) wasSet(flagName string) bool { - wasSet := false - c.FlagSet.Visit(func(f *flag.Flag) { - if f.Name == flagName { - wasSet = true - } - }) - - return wasSet -} - -func (c *RunAndWatchCommandFlags) computeNodes() { - if c.wasSet("nodes") { - return - } - if c.AutoNodes { - switch n := runtime.NumCPU(); { - case n <= 4: - c.NumCPU = n - default: - c.NumCPU = n - 1 - } - } -} - -func (c *RunAndWatchCommandFlags) flags(forWatchCommand bool) { - onWindows := (runtime.GOOS == "windows") - onOSX := (runtime.GOOS == "darwin") - - config.Flags(c.FlagSet, "", false) - - c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run") - c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes") - c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging") - c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively") - c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled") - c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory") - c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. 
If any part of the package's path matches, that package is ignored.") - if onOSX { - c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes") - } - c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build") - if !forWatchCommand { - c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running") - c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs") - c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run") - } - if forWatchCommand { - c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree") - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/run_command.go b/kit/github.com/onsi/ginkgo/ginkgo/run_command.go deleted file mode 100644 index 52f27e6..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/run_command.go +++ /dev/null @@ -1,185 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "math/rand" - "os" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testrunner" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -func BuildRunCommand() *Command { - commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError)) - notifier := NewNotifier(commandFlags) - interruptHandler := NewInterruptHandler() - runner := &SpecRunner{ - commandFlags: commandFlags, - notifier: notifier, - interruptHandler: interruptHandler, - suiteRunner: NewSuiteRunner(notifier, interruptHandler), - } - - return &Command{ - Name: "", - FlagSet: commandFlags.FlagSet, - UsageCommand: "ginkgo -- ", - Usage: []string{ - "Run the tests in the passed in (or the package in the current 
directory if left blank).", - "Any arguments after -- will be passed to the test.", - "Accepts the following flags:", - }, - Command: runner.RunSpecs, - } -} - -type SpecRunner struct { - commandFlags *RunAndWatchCommandFlags - notifier *Notifier - interruptHandler *InterruptHandler - suiteRunner *SuiteRunner -} - -func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { - r.commandFlags.computeNodes() - r.notifier.VerifyNotificationsAreAvailable() - - suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage) - if len(skippedPackages) > 0 { - fmt.Println("Will skip:") - for _, skippedPackage := range skippedPackages { - fmt.Println(" " + skippedPackage) - } - } - if len(suites) == 0 { - complainAndQuit("Found no test suites") - } - - r.ComputeSuccinctMode(len(suites)) - - t := time.Now() - - runners := []*testrunner.TestRunner{} - for _, suite := range suites { - runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, additionalArgs)) - } - - numSuites := 0 - runResult := testrunner.PassingRunResult() - if r.commandFlags.UntilItFails { - iteration := 0 - for { - r.UpdateSeed() - randomizedRunners := r.randomizeOrder(runners) - runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.KeepGoing, nil) - iteration++ - - if r.interruptHandler.WasInterrupted() { - break - } - - if runResult.Passed { - fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration)) - } else { - fmt.Printf("\nTests failed on attempt #%d\n\n", iteration) - break - } - } - } else { - randomizedRunners := r.randomizeOrder(runners) - runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.KeepGoing, nil) - } - - for _, runner := range runners { - runner.CleanUp() - } - - fmt.Printf("\nGinkgo ran %d %s in %s\n", 
numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t)) - - if runResult.Passed { - if runResult.HasProgrammaticFocus { - fmt.Printf("Test Suite Passed\n") - fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) - os.Exit(types.GINKGO_FOCUS_EXIT_CODE) - } else { - fmt.Printf("Test Suite Passed\n") - os.Exit(0) - } - } else { - fmt.Printf("Test Suite Failed\n") - os.Exit(1) - } -} - -func (r *SpecRunner) ComputeSuccinctMode(numSuites int) { - if config.DefaultReporterConfig.Verbose { - config.DefaultReporterConfig.Succinct = false - return - } - - if numSuites == 1 { - return - } - - if numSuites > 1 && !r.commandFlags.wasSet("succinct") { - config.DefaultReporterConfig.Succinct = true - } -} - -func (r *SpecRunner) UpdateSeed() { - if !r.commandFlags.wasSet("seed") { - config.GinkgoConfig.RandomSeed = time.Now().Unix() - } -} - -func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner { - if !r.commandFlags.RandomizeSuites { - return runners - } - - if len(runners) <= 1 { - return runners - } - - randomizedRunners := make([]*testrunner.TestRunner, len(runners)) - randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed)) - permutation := randomizer.Perm(len(runners)) - for i, j := range permutation { - randomizedRunners[i] = runners[j] - } - return randomizedRunners -} - -func orcMessage(iteration int) string { - if iteration < 10 { - return "" - } else if iteration < 30 { - return []string{ - "If at first you succeed...", - "...try, try again.", - "Looking good!", - "Still good...", - "I think your tests are fine....", - "Yep, still passing", - "Here we go again...", - "Even the gophers are getting bored", - "Did you try -race?", - "Maybe you should stop now?", - "I'm getting tired...", - "What if I just made you a sandwich?", - "Hit ^C, hit ^C, please hit ^C", - "Make it stop. Please!", - "Come on! 
Enough is enough!", - "Dave, this conversation can serve no purpose anymore. Goodbye.", - "Just what do you think you're doing, Dave? ", - "I, Sisyphus", - "Insanity: doing the same thing over and over again and expecting different results. -Einstein", - "I guess Einstein never tried to churn butter", - }[iteration-10] + "\n" - } else { - return "No, seriously... you can probably stop now.\n" - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/suite_runner.go b/kit/github.com/onsi/ginkgo/ginkgo/suite_runner.go deleted file mode 100644 index 1167bd8..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/suite_runner.go +++ /dev/null @@ -1,127 +0,0 @@ -package main - -import ( - "fmt" - "runtime" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testrunner" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type SuiteRunner struct { - notifier *Notifier - interruptHandler *InterruptHandler -} - -type compiler struct { - runner *testrunner.TestRunner - compilationError chan error -} - -func (c *compiler) compile() { - retries := 0 - - err := c.runner.Compile() - for err != nil && retries < 5 { //We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness... 
- err = c.runner.Compile() - retries++ - } - - c.compilationError <- err -} - -func NewSuiteRunner(notifier *Notifier, interruptHandler *InterruptHandler) *SuiteRunner { - return &SuiteRunner{ - notifier: notifier, - interruptHandler: interruptHandler, - } -} - -func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) { - runResult := testrunner.PassingRunResult() - - compilers := make([]*compiler, len(runners)) - for i, runner := range runners { - compilers[i] = &compiler{ - runner: runner, - compilationError: make(chan error, 1), - } - } - - compilerChannel := make(chan *compiler) - numCompilers := runtime.NumCPU() - for i := 0; i < numCompilers; i++ { - go func() { - for compiler := range compilerChannel { - if willCompile != nil { - willCompile(compiler.runner.Suite) - } - compiler.compile() - } - }() - } - go func() { - for _, compiler := range compilers { - compilerChannel <- compiler - } - close(compilerChannel) - }() - - numSuitesThatRan := 0 - suitesThatFailed := []testsuite.TestSuite{} - for i, runner := range runners { - if r.interruptHandler.WasInterrupted() { - break - } - - compilationError := <-compilers[i].compilationError - if compilationError != nil { - fmt.Print(compilationError.Error()) - } - numSuitesThatRan++ - suiteRunResult := testrunner.FailingRunResult() - if compilationError == nil { - suiteRunResult = compilers[i].runner.Run() - } - r.notifier.SendSuiteCompletionNotification(runner.Suite, suiteRunResult.Passed) - runResult = runResult.Merge(suiteRunResult) - if !suiteRunResult.Passed { - suitesThatFailed = append(suitesThatFailed, runner.Suite) - if !keepGoing { - break - } - } - if i < len(runners)-1 && !config.DefaultReporterConfig.Succinct { - fmt.Println("") - } - } - - if keepGoing && !runResult.Passed { - r.listFailedSuites(suitesThatFailed) - } - - return runResult, numSuitesThatRan -} - -func (r *SuiteRunner) 
listFailedSuites(suitesThatFailed []testsuite.TestSuite) { - fmt.Println("") - fmt.Println("There were failures detected in the following suites:") - - maxPackageNameLength := 0 - for _, suite := range suitesThatFailed { - if len(suite.PackageName) > maxPackageNameLength { - maxPackageNameLength = len(suite.PackageName) - } - } - - packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) - - for _, suite := range suitesThatFailed { - if config.DefaultReporterConfig.NoColor { - fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path) - } else { - fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle) - } - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go b/kit/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go deleted file mode 100644 index a73a6e3..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go +++ /dev/null @@ -1,52 +0,0 @@ -package testrunner - -import ( - "bytes" - "fmt" - "io" - "log" - "strings" - "sync" -) - -type logWriter struct { - buffer *bytes.Buffer - lock *sync.Mutex - log *log.Logger -} - -func newLogWriter(target io.Writer, node int) *logWriter { - return &logWriter{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - log: log.New(target, fmt.Sprintf("[%d] ", node), 0), - } -} - -func (w *logWriter) Write(data []byte) (n int, err error) { - w.lock.Lock() - defer w.lock.Unlock() - - w.buffer.Write(data) - contents := w.buffer.String() - - lines := strings.Split(contents, "\n") - for _, line := range lines[0 : len(lines)-1] { - w.log.Println(line) - } - - w.buffer.Reset() - w.buffer.Write([]byte(lines[len(lines)-1])) - return len(data), nil -} - -func (w *logWriter) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - if w.buffer.Len() > 0 { - w.log.Println(w.buffer.String()) - } - - return nil -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go 
b/kit/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go deleted file mode 100644 index 5d472ac..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go +++ /dev/null @@ -1,27 +0,0 @@ -package testrunner - -type RunResult struct { - Passed bool - HasProgrammaticFocus bool -} - -func PassingRunResult() RunResult { - return RunResult{ - Passed: true, - HasProgrammaticFocus: false, - } -} - -func FailingRunResult() RunResult { - return RunResult{ - Passed: false, - HasProgrammaticFocus: false, - } -} - -func (r RunResult) Merge(o RunResult) RunResult { - return RunResult{ - Passed: r.Passed && o.Passed, - HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus, - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/kit/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go deleted file mode 100644 index ebf7fc1..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go +++ /dev/null @@ -1,368 +0,0 @@ -package testrunner - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - "syscall" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/remote" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters/stenographer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type TestRunner struct { - Suite testsuite.TestSuite - compiled bool - - numCPU int - parallelStream bool - race bool - cover bool - tags string - additionalArgs []string -} - -func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, tags string, additionalArgs []string) *TestRunner { - return &TestRunner{ - Suite: suite, - numCPU: numCPU, - parallelStream: parallelStream, - race: race, - cover: cover, - tags: tags, - additionalArgs: 
additionalArgs, - } -} - -func (t *TestRunner) Compile() error { - if t.compiled { - return nil - } - - os.Remove(t.compiledArtifact()) - - args := []string{"test", "-c", "-i"} - if t.race { - args = append(args, "-race") - } - if t.cover { - args = append(args, "-cover", "-covermode=atomic") - } - if t.tags != "" { - args = append(args, fmt.Sprintf("-tags=%s", t.tags)) - } - - cmd := exec.Command("go", args...) - - cmd.Dir = t.Suite.Path - - output, err := cmd.CombinedOutput() - - if err != nil { - fixedOutput := fixCompilationOutput(string(output), t.Suite.Path) - if len(output) > 0 { - return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput) - } - return fmt.Errorf("") - } - - t.compiled = true - return nil -} - -/* -go test -c -i spits package.test out into the cwd. there's no way to change this. - -to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package. - -unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd. - -this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working. - -fixCompilationOutput..... rewrites the output to fix the paths. - -yeah...... 
-*/ -func fixCompilationOutput(output string, relToPath string) string { - re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`) - lines := strings.Split(output, "\n") - for i, line := range lines { - indices := re.FindStringSubmatchIndex(line) - if len(indices) == 0 { - continue - } - - path := line[indices[2]:indices[3]] - path = filepath.Join(relToPath, path) - lines[i] = path + line[indices[3]:] - } - return strings.Join(lines, "\n") -} - -func (t *TestRunner) Run() RunResult { - if t.Suite.IsGinkgo { - if t.numCPU > 1 { - if t.parallelStream { - return t.runAndStreamParallelGinkgoSuite() - } else { - return t.runParallelGinkgoSuite() - } - } else { - return t.runSerialGinkgoSuite() - } - } else { - return t.runGoTestSuite() - } -} - -func (t *TestRunner) CleanUp() { - os.Remove(t.compiledArtifact()) -} - -func (t *TestRunner) compiledArtifact() string { - compiledArtifact, _ := filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName))) - return compiledArtifact -} - -func (t *TestRunner) runSerialGinkgoSuite() RunResult { - ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) - return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil) -} - -func (t *TestRunner) runGoTestSuite() RunResult { - return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil) -} - -func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult { - completions := make(chan RunResult) - writers := make([]*logWriter, t.numCPU) - - server, err := remote.NewServer(t.numCPU) - if err != nil { - panic("Failed to start parallel spec server") - } - - server.Start() - defer server.Close() - - for cpu := 0; cpu < t.numCPU; cpu++ { - config.GinkgoConfig.ParallelNode = cpu + 1 - config.GinkgoConfig.ParallelTotal = t.numCPU - config.GinkgoConfig.SyncHost = server.Address() - - ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) - - writers[cpu] = newLogWriter(os.Stdout, cpu+1) - - cmd := 
t.cmd(ginkgoArgs, writers[cpu], cpu+1) - - server.RegisterAlive(cpu+1, func() bool { - if cmd.ProcessState == nil { - return true - } - return !cmd.ProcessState.Exited() - }) - - go t.run(cmd, completions) - } - - res := PassingRunResult() - - for cpu := 0; cpu < t.numCPU; cpu++ { - res = res.Merge(<-completions) - } - - for _, writer := range writers { - writer.Close() - } - - os.Stdout.Sync() - - if t.cover { - t.combineCoverprofiles() - } - - return res -} - -func (t *TestRunner) runParallelGinkgoSuite() RunResult { - result := make(chan bool) - completions := make(chan RunResult) - writers := make([]*logWriter, t.numCPU) - reports := make([]*bytes.Buffer, t.numCPU) - - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor) - aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer) - - server, err := remote.NewServer(t.numCPU) - if err != nil { - panic("Failed to start parallel spec server") - } - server.RegisterReporters(aggregator) - server.Start() - defer server.Close() - - for cpu := 0; cpu < t.numCPU; cpu++ { - config.GinkgoConfig.ParallelNode = cpu + 1 - config.GinkgoConfig.ParallelTotal = t.numCPU - config.GinkgoConfig.SyncHost = server.Address() - config.GinkgoConfig.StreamHost = server.Address() - - ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) - - reports[cpu] = &bytes.Buffer{} - writers[cpu] = newLogWriter(reports[cpu], cpu+1) - - cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1) - - server.RegisterAlive(cpu+1, func() bool { - if cmd.ProcessState == nil { - return true - } - return !cmd.ProcessState.Exited() - }) - - go t.run(cmd, completions) - } - - res := PassingRunResult() - - for cpu := 0; cpu < t.numCPU; cpu++ { - res = res.Merge(<-completions) - } - - //all test processes are done, at this point - //we should be able to wait for the aggregator to tell us that it's done - - select { - case <-result: - fmt.Println("") - case 
<-time.After(time.Second): - //the aggregator never got back to us! something must have gone wrong - fmt.Println("") - fmt.Println("") - fmt.Println(" ----------------------------------------------------------- ") - fmt.Println(" | |") - fmt.Println(" | Ginkgo timed out waiting for all parallel nodes to end! |") - fmt.Println(" | Here is some salvaged output: |") - fmt.Println(" | |") - fmt.Println(" ----------------------------------------------------------- ") - fmt.Println("") - fmt.Println("") - - os.Stdout.Sync() - - time.Sleep(time.Second) - - for _, writer := range writers { - writer.Close() - } - - for _, report := range reports { - fmt.Print(report.String()) - } - - os.Stdout.Sync() - } - - if t.cover { - t.combineCoverprofiles() - } - - return res -} - -func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd { - args := []string{"-test.timeout=24h"} - if t.cover { - coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile" - if t.numCPU > 1 { - coverprofile = fmt.Sprintf("%s.%d", coverprofile, node) - } - args = append(args, coverprofile) - } - - args = append(args, ginkgoArgs...) - args = append(args, t.additionalArgs...) - - cmd := exec.Command(t.compiledArtifact(), args...) 
- - cmd.Dir = t.Suite.Path - cmd.Stderr = stream - cmd.Stdout = stream - - return cmd -} - -func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult { - var res RunResult - - defer func() { - if completions != nil { - completions <- res - } - }() - - err := cmd.Start() - if err != nil { - fmt.Printf("Failed to run test suite!\n\t%s", err.Error()) - return res - } - - cmd.Wait() - exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() - res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) - res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) - - return res -} - -func (t *TestRunner) combineCoverprofiles() { - profiles := []string{} - for cpu := 1; cpu <= t.numCPU; cpu++ { - coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu) - coverFile = filepath.Join(t.Suite.Path, coverFile) - coverProfile, err := ioutil.ReadFile(coverFile) - os.Remove(coverFile) - - if err == nil { - profiles = append(profiles, string(coverProfile)) - } - } - - if len(profiles) != t.numCPU { - return - } - - lines := map[string]int{} - - for _, coverProfile := range profiles { - for _, line := range strings.Split(string(coverProfile), "\n")[1:] { - if len(line) == 0 { - continue - } - components := strings.Split(line, " ") - count, _ := strconv.Atoi(components[len(components)-1]) - prefix := strings.Join(components[0:len(components)-1], " ") - lines[prefix] += count - } - } - - output := []string{"mode: atomic"} - for line, count := range lines { - output = append(output, fmt.Sprintf("%s %d", line, count)) - } - finalOutput := strings.Join(output, "\n") - ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/kit/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go deleted file mode 100644 index 044a7bc..0000000 --- 
a/kit/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go +++ /dev/null @@ -1,71 +0,0 @@ -package testsuite - -import ( - "io/ioutil" - "os" - "path/filepath" - "regexp" -) - -type TestSuite struct { - Path string - PackageName string - IsGinkgo bool -} - -func SuitesInDir(dir string, recurse bool) []TestSuite { - suites := []TestSuite{} - files, _ := ioutil.ReadDir(dir) - re := regexp.MustCompile(`_test\.go$`) - for _, file := range files { - if !file.IsDir() && re.Match([]byte(file.Name())) { - suites = append(suites, New(dir, files)) - break - } - } - - if recurse { - re = regexp.MustCompile(`^[._]`) - for _, file := range files { - if file.IsDir() && !re.Match([]byte(file.Name())) { - suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...) - } - } - } - - return suites -} - -func New(dir string, files []os.FileInfo) TestSuite { - dir, _ = filepath.Abs(dir) - cwd, _ := os.Getwd() - dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) - dir = "." + string(filepath.Separator) + dir - - return TestSuite{ - Path: dir, - PackageName: packageNameForSuite(dir), - IsGinkgo: filesHaveGinkgoSuite(dir, files), - } -} - -func packageNameForSuite(dir string) string { - path, _ := filepath.Abs(dir) - return filepath.Base(path) -} - -func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool { - reTestFile := regexp.MustCompile(`_test\.go$`) - reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`) - - for _, file := range files { - if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { - contents, _ := ioutil.ReadFile(dir + "/" + file.Name()) - if reGinkgo.Match(contents) { - return true - } - } - } - - return false -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go b/kit/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go deleted file mode 100644 index d8f855e..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package testsuite_test - -import ( 
- . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestTestsuite(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Testsuite Suite") -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go b/kit/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go deleted file mode 100644 index 8e2d47b..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/testsuite/testsuite_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package testsuite_test - -import ( - "io/ioutil" - "os" - "path/filepath" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("TestSuite", func() { - var tmpDir string - var relTmpDir string - - writeFile := func(folder string, filename string, content string) { - path := filepath.Join(tmpDir, folder) - err := os.MkdirAll(path, 0700) - Ω(err).ShouldNot(HaveOccurred()) - - path = filepath.Join(path, filename) - ioutil.WriteFile(path, []byte(content), os.ModePerm) - } - - BeforeEach(func() { - var err error - tmpDir, err = ioutil.TempDir("/tmp", "ginkgo") - Ω(err).ShouldNot(HaveOccurred()) - - cwd, err := os.Getwd() - Ω(err).ShouldNot(HaveOccurred()) - relTmpDir, err = filepath.Rel(cwd, tmpDir) - relTmpDir = "./" + relTmpDir - Ω(err).ShouldNot(HaveOccurred()) - - //go files in the root directory (no tests) - writeFile("/", "main.go", "package main") - - //non-go files in a nested directory - writeFile("/redherring", "big_test.jpg", "package ginkgo") - - //non-ginkgo tests in a nested directory - writeFile("/proffessorplum", "proffessorplum_test.go", `import "testing"`) - - //ginkgo tests in a nested directory - writeFile("/colonelmustard", "colonelmustard_test.go", `import "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`) - - //ginkgo tests in a deeply nested directory - 
writeFile("/colonelmustard/library", "library_test.go", `import "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`) - }) - - AfterEach(func() { - os.RemoveAll(tmpDir) - }) - - Describe("scanning for suites in a directory", func() { - Context("when there are no tests in the specified directory", func() { - It("should come up empty", func() { - suites := SuitesInDir(tmpDir, false) - Ω(suites).Should(BeEmpty()) - }) - }) - - Context("when there are ginkgo tests in the specified directory", func() { - It("should return an appropriately configured suite", func() { - suites := SuitesInDir(filepath.Join(tmpDir, "colonelmustard"), false) - Ω(suites).Should(HaveLen(1)) - - Ω(suites[0].Path).Should(Equal(relTmpDir + "/colonelmustard")) - Ω(suites[0].PackageName).Should(Equal("colonelmustard")) - Ω(suites[0].IsGinkgo).Should(BeTrue()) - }) - }) - - Context("when there are non-ginkgo tests in the specified directory", func() { - It("should return an appropriately configured suite", func() { - suites := SuitesInDir(filepath.Join(tmpDir, "proffessorplum"), false) - Ω(suites).Should(HaveLen(1)) - - Ω(suites[0].Path).Should(Equal(relTmpDir + "/proffessorplum")) - Ω(suites[0].PackageName).Should(Equal("proffessorplum")) - Ω(suites[0].IsGinkgo).Should(BeFalse()) - }) - }) - - Context("when recursively scanning", func() { - It("should return suites for corresponding test suites, only", func() { - suites := SuitesInDir(tmpDir, true) - Ω(suites).Should(HaveLen(3)) - - Ω(suites).Should(ContainElement(TestSuite{ - Path: relTmpDir + "/colonelmustard", - PackageName: "colonelmustard", - IsGinkgo: true, - })) - Ω(suites).Should(ContainElement(TestSuite{ - Path: relTmpDir + "/proffessorplum", - PackageName: "proffessorplum", - IsGinkgo: false, - })) - Ω(suites).Should(ContainElement(TestSuite{ - Path: relTmpDir + "/colonelmustard/library", - PackageName: "library", - IsGinkgo: true, - })) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/ginkgo/unfocus_command.go 
b/kit/github.com/onsi/ginkgo/ginkgo/unfocus_command.go deleted file mode 100644 index 16f3c3b..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/unfocus_command.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os/exec" -) - -func BuildUnfocusCommand() *Command { - return &Command{ - Name: "unfocus", - AltName: "blur", - FlagSet: flag.NewFlagSet("unfocus", flag.ExitOnError), - UsageCommand: "ginkgo unfocus (or ginkgo blur)", - Usage: []string{ - "Recursively unfocuses any focused tests under the current directory", - }, - Command: unfocusSpecs, - } -} - -func unfocusSpecs([]string, []string) { - unfocus("Describe") - unfocus("Context") - unfocus("It") - unfocus("Measure") -} - -func unfocus(component string) { - fmt.Printf("Removing F%s...\n", component) - cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".") - out, _ := cmd.CombinedOutput() - if string(out) != "" { - println(string(out)) - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/version_command.go b/kit/github.com/onsi/ginkgo/ginkgo/version_command.go deleted file mode 100644 index 92c3236..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/version_command.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" -) - -func BuildVersionCommand() *Command { - return &Command{ - Name: "version", - FlagSet: flag.NewFlagSet("version", flag.ExitOnError), - UsageCommand: "ginkgo version", - Usage: []string{ - "Print Ginkgo's version", - }, - Command: printVersion, - } -} - -func printVersion([]string, []string) { - fmt.Printf("Ginkgo Version %s\n", config.VERSION) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch/delta.go b/kit/github.com/onsi/ginkgo/ginkgo/watch/delta.go deleted file mode 100644 index 6c485c5..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch/delta.go +++ /dev/null @@ -1,22 +0,0 @@ -package watch - -import "sort" - -type Delta struct { 
- ModifiedPackages []string - - NewSuites []*Suite - RemovedSuites []*Suite - modifiedSuites []*Suite -} - -type DescendingByDelta []*Suite - -func (a DescendingByDelta) Len() int { return len(a) } -func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } - -func (d Delta) ModifiedSuites() []*Suite { - sort.Sort(DescendingByDelta(d.modifiedSuites)) - return d.modifiedSuites -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/kit/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go deleted file mode 100644 index 2e3f1aa..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go +++ /dev/null @@ -1,71 +0,0 @@ -package watch - -import ( - "fmt" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type SuiteErrors map[testsuite.TestSuite]error - -type DeltaTracker struct { - maxDepth int - suites map[string]*Suite - packageHashes *PackageHashes -} - -func NewDeltaTracker(maxDepth int) *DeltaTracker { - return &DeltaTracker{ - maxDepth: maxDepth, - packageHashes: NewPackageHashes(), - suites: map[string]*Suite{}, - } -} - -func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) { - errors = SuiteErrors{} - delta.ModifiedPackages = d.packageHashes.CheckForChanges() - - providedSuitePaths := map[string]bool{} - for _, suite := range suites { - providedSuitePaths[suite.Path] = true - } - - d.packageHashes.StartTrackingUsage() - - for _, suite := range d.suites { - if providedSuitePaths[suite.Suite.Path] { - if suite.Delta() > 0 { - delta.modifiedSuites = append(delta.modifiedSuites, suite) - } - } else { - delta.RemovedSuites = append(delta.RemovedSuites, suite) - } - } - - d.packageHashes.StopTrackingUsageAndPrune() - - for _, suite := range suites { - _, ok := d.suites[suite.Path] - if !ok { - s, err := NewSuite(suite, d.maxDepth, d.packageHashes) - if err != nil { - 
errors[suite] = err - continue - } - d.suites[suite.Path] = s - delta.NewSuites = append(delta.NewSuites, s) - } - } - - return delta, errors -} - -func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error { - s, ok := d.suites[suite.Path] - if !ok { - return fmt.Errorf("unkown suite %s", suite.Path) - } - - return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go b/kit/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go deleted file mode 100644 index 82c25fa..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go +++ /dev/null @@ -1,91 +0,0 @@ -package watch - -import ( - "go/build" - "regexp" -) - -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) - -type Dependencies struct { - deps map[string]int -} - -func NewDependencies(path string, maxDepth int) (Dependencies, error) { - d := Dependencies{ - deps: map[string]int{}, - } - - if maxDepth == 0 { - return d, nil - } - - err := d.seedWithDepsForPackageAtPath(path) - if err != nil { - return d, err - } - - for depth := 1; depth < maxDepth; depth++ { - n := len(d.deps) - d.addDepsForDepth(depth) - if n == len(d.deps) { - break - } - } - - return d, nil -} - -func (d Dependencies) Dependencies() map[string]int { - return d.deps -} - -func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { - pkg, err := build.ImportDir(path, 0) - if err != nil { - return err - } - - d.resolveAndAdd(pkg.Imports, 1) - d.resolveAndAdd(pkg.TestImports, 1) - d.resolveAndAdd(pkg.XTestImports, 1) - - delete(d.deps, pkg.Dir) - return nil -} - -func (d Dependencies) addDepsForDepth(depth int) { - for dep, depDepth := range d.deps { - if depDepth == depth { - d.addDepsForDep(dep, depth+1) - } - } -} - -func (d Dependencies) addDepsForDep(dep string, depth int) { - pkg, err := build.ImportDir(dep, 0) - if err != nil { - println(err.Error()) - return - } - d.resolveAndAdd(pkg.Imports, depth) 
-} - -func (d Dependencies) resolveAndAdd(deps []string, depth int) { - for _, dep := range deps { - pkg, err := build.Import(dep, ".", 0) - if err != nil { - continue - } - if pkg.Goroot == false && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) { - d.addDepIfNotPresent(pkg.Dir, depth) - } - } -} - -func (d Dependencies) addDepIfNotPresent(dep string, depth int) { - _, ok := d.deps[dep] - if !ok { - d.deps[dep] = depth - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/kit/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go deleted file mode 100644 index eaf357c..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go +++ /dev/null @@ -1,103 +0,0 @@ -package watch - -import ( - "fmt" - "io/ioutil" - "os" - "regexp" - "time" -) - -var goRegExp = regexp.MustCompile(`\.go$`) -var goTestRegExp = regexp.MustCompile(`_test\.go$`) - -type PackageHash struct { - CodeModifiedTime time.Time - TestModifiedTime time.Time - Deleted bool - - path string - codeHash string - testHash string -} - -func NewPackageHash(path string) *PackageHash { - p := &PackageHash{ - path: path, - } - - p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() - - return p -} - -func (p *PackageHash) CheckForChanges() bool { - codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() - - if deleted { - if p.Deleted == false { - t := time.Now() - p.CodeModifiedTime = t - p.TestModifiedTime = t - } - p.Deleted = true - return true - } - - modified := false - p.Deleted = false - - if p.codeHash != codeHash { - p.CodeModifiedTime = codeModifiedTime - modified = true - } - if p.testHash != testHash { - p.TestModifiedTime = testModifiedTime - modified = true - } - - p.codeHash = codeHash - p.testHash = testHash - return modified -} - -func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { - infos, err := ioutil.ReadDir(p.path) - - if err != nil { 
- deleted = true - return - } - - for _, info := range infos { - if info.IsDir() { - continue - } - - if goTestRegExp.Match([]byte(info.Name())) { - testHash += p.hashForFileInfo(info) - if info.ModTime().After(testModifiedTime) { - testModifiedTime = info.ModTime() - } - continue - } - - if goRegExp.Match([]byte(info.Name())) { - codeHash += p.hashForFileInfo(info) - if info.ModTime().After(codeModifiedTime) { - codeModifiedTime = info.ModTime() - } - } - } - - testHash += codeHash - if codeModifiedTime.After(testModifiedTime) { - testModifiedTime = codeModifiedTime - } - - return -} - -func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { - return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/kit/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go deleted file mode 100644 index 262eaa8..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go +++ /dev/null @@ -1,82 +0,0 @@ -package watch - -import ( - "path/filepath" - "sync" -) - -type PackageHashes struct { - PackageHashes map[string]*PackageHash - usedPaths map[string]bool - lock *sync.Mutex -} - -func NewPackageHashes() *PackageHashes { - return &PackageHashes{ - PackageHashes: map[string]*PackageHash{}, - usedPaths: nil, - lock: &sync.Mutex{}, - } -} - -func (p *PackageHashes) CheckForChanges() []string { - p.lock.Lock() - defer p.lock.Unlock() - - modified := []string{} - - for _, packageHash := range p.PackageHashes { - if packageHash.CheckForChanges() { - modified = append(modified, packageHash.path) - } - } - - return modified -} - -func (p *PackageHashes) Add(path string) *PackageHash { - p.lock.Lock() - defer p.lock.Unlock() - - path, _ = filepath.Abs(path) - _, ok := p.PackageHashes[path] - if !ok { - p.PackageHashes[path] = NewPackageHash(path) - } - - if p.usedPaths != nil { - p.usedPaths[path] = true - } - return p.PackageHashes[path] -} - -func (p *PackageHashes) 
Get(path string) *PackageHash { - p.lock.Lock() - defer p.lock.Unlock() - - path, _ = filepath.Abs(path) - if p.usedPaths != nil { - p.usedPaths[path] = true - } - return p.PackageHashes[path] -} - -func (p *PackageHashes) StartTrackingUsage() { - p.lock.Lock() - defer p.lock.Unlock() - - p.usedPaths = map[string]bool{} -} - -func (p *PackageHashes) StopTrackingUsageAndPrune() { - p.lock.Lock() - defer p.lock.Unlock() - - for path := range p.PackageHashes { - if !p.usedPaths[path] { - delete(p.PackageHashes, path) - } - } - - p.usedPaths = nil -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch/suite.go b/kit/github.com/onsi/ginkgo/ginkgo/watch/suite.go deleted file mode 100644 index 1f3e16b..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch/suite.go +++ /dev/null @@ -1,87 +0,0 @@ -package watch - -import ( - "fmt" - "math" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type Suite struct { - Suite testsuite.TestSuite - RunTime time.Time - Dependencies Dependencies - - sharedPackageHashes *PackageHashes -} - -func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { - deps, err := NewDependencies(suite.Path, maxDepth) - if err != nil { - return nil, err - } - - sharedPackageHashes.Add(suite.Path) - for dep := range deps.Dependencies() { - sharedPackageHashes.Add(dep) - } - - return &Suite{ - Suite: suite, - Dependencies: deps, - - sharedPackageHashes: sharedPackageHashes, - }, nil -} - -func (s *Suite) Delta() float64 { - delta := s.delta(s.Suite.Path, true, 0) * 1000 - for dep, depth := range s.Dependencies.Dependencies() { - delta += s.delta(dep, false, depth) - } - return delta -} - -func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { - s.RunTime = time.Now() - - deps, err := NewDependencies(s.Suite.Path, maxDepth) - if err != nil { - return err - } - - s.sharedPackageHashes.Add(s.Suite.Path) - for dep := range deps.Dependencies() { - 
s.sharedPackageHashes.Add(dep) - } - - s.Dependencies = deps - - return nil -} - -func (s *Suite) Description() string { - numDeps := len(s.Dependencies.Dependencies()) - pluralizer := "ies" - if numDeps == 1 { - pluralizer = "y" - } - return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) -} - -func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { - return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) -} - -func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { - packageHash := s.sharedPackageHashes.Get(packagePath) - var modifiedTime time.Time - if includeTests { - modifiedTime = packageHash.TestModifiedTime - } else { - modifiedTime = packageHash.CodeModifiedTime - } - - return modifiedTime.Sub(s.RunTime) -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo/watch_command.go b/kit/github.com/onsi/ginkgo/ginkgo/watch_command.go deleted file mode 100644 index b472c45..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo/watch_command.go +++ /dev/null @@ -1,171 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testrunner" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/testsuite" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo/watch" -) - -func BuildWatchCommand() *Command { - commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError)) - interruptHandler := NewInterruptHandler() - notifier := NewNotifier(commandFlags) - watcher := &SpecWatcher{ - commandFlags: commandFlags, - notifier: notifier, - interruptHandler: interruptHandler, - suiteRunner: NewSuiteRunner(notifier, interruptHandler), - } - - return &Command{ - Name: "watch", - FlagSet: commandFlags.FlagSet, - UsageCommand: "ginkgo watch -- ", - Usage: []string{ - "Watches the tests in the passed in and runs them when changes 
occur.", - "Any arguments after -- will be passed to the test.", - }, - Command: watcher.WatchSpecs, - SuppressFlagDocumentation: true, - FlagDocSubstitute: []string{ - "Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails", - }, - } -} - -type SpecWatcher struct { - commandFlags *RunAndWatchCommandFlags - notifier *Notifier - interruptHandler *InterruptHandler - suiteRunner *SuiteRunner -} - -func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { - w.commandFlags.computeNodes() - w.notifier.VerifyNotificationsAreAvailable() - - w.WatchSuites(args, additionalArgs) -} - -func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner { - runners := []*testrunner.TestRunner{} - - for _, suite := range suites { - runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.Tags, additionalArgs)) - } - - return runners -} - -func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) { - suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage) - - if len(suites) == 0 { - complainAndQuit("Found no test suites") - } - - fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth) - deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth) - delta, errors := deltaTracker.Delta(suites) - - fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites))) - for _, suite := range delta.NewSuites { - fmt.Println(" " + suite.Description()) - } - - for suite, err := range errors { - fmt.Printf("Failed to watch %s: %s\n"+suite.PackageName, err) - } - - if len(suites) == 1 { - runners := w.runnersForSuites(suites, additionalArgs) - w.suiteRunner.RunSuites(runners, true, nil) - runners[0].CleanUp() - } - - ticker := time.NewTicker(time.Second) - - for { - select { - case <-ticker.C: - suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage) - delta, _ := deltaTracker.Delta(suites) - - suitesToRun := []testsuite.TestSuite{} - - if len(delta.NewSuites) > 0 { - fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites))) - for _, suite := range delta.NewSuites { - suitesToRun = append(suitesToRun, suite.Suite) - fmt.Println(" " + suite.Description()) - } - } - - modifiedSuites := delta.ModifiedSuites() - if len(modifiedSuites) > 0 { - fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle) - for _, pkg := range delta.ModifiedPackages { - fmt.Println(" " + pkg) - } - fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites))) - for _, suite := range modifiedSuites { - suitesToRun = append(suitesToRun, suite.Suite) - fmt.Println(" " + suite.Description()) - } - fmt.Println("") - } - - if len(suitesToRun) > 0 { - w.UpdateSeed() - w.ComputeSuccinctMode(len(suitesToRun)) - runners := w.runnersForSuites(suitesToRun, additionalArgs) - result, _ := w.suiteRunner.RunSuites(runners, 
true, func(suite testsuite.TestSuite) { - deltaTracker.WillRun(suite) - }) - for _, runner := range runners { - runner.CleanUp() - } - if !w.interruptHandler.WasInterrupted() { - color := redColor - if result.Passed { - color = greenColor - } - fmt.Println(color + "\nDone. Resuming watch..." + defaultStyle) - } - } - - case <-w.interruptHandler.C: - return - } - } -} - -func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) { - if config.DefaultReporterConfig.Verbose { - config.DefaultReporterConfig.Succinct = false - return - } - - if w.commandFlags.wasSet("succinct") { - return - } - - if numSuites == 1 { - config.DefaultReporterConfig.Succinct = false - } - - if numSuites > 1 { - config.DefaultReporterConfig.Succinct = true - } -} - -func (w *SpecWatcher) UpdateSeed() { - if !w.commandFlags.wasSet("seed") { - config.GinkgoConfig.RandomSeed = time.Now().Unix() - } -} diff --git a/kit/github.com/onsi/ginkgo/ginkgo_dsl.go b/kit/github.com/onsi/ginkgo/ginkgo_dsl.go deleted file mode 100644 index 3170410..0000000 --- a/kit/github.com/onsi/ginkgo/ginkgo_dsl.go +++ /dev/null @@ -1,500 +0,0 @@ -/* -Ginkgo is a BDD-style testing framework for Golang - -The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) 
is available at http://onsi.github.io/ginkgo/ - -Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) - -Ginkgo on Github: http://github.com/onsi/ginkgo - -Ginkgo is MIT-Licensed -*/ -package ginkgo - -import ( - "flag" - "fmt" - "io" - "net/http" - "os" - "strings" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/remote" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/suite" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/testingtproxy" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/writer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters/stenographer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -const GINKGO_VERSION = config.VERSION -const GINKGO_PANIC = ` -Your test failed. -Ginkgo panics to prevent subsequent assertions from running. -Normally Ginkgo rescues this panic so you shouldn't see it. - -But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. -To circumvent this, you should call - - defer GinkgoRecover() - -at the top of the goroutine that caused this panic. -` -const defaultTimeout = 1 - -var globalSuite *suite.Suite -var globalFailer *failer.Failer - -func init() { - config.Flags(flag.CommandLine, "ginkgo", true) - GinkgoWriter = writer.New(os.Stdout) - globalFailer = failer.New() - globalSuite = suite.New(globalFailer) -} - -//GinkgoWriter implements an io.Writer -//When running in verbose mode any writes to GinkgoWriter will be immediately printed -//to stdout. 
Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen -//only if the current test fails. -var GinkgoWriter io.Writer - -//The interface by which Ginkgo receives *testing.T -type GinkgoTestingT interface { - Fail() -} - -//GinkgoParallelNode returns the parallel node number for the current ginkgo process -//The node number is 1-indexed -func GinkgoParallelNode() int { - return config.GinkgoConfig.ParallelNode -} - -//Some matcher libraries or legacy codebases require a *testing.T -//GinkgoT implements an interface analogous to *testing.T and can be used if -//the library in question accepts *testing.T through an interface -// -// For example, with testify: -// assert.Equal(GinkgoT(), 123, 123, "they should be equal") -// -// Or with gomock: -// gomock.NewController(GinkgoT()) -// -// GinkgoT() takes an optional offset argument that can be used to get the -// correct line number associated with the failure. -func GinkgoT(optionalOffset ...int) GinkgoTInterface { - offset := 3 - if len(optionalOffset) > 0 { - offset = optionalOffset[0] - } - return testingtproxy.New(GinkgoWriter, Fail, offset) -} - -//The interface returned by GinkgoT(). This covers most of the methods -//in the testing package's T. -type GinkgoTInterface interface { - Fail() - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - FailNow() - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Failed() bool - Parallel() - Skip(args ...interface{}) - Skipf(format string, args ...interface{}) - SkipNow() - Skipped() bool -} - -//Custom Ginkgo test reporters must implement the Reporter interface. 
-// -//The custom reporter is passed in a SuiteSummary when the suite begins and ends, -//and a SpecSummary just before a spec begins and just after a spec ends -type Reporter reporters.Reporter - -//Asynchronous specs are given a channel of the Done type. You must close or write to the channel -//to tell Ginkgo that your async test is done. -type Done chan<- interface{} - -//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription -// FullTestText: a concatenation of ComponentTexts and the TestText -// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test -// TestText: the text in the actual It or Measure node -// IsMeasurement: true if the current test is a measurement -// FileName: the name of the file containing the current test -// LineNumber: the line number for the current test -type GinkgoTestDescription struct { - FullTestText string - ComponentTexts []string - TestText string - - IsMeasurement bool - - FileName string - LineNumber int -} - -//CurrentGinkgoTestDescripton returns information about the current running test. -func CurrentGinkgoTestDescription() GinkgoTestDescription { - summary, ok := globalSuite.CurrentRunningSpecSummary() - if !ok { - return GinkgoTestDescription{} - } - - subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1] - - return GinkgoTestDescription{ - ComponentTexts: summary.ComponentTexts[1:], - FullTestText: strings.Join(summary.ComponentTexts[1:], " "), - TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1], - IsMeasurement: summary.IsMeasurement, - FileName: subjectCodeLocation.FileName, - LineNumber: subjectCodeLocation.LineNumber, - } -} - -//Measurement tests receive a Benchmarker. -// -//You use the Time() function to time how long the passed in body function takes to run -//You use the RecordValue() function to track arbitrary numerical measurements. 
-//The optional info argument is passed to the test reporter and can be used to -// provide the measurement data to a custom reporter with context. -// -//See http://onsi.github.io/ginkgo/#benchmark_tests for more details -type Benchmarker interface { - Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) - RecordValue(name string, value float64, info ...interface{}) -} - -//RunSpecs is the entry point for the Ginkgo test runner. -//You must call this within a Golang testing TestX(t *testing.T) function. -// -//To bootstrap a test suite you can use the Ginkgo CLI: -// -// ginkgo bootstrap -func RunSpecs(t GinkgoTestingT, description string) bool { - specReporters := []Reporter{buildDefaultReporter()} - return RunSpecsWithCustomReporters(t, description, specReporters) -} - -//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace -//RunSpecs() with this method. -func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...) - return RunSpecsWithCustomReporters(t, description, specReporters) -} - -//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace -//RunSpecs() with this method. 
Note that parallel tests will not work correctly without the default reporter -func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - writer := GinkgoWriter.(*writer.Writer) - writer.SetStream(config.DefaultReporterConfig.Verbose) - reporters := make([]reporters.Reporter, len(specReporters)) - for i, reporter := range specReporters { - reporters[i] = reporter - } - passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) - if passed && hasFocusedTests { - fmt.Println("PASS | FOCUSED") - os.Exit(types.GINKGO_FOCUS_EXIT_CODE) - } - return passed -} - -func buildDefaultReporter() Reporter { - remoteReportingServer := config.GinkgoConfig.StreamHost - if remoteReportingServer == "" { - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor) - return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer) - } else { - return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor()) - } -} - -//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.) -func Fail(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - - globalFailer.Fail(message, codelocation.New(skip+1)) - panic(GINKGO_PANIC) -} - -//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` -//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that -//calls out to Gomega -// -//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent -//further assertions from running. This panic must be recovered. Ginkgo does this for you -//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...) 
-// -//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no -//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine. -func GinkgoRecover() { - e := recover() - if e != nil { - globalFailer.Panic(codelocation.New(1), e) - } -} - -//Describe blocks allow you to organize your specs. A Describe block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally -//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts. -func Describe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FDescribe -func FDescribe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PDescribe -func PDescribe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XDescribe -func XDescribe(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//Context blocks allow you to organize your specs. A Context block can contain any number of -//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks. -// -//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally -//equivalent. 
The difference is purely semantic -- you typical Describe the behavior of an object -//or method and, within that Describe, outline a number of Contexts. -func Context(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1)) - return true -} - -//You can focus the tests within a describe block using FContext -func FContext(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using PContext -func PContext(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//You can mark the tests within a describe block as pending using XContext -func XContext(text string, body func()) bool { - globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1)) - return true -} - -//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks -//within an It block. -// -//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a -//function that accepts a Done channel. When you do this, you can also provide an optional timeout. 
-func It(text string, body interface{}, timeout ...float64) bool { - globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can focus individual Its using FIt -func FIt(text string, body interface{}, timeout ...float64) bool { - globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//You can mark Its as pending using PIt -func PIt(text string, _ ...interface{}) bool { - globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Its as pending using XIt -func XIt(text string, _ ...interface{}) bool { - globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//Measure blocks run the passed in body function repeatedly (determined by the samples argument) -//and accumulate metrics provided to the Benchmarker by the body function. -// -//The body function must have the signature: -// func(b Benchmarker) -func Measure(text string, body interface{}, samples int) bool { - globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples) - return true -} - -//You can focus individual Measures using FMeasure -func FMeasure(text string, body interface{}, samples int) bool { - globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples) - return true -} - -//You can mark Maeasurements as pending using PMeasure -func PMeasure(text string, _ ...interface{}) bool { - globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//You can mark Maeasurements as pending using XMeasure -func XMeasure(text string, _ ...interface{}) bool { - globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0) - return true -} - -//BeforeSuite blocks are run just once before any 
specs are run. When running in parallel, each -//parallel node process will call BeforeSuite. -// -//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel -// -//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. -func BeforeSuite(body interface{}, timeout ...float64) bool { - globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed. -//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting. -// -//When running in parallel, each parallel node process will call AfterSuite. -// -//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel -// -//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. -func AfterSuite(body interface{}, timeout ...float64) bool { - globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across -//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that -//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait -//until that node is done before running. -// -//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is -//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1) -//to the second function (on all the other nodes). 
-// -//The functions have the following signatures. The first function (which only runs on node 1) has the signature: -// -// func() []byte -// -//or, to run asynchronously: -// -// func(done Done) []byte -// -//The byte array returned by the first function is then passed to the second function, which has the signature: -// -// func(data []byte) -// -//or, to run asynchronously: -// -// func(data []byte, done Done) -// -//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: -// -// var dbClient db.Client -// var dbRunner db.Runner -// -// var _ = SynchronizedBeforeSuite(func() []byte { -// dbRunner = db.NewRunner() -// err := dbRunner.Start() -// Ω(err).ShouldNot(HaveOccurred()) -// return []byte(dbRunner.URL) -// }, func(data []byte) { -// dbClient = db.NewClient() -// err := dbClient.Connect(string(data)) -// Ω(err).ShouldNot(HaveOccurred()) -// }) -func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { - globalSuite.SetSynchronizedBeforeSuiteNode( - node1Body, - allNodesBody, - codelocation.New(1), - parseTimeout(timeout...), - ) - return true -} - -//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up -//external singleton resources shared across nodes when running tests in parallel. -// -//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 -//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until -//all other nodes are finished. -// -//Both functions have the same signature: either func() or func(done Done) to run asynchronously. -// -//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. 
Here, SynchronizedAfterSuite is used to tear down the shared database -//only after all nodes have finished: -// -// var _ = SynchronizedAfterSuite(func() { -// dbClient.Cleanup() -// }, func() { -// dbRunner.Stop() -// }) -func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { - globalSuite.SetSynchronizedAfterSuiteNode( - allNodesBody, - node1Body, - codelocation.New(1), - parseTimeout(timeout...), - ) - return true -} - -//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested -//Describe and Context blocks the outermost BeforeEach blocks are run first. -// -//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func BeforeEach(body interface{}, timeout ...float64) bool { - globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details, -//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) -// -//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func JustBeforeEach(body interface{}, timeout ...float64) bool { - globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested -//Describe and Context blocks the innermost AfterEach blocks are run first. 
-// -//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts -//a Done channel -func AfterEach(body interface{}, timeout ...float64) bool { - globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) - return true -} - -func parseTimeout(timeout ...float64) time.Duration { - if len(timeout) == 0 { - return time.Duration(defaultTimeout * int64(time.Second)) - } else { - return time.Duration(timeout[0] * float64(time.Second)) - } -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go deleted file mode 100644 index ccb3669..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/extra_functions_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package tmp - -import ( - "testing" -) - -func TestSomethingLessImportant(t *testing.T) { - strp := "hello!" - somethingImportant(t, &strp) -} - -func somethingImportant(t *testing.T, message *string) { - t.Log("Something important happened in a test: " + *message) -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go deleted file mode 100644 index cde42e4..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested/nested_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package nested - -import ( - "testing" -) - -func TestSomethingLessImportant(t *testing.T) { - whatever := &UselessStruct{} - t.Fail(whatever.ImportantField != "SECRET_PASSWORD") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go deleted file mode 100644 index 
7cdd326..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/nested_without_gofiles/subpackage/nested_subpackage_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package subpackage - -import ( - "testing" -) - -func TestNestedSubPackages(t *testing.T) { - t.Fail(true) -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go deleted file mode 100644 index a682eea..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/outside_package_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package tmp_test - -import ( - "testing" -) - -type UselessStruct struct { - ImportantField string -} - -func TestSomethingImportant(t *testing.T) { - whatever := &UselessStruct{} - if whatever.ImportantField != "SECRET_PASSWORD" { - t.Fail() - } -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go deleted file mode 100644 index 049829a..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_fixtures/xunit_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package tmp - -import ( - "testing" -) - -type UselessStruct struct { - ImportantField string - T *testing.T -} - -var testFunc = func(t *testing.T, arg *string) {} - -func assertEqual(t *testing.T, arg1, arg2 interface{}) { - if arg1 != arg2 { - t.Fail() - } -} - -func TestSomethingImportant(t *testing.T) { - whatever := &UselessStruct{ - T: t, - ImportantField: "SECRET_PASSWORD", - } - something := &UselessStruct{ImportantField: "string value"} - assertEqual(t, whatever.ImportantField, "SECRET_PASSWORD") - assertEqual(t, something.ImportantField, "string value") - - var foo = func(t *testing.T) {} - foo(t) - - strp := "something" - testFunc(t, &strp) - t.Fail() -} - -func Test3Things(t *testing.T) { - if 3 != 3 { - t.Fail() - } -} diff 
--git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go deleted file mode 100644 index ce60ff1..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/extra_functions_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package tmp - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("Testing with Ginkgo", func() { - It("something less important", func() { - - strp := "hello!" - somethingImportant(GinkgoT(), &strp) - }) -}) - -func somethingImportant(t GinkgoTInterface, message *string) { - t.Log("Something important happened in a test: " + *message) -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go deleted file mode 100644 index 0dc9802..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/fixtures_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package tmp - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestTmp(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Tmp Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go deleted file mode 100644 index 0b30289..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_subpackage_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package subpackage - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("Testing with Ginkgo", func() { - It("nested sub packages", func() { - GinkgoT().Fail(true) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go deleted file mode 100644 index bd6fa1b..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package nested_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestNested(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Nested Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go deleted file mode 100644 index 2689d4b..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/nested_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package nested - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("Testing with Ginkgo", func() { - It("something less important", func() { - - whatever := &UselessStruct{} - GinkgoT().Fail(whatever.ImportantField != "SECRET_PASSWORD") - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go deleted file mode 100644 index 4a7ed99..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/outside_package_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package tmp_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("Testing with Ginkgo", func() { - It("something important", func() { - - whatever := &UselessStruct{} - if whatever.ImportantField != "SECRET_PASSWORD" { - GinkgoT().Fail() - } - }) -}) - -type UselessStruct struct { - ImportantField string -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go deleted file mode 100644 index ce162f1..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package convert_fixtures_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestConvertFixtures(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "ConvertFixtures Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go deleted file mode 100644 index 3856555..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/convert_goldmasters/xunit_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package tmp - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("Testing with Ginkgo", func() { - It("something important", func() { - - whatever := &UselessStruct{ - T: GinkgoT(), - ImportantField: "SECRET_PASSWORD", - } - something := &UselessStruct{ImportantField: "string value"} - assertEqual(GinkgoT(), whatever.ImportantField, "SECRET_PASSWORD") - assertEqual(GinkgoT(), something.ImportantField, "string value") - - var foo = func(t GinkgoTInterface) {} - foo(GinkgoT()) - - strp := "something" - testFunc(GinkgoT(), &strp) - GinkgoT().Fail() - }) - It("3 things", func() { - - if 3 != 3 { - GinkgoT().Fail() - } - }) -}) - -type UselessStruct struct { - ImportantField string - T GinkgoTInterface -} - -var testFunc = func(t GinkgoTInterface, arg *string) {} - -func assertEqual(t GinkgoTInterface, arg1, arg2 interface{}) { - if arg1 != arg2 { - t.Fail() - } -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go deleted file mode 100644 index 436e21d..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage.go +++ /dev/null @@ -1,21 +0,0 @@ -package coverage_fixture - -func A() string { - return "A" -} - -func B() string { - return "B" -} - -func C() string { - return "C" -} - -func D() string { - return "D" -} - -func E() string { - return "untested" -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go deleted file mode 100644 index 006301b..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package coverage_fixture_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestCoverageFixture(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "CoverageFixture Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go deleted file mode 100644 index e7e1ef6..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/coverage_fixture_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package coverage_fixture_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("CoverageFixture", func() { - It("should test A", func() { - Ω(A()).Should(Equal("A")) - }) - - It("should test B", func() { - Ω(B()).Should(Equal("B")) - }) - - It("should test C", func() { - Ω(C()).Should(Equal("C")) - }) - - It("should test D", func() { - Ω(D()).Should(Equal("D")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go deleted file mode 100644 index 8713806..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package does_not_compile_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestDoes_not_compile(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Does_not_compile Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go deleted file mode 100644 index 4802f52..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile/does_not_compile_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package does_not_compile_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/does_not_compile" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("DoesNotCompile", func() { - -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go deleted file mode 100644 index 762b6b3..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package eventually_failing_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestEventuallyFailing(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "EventuallyFailing Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go deleted file mode 100644 index b52e5b5..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/eventually_failing/eventually_failing_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package eventually_failing_test - -import ( - "fmt" - "io/ioutil" - "time" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("EventuallyFailing", func() { - It("should fail on the third try", func() { - time.Sleep(time.Second) - files, err := ioutil.ReadDir(".") - Ω(err).ShouldNot(HaveOccurred()) - Ω(len(files)).Should(BeNumerically("<", 5)) - ioutil.WriteFile(fmt.Sprintf("./%d", len(files)), []byte("foo"), 0777) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go deleted file mode 100644 index dfa4ded..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/exiting_synchronized_setup_tests/exiting_synchronized_setup_tests_suite_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package synchronized_setup_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "fmt" - "os" - "testing" -) - -func TestSynchronized_setup_tests(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Synchronized_setup_tests Suite") -} - -var beforeData string - -var _ = SynchronizedBeforeSuite(func() []byte { - fmt.Printf("BEFORE_A_%d\n", GinkgoParallelNode()) - os.Exit(1) - return []byte("WHAT EVZ") -}, func(data []byte) { - println("NEVER SEE THIS") -}) - -var _ = Describe("Synchronized Setup", func() { - It("should do nothing", func() { - Ω(true).Should(BeTrue()) - }) - - It("should do nothing", func() { - Ω(true).Should(BeTrue()) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go deleted file mode 100644 index d18e437..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package fail_fixture_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFail_fixture(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Fail_fixture Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go deleted file mode 100644 index 46bb767..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/fail_fixture/fail_fixture_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package fail_fixture_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = It("handles top level failures", func() { - Ω("a top level failure on line 9").Should(Equal("nope")) - println("NEVER SEE THIS") -}) - -var _ = It("handles async top level failures", func(done Done) { - Fail("an async top level failure on line 14") - println("NEVER SEE THIS") -}, 0.1) - -var _ = It("FAIL in a goroutine", func(done Done) { - go func() { - defer GinkgoRecover() - Fail("a top level goroutine failure on line 21") - println("NEVER SEE THIS") - }() -}, 0.1) - -var _ = Describe("Excercising different failure modes", func() { - It("synchronous failures", func() { - Ω("a sync failure").Should(Equal("nope")) - println("NEVER SEE THIS") - }) - - It("synchronous panics", func() { - panic("a sync panic") - println("NEVER SEE THIS") - }) - - It("synchronous failures with FAIL", func() { - Fail("a sync FAIL failure") - println("NEVER SEE THIS") - }) - - It("async timeout", func(done Done) { - Ω(true).Should(BeTrue()) - }, 0.1) - - It("async failure", func(done Done) { - Ω("an async failure").Should(Equal("nope")) - println("NEVER SEE THIS") - }, 0.1) - - It("async panic", func(done Done) { - panic("an async panic") - println("NEVER SEE THIS") - }, 0.1) - - It("async failure with FAIL", func(done Done) { - Fail("an async FAIL failure") - println("NEVER SEE THIS") - }, 0.1) - - It("FAIL in a goroutine", func(done Done) { - go func() { - defer GinkgoRecover() - Fail("a goroutine FAIL failure") - println("NEVER SEE THIS") - }() - }, 0.1) - - It("Gomega in a goroutine", func(done Done) { - go func() { - defer GinkgoRecover() - Ω("a goroutine failure").Should(Equal("nope")) - println("NEVER SEE THIS") - }() - }, 0.1) - - It("Panic in a goroutine", func(done Done) { - go func() { - defer GinkgoRecover() - panic("a goroutine panic") - println("NEVER SEE THIS") - }() - }, 0.1) - - Measure("a FAIL measure", func(Benchmarker) { - Fail("a measure FAIL failure") - println("NEVER SEE THIS") - }, 1) - - 
Measure("a gomega failed measure", func(Benchmarker) { - Ω("a measure failure").Should(Equal("nope")) - println("NEVER SEE THIS") - }, 1) - - Measure("a panicking measure", func(Benchmarker) { - panic("a measure panic") - println("NEVER SEE THIS") - }, 1) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go deleted file mode 100644 index 2ca2db5..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_suite_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package failing_before_suite_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFailingAfterSuite(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "FailingAfterSuite Suite") -} - -var _ = BeforeSuite(func() { - println("BEFORE SUITE") -}) - -var _ = AfterSuite(func() { - println("AFTER SUITE") - panic("BAM!") -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go deleted file mode 100644 index bb54a27..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_after_suite/failing_after_suite_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package failing_before_suite_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("FailingBeforeSuite", func() { - It("should run", func() { - println("A TEST") - }) - - It("should run", func() { - println("A TEST") - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go deleted file mode 100644 index 86b78f6..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_suite_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package failing_before_suite_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFailing_before_suite(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Failing_before_suite Suite") -} - -var _ = BeforeSuite(func() { - println("BEFORE SUITE") - panic("BAM!") -}) - -var _ = AfterSuite(func() { - println("AFTER SUITE") -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go deleted file mode 100644 index 19e88b4..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_before_suite/failing_before_suite_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package failing_before_suite_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("FailingBeforeSuite", func() { - It("should never run", func() { - println("NEVER SEE THIS") - }) - - It("should never run", func() { - println("NEVER SEE THIS") - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go deleted file mode 100644 index e32cd61..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests.go +++ /dev/null @@ -1,5 +0,0 @@ -package failing_ginkgo_tests - -func AlwaysFalse() bool { - return false -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go deleted file mode 100644 index 17e225a..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package failing_ginkgo_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFailing_ginkgo_tests(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Failing_ginkgo_tests Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go deleted file mode 100644 index 7019691..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests/failing_ginkgo_tests_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package failing_ginkgo_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/failing_ginkgo_tests" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("FailingGinkgoTests", func() { - It("should fail", func() { - Ω(AlwaysFalse()).Should(BeTrue()) - }) - - It("should pass", func() { - Ω(AlwaysFalse()).Should(BeFalse()) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go deleted file mode 100644 index a440abd..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags.go +++ /dev/null @@ -1,9 +0,0 @@ -package flags - -func Tested() string { - return "tested" -} - -func Untested() string { - return "untested" -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go deleted file mode 100644 index b4e7dea..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package flags_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFlags(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Flags Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go deleted file mode 100644 index 59b70bd..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests/flags_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package flags_test - -import ( - "flag" - "fmt" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/flags_tests" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "time" -) - -var customFlag string - -func init() { - flag.StringVar(&customFlag, "customFlag", "default", "custom flag!") -} - -var _ = Describe("Testing various flags", func() { - FDescribe("the focused set", func() { - Measure("a measurement", func(b Benchmarker) { - b.RecordValue("a value", 3) - }, 3) - - It("should honor -cover", func() { - Ω(Tested()).Should(Equal("tested")) - }) - - PIt("should honor -failOnPending and -noisyPendings") - - Describe("smores", func() { - It("should honor -skip: marshmallow", func() { - println("marshmallow") - }) - - It("should honor -focus: chocolate", func() { - println("chocolate") - }) - }) - - It("should detect races", func(done Done) { - var a string - go func() { - a = "now you don't" - close(done) - }() - a = "now you see me" - println(a) - }) - - It("should randomize A", func() { - println("RANDOM_A") - }) - - It("should randomize B", func() { - println("RANDOM_B") - }) - - It("should randomize C", func() { - println("RANDOM_C") - }) - - It("should honor -slowSpecThreshold", func() { - time.Sleep(100 * time.Millisecond) - }) - - It("should pass in additional arguments after '--' directly to the test process", func() { - fmt.Printf("CUSTOM_FLAG: %s", customFlag) - }) - }) - - Describe("more smores", func() { - It("should not run these unless -focus is set", func() { - println("smores") - }) - }) - - Describe("a failing test", func() { - It("should fail", func() { - Ω(true).Should(Equal(false)) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go deleted file mode 100644 index c7e7067..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package focused_fixture_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFocused_fixture(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Focused_fixture Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go deleted file mode 100644 index c676a75..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/focused_fixture/focused_fixture_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package focused_fixture_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("FocusedFixture", func() { - FDescribe("focused", func() { - It("focused", func() { - - }) - }) - - FContext("focused", func() { - It("focused", func() { - - }) - }) - - FIt("focused", func() { - - }) - - Describe("not focused", func() { - It("not focused", func() { - - }) - }) - - Context("not focused", func() { - It("not focused", func() { - - }) - }) - - It("not focused", func() { - - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go deleted file mode 100644 index ca12c0d..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests.go +++ /dev/null @@ -1,5 +0,0 @@ -package more_ginkgo_tests - -func AlwaysTrue() bool { - return true -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go deleted file mode 100644 index f44716d..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package 
more_ginkgo_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestMore_ginkgo_tests(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "More_ginkgo_tests Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go deleted file mode 100644 index eaf3bbe..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests/more_ginkgo_tests_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package more_ginkgo_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/more_ginkgo_tests" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("MoreGinkgoTests", func() { - It("should pass", func() { - Ω(AlwaysTrue()).Should(BeTrue()) - }) - - It("should always pass", func() { - Ω(AlwaysTrue()).Should(BeTrue()) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go deleted file mode 100644 index da29a2c..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/no_tests/no_tests.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -func main() { -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go deleted file mode 100644 index b710dd1..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests.go +++ /dev/null @@ -1,9 +0,0 @@ -package passing_ginkgo_tests - -func StringIdentity(a string) string { - return a -} - -func IntegerIdentity(a int) int { - return a 
-} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go deleted file mode 100644 index 2770997..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package passing_ginkgo_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestPassing_ginkgo_tests(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Passing_ginkgo_tests Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go deleted file mode 100644 index cc0862a..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests/passing_ginkgo_tests_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package passing_ginkgo_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_ginkgo_tests" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("PassingGinkgoTests", func() { - It("should proxy strings", func() { - Ω(StringIdentity("foo")).Should(Equal("foo")) - }) - - It("should proxy integers", func() { - Ω(IntegerIdentity(3)).Should(Equal(3)) - }) - - It("should do it again", func() { - Ω(StringIdentity("foo")).Should(Equal("foo")) - Ω(IntegerIdentity(3)).Should(Equal(3)) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go deleted file mode 100644 index f41ab5a..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_setup_suite_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package passing_before_suite_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestPassingSuiteSetup(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "PassingSuiteSetup Suite") -} - -var a string -var b string - -var _ = BeforeSuite(func() { - a = "ran before suite" - println("BEFORE SUITE") -}) - -var _ = AfterSuite(func() { - b = "ran after suite" - println("AFTER SUITE") -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go deleted file mode 100644 index 707900e..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/passing_suite_setup/passing_suite_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package passing_before_suite_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("PassingSuiteSetup", func() { - It("should pass", func() { - Ω(a).Should(Equal("ran before suite")) - Ω(b).Should(BeEmpty()) - }) - - It("should pass", func() { - Ω(a).Should(Equal("ran before suite")) - Ω(b).Should(BeEmpty()) - }) - - It("should pass", func() { - Ω(a).Should(Equal("ran before suite")) - Ω(b).Should(BeEmpty()) - }) - - It("should pass", func() { - Ω(a).Should(Equal("ran before suite")) - Ω(b).Should(BeEmpty()) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go deleted file mode 100644 index e1d7b86..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/synchronized_setup_tests/synchronized_setup_tests_suite_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package synchronized_setup_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "fmt" - "testing" - "time" -) - -func TestSynchronized_setup_tests(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Synchronized_setup_tests Suite") -} - -var beforeData string - -var _ = SynchronizedBeforeSuite(func() []byte { - fmt.Printf("BEFORE_A_%d\n", GinkgoParallelNode()) - time.Sleep(100 * time.Millisecond) - return []byte("DATA") -}, func(data []byte) { - fmt.Printf("BEFORE_B_%d: %s\n", GinkgoParallelNode(), string(data)) - beforeData += string(data) + "OTHER" -}) - -var _ = SynchronizedAfterSuite(func() { - fmt.Printf("\nAFTER_A_%d\n", GinkgoParallelNode()) - time.Sleep(100 * time.Millisecond) -}, func() { - fmt.Printf("AFTER_B_%d\n", GinkgoParallelNode()) -}) - -var _ = Describe("Synchronized Setup", func() { - It("should run the before suite once", func() { - Ω(beforeData).Should(Equal("DATAOTHER")) - }) - - It("should run the before suite once", func() { - Ω(beforeData).Should(Equal("DATAOTHER")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go deleted file mode 100644 index d585d14..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/ignored_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build complex_tests - -package tags_tests_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("Ignored", func() { - It("should not have these tests", func() { - - }) - - It("should not have these tests", func() { - - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go deleted file mode 100644 index 9b349e5..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package tags_tests_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestTagsTests(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "TagsTests Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go deleted file mode 100644 index e1e979b..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/tags_tests/tags_tests_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package tags_tests_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" -) - -var _ = Describe("TagsTests", func() { - It("should have a test", func() { - - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go deleted file mode 100644 index 27846b4..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A.go +++ /dev/null @@ -1,7 +0,0 @@ -package A - -import "github.com/gocircuit/escher/kit/github.com/onsi/B" - -func DoIt() string { - return B.DoIt() -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go deleted file mode 100644 index ef423d8..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package A_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestA(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "A Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go deleted file mode 100644 index eb4f9ce..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A/A_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package A_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/A" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("A", func() { - It("should do it", func() { - Ω(DoIt()).Should(Equal("done!")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go deleted file mode 100644 index 9bda60c..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B.go +++ /dev/null @@ -1,7 +0,0 @@ -package B - -import "github.com/gocircuit/escher/kit/github.com/onsi/C" - -func DoIt() string { - return C.DoIt() -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go deleted file mode 100644 index 9df963d..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package B_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestB(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "B Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go deleted file mode 100644 index acc03c5..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B/B_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package B_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/B" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("B", func() { - It("should do it", func() { - Ω(DoIt()).Should(Equal("done!")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go deleted file mode 100644 index 205b688..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C.go +++ /dev/null @@ -1,5 +0,0 @@ -package C - -func DoIt() string { - return "done!" -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go deleted file mode 100644 index b6af002..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package C_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestC(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "C Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go deleted file mode 100644 index 1736bd6..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C/C_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package C_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("C", func() { - It("should do it", func() { - Ω(DoIt()).Should(Equal("done!")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go deleted file mode 100644 index 960f3e5..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D.go +++ /dev/null @@ -1,7 +0,0 @@ -package D - -import "github.com/gocircuit/escher/kit/github.com/onsi/C" - -func DoIt() string { - return C.DoIt() -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go deleted file mode 100644 index b24624a..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package D_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestD(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "D Suite") -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go deleted file mode 100644 index 658c5f7..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/D/D_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package D_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/integration/_fixtures/watch_fixtures/C" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("D", func() { - It("should do it", func() { - Ω(DoIt()).Should(Equal("done!")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go deleted file mode 100644 index cb8fc8b..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests.go +++ /dev/null @@ -1,5 +0,0 @@ -package xunit_tests - -func AlwaysTrue() bool { - return true -} diff --git a/kit/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go b/kit/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go deleted file mode 100644 index a6ebbe1..0000000 --- a/kit/github.com/onsi/ginkgo/integration/_fixtures/xunit_tests/xunit_tests_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package xunit_tests - -import ( - "testing" -) - -func TestAlwaysTrue(t *testing.T) { - if AlwaysTrue() != true { - t.Errorf("Expected true, got false") - } -} diff --git a/kit/github.com/onsi/ginkgo/integration/convert_test.go b/kit/github.com/onsi/ginkgo/integration/convert_test.go deleted file mode 100644 index 87c81a2..0000000 --- a/kit/github.com/onsi/ginkgo/integration/convert_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "io/ioutil" - "os" - "os/exec" - "path/filepath" -) - -var _ = Describe("ginkgo convert", func() { - var tmpDir string - - readConvertedFileNamed := func(pathComponents ...string) string { - pathToFile := filepath.Join(tmpDir, "convert_fixtures", filepath.Join(pathComponents...)) - bytes, err := ioutil.ReadFile(pathToFile) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - return string(bytes) - } - - readGoldMasterNamed := func(filename string) string { - bytes, err := ioutil.ReadFile(filepath.Join("_fixtures", "convert_goldmasters", filename)) - Ω(err).ShouldNot(HaveOccurred()) - - return string(bytes) - } - - BeforeEach(func() { - var err error - - tmpDir, err = ioutil.TempDir("", "ginkgo-convert") - Ω(err).ShouldNot(HaveOccurred()) - - err = exec.Command("cp", "-r", filepath.Join("_fixtures", "convert_fixtures"), tmpDir).Run() - Ω(err).ShouldNot(HaveOccurred()) - }) - - JustBeforeEach(func() { - cwd, err := os.Getwd() - Ω(err).ShouldNot(HaveOccurred()) - - relPath, err := filepath.Rel(cwd, filepath.Join(tmpDir, "convert_fixtures")) - Ω(err).ShouldNot(HaveOccurred()) - - err = exec.Command(pathToGinkgo, "convert", relPath).Run() - Ω(err).ShouldNot(HaveOccurred()) - }) - - AfterEach(func() { - err := os.RemoveAll(tmpDir) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("rewrites xunit tests as ginkgo tests", func() { - convertedFile := readConvertedFileNamed("xunit_test.go") - goldMaster := readGoldMasterNamed("xunit_test.go") - Ω(convertedFile).Should(Equal(goldMaster)) - }) - - It("rewrites all usages of *testing.T as mr.T()", func() { - convertedFile := readConvertedFileNamed("extra_functions_test.go") - goldMaster := readGoldMasterNamed("extra_functions_test.go") - Ω(convertedFile).Should(Equal(goldMaster)) - }) - - It("rewrites tests in the package dir that belong to other packages", func() { - convertedFile := readConvertedFileNamed("outside_package_test.go") - goldMaster := 
readGoldMasterNamed("outside_package_test.go") - Ω(convertedFile).Should(Equal(goldMaster)) - }) - - It("rewrites tests in nested packages", func() { - convertedFile := readConvertedFileNamed("nested", "nested_test.go") - goldMaster := readGoldMasterNamed("nested_test.go") - Ω(convertedFile).Should(Equal(goldMaster)) - }) - - Context("ginkgo test suite files", func() { - It("creates a ginkgo test suite file for the package you specified", func() { - testsuite := readConvertedFileNamed("convert_fixtures_suite_test.go") - goldMaster := readGoldMasterNamed("suite_test.go") - Ω(testsuite).Should(Equal(goldMaster)) - }) - - It("converts go tests in deeply nested packages (some may not contain go files)", func() { - testsuite := readConvertedFileNamed("nested_without_gofiles", "subpackage", "nested_subpackage_test.go") - goldMaster := readGoldMasterNamed("nested_subpackage_test.go") - Ω(testsuite).Should(Equal(goldMaster)) - }) - - It("creates ginkgo test suites for all nested packages", func() { - testsuite := readConvertedFileNamed("nested", "nested_suite_test.go") - goldMaster := readGoldMasterNamed("nested_suite_test.go") - Ω(testsuite).Should(Equal(goldMaster)) - }) - }) - - Context("with an existing test suite file", func() { - BeforeEach(func() { - goldMaster := readGoldMasterNamed("fixtures_suite_test.go") - err := ioutil.WriteFile(filepath.Join(tmpDir, "convert_fixtures", "tmp_suite_test.go"), []byte(goldMaster), 0600) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("gracefully handles existing test suite files", func() { - //nothing should have gone wrong! - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/coverage_test.go b/kit/github.com/onsi/ginkgo/integration/coverage_test.go deleted file mode 100644 index 17a386d..0000000 --- a/kit/github.com/onsi/ginkgo/integration/coverage_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - "os" - "os/exec" -) - -var _ = Describe("Coverage Specs", func() { - AfterEach(func() { - os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile") - }) - - It("runs coverage analysis in series and in parallel", func() { - session := startGinkgo("./_fixtures/coverage_fixture", "-cover") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - Ω(output).Should(ContainSubstring("coverage: 80.0% of statements")) - - serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput() - Ω(err).ShouldNot(HaveOccurred()) - - os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile") - - Eventually(startGinkgo("./_fixtures/coverage_fixture", "-cover", "-nodes=4")).Should(gexec.Exit(0)) - - parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput() - Ω(err).ShouldNot(HaveOccurred()) - - Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput)) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/fail_test.go b/kit/github.com/onsi/ginkgo/integration/fail_test.go deleted file mode 100644 index ca6c73c..0000000 --- a/kit/github.com/onsi/ginkgo/integration/fail_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" -) - -var _ = Describe("Failing Specs", func() { - var pathToTest string - - BeforeEach(func() { - pathToTest = tmpPath("failing") - copyIn("fail_fixture", pathToTest) - }) - - It("should fail in all the possible ways", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS")) - - Ω(output).Should(ContainSubstring("a top level failure on line 9")) - Ω(output).Should(ContainSubstring("fail_fixture_test.go:9")) - Ω(output).Should(ContainSubstring("an async top level failure on line 14")) - Ω(output).Should(ContainSubstring("fail_fixture_test.go:14")) - Ω(output).Should(ContainSubstring("a top level goroutine failure on line 21")) - Ω(output).Should(ContainSubstring("fail_fixture_test.go:21")) - - Ω(output).Should(ContainSubstring("a sync failure")) - Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a sync panic`)) - Ω(output).Should(ContainSubstring("a sync FAIL failure")) - Ω(output).Should(ContainSubstring("async timeout [It]")) - Ω(output).Should(ContainSubstring("Timed out")) - Ω(output).Should(ContainSubstring("an async failure")) - Ω(output).Should(MatchRegexp(`Test Panicked\n\s+an async panic`)) - Ω(output).Should(ContainSubstring("an async FAIL failure")) - Ω(output).Should(ContainSubstring("a goroutine FAIL failure")) - Ω(output).Should(ContainSubstring("a goroutine failure")) - Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a goroutine panic`)) - Ω(output).Should(ContainSubstring("a measure failure")) - Ω(output).Should(ContainSubstring("a measure FAIL failure")) - Ω(output).Should(MatchRegexp(`Test Panicked\n\s+a measure panic`)) - - Ω(output).Should(ContainSubstring("0 Passed | 16 Failed")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/flags_test.go 
b/kit/github.com/onsi/ginkgo/integration/flags_test.go deleted file mode 100644 index 9488692..0000000 --- a/kit/github.com/onsi/ginkgo/integration/flags_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - "os" - "path/filepath" - "strings" -) - -var _ = Describe("Flags Specs", func() { - var pathToTest string - - BeforeEach(func() { - pathToTest = tmpPath("flags") - copyIn("flags_tests", pathToTest) - }) - - getRandomOrders := func(output string) []int { - return []int{strings.Index(output, "RANDOM_A"), strings.Index(output, "RANDOM_B"), strings.Index(output, "RANDOM_C")} - } - - It("normally passes, runs measurements, prints out noisy pendings, does not randomize tests, and honors the programmatic focus", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Ran 3 samples:"), "has a measurement") - Ω(output).Should(ContainSubstring("10 Passed")) - Ω(output).Should(ContainSubstring("0 Failed")) - Ω(output).Should(ContainSubstring("1 Pending")) - Ω(output).Should(ContainSubstring("2 Skipped")) - Ω(output).Should(ContainSubstring("[PENDING]")) - Ω(output).Should(ContainSubstring("marshmallow")) - Ω(output).Should(ContainSubstring("chocolate")) - Ω(output).Should(ContainSubstring("CUSTOM_FLAG: default")) - Ω(output).Should(ContainSubstring("Detected Programmatic Focus - setting exit status to %d", types.GINKGO_FOCUS_EXIT_CODE)) - Ω(output).ShouldNot(ContainSubstring("smores")) - Ω(output).ShouldNot(ContainSubstring("SLOW TEST")) - Ω(output).ShouldNot(ContainSubstring("should honor -slowSpecThreshold")) - - orders := 
getRandomOrders(output) - Ω(orders[0]).Should(BeNumerically("<", orders[1])) - Ω(orders[1]).Should(BeNumerically("<", orders[2])) - }) - - It("should run a coverprofile when passed -cover", func() { - session := startGinkgo(pathToTest, "--noColor", "--cover", "--focus=the focused set") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - _, err := os.Stat(filepath.Join(pathToTest, "flags.coverprofile")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(output).Should(ContainSubstring("coverage: ")) - }) - - It("should fail when there are pending tests and it is passed --failOnPending", func() { - session := startGinkgo(pathToTest, "--noColor", "--failOnPending") - Eventually(session).Should(gexec.Exit(1)) - }) - - It("should not print out pendings when --noisyPendings=false", func() { - session := startGinkgo(pathToTest, "--noColor", "--noisyPendings=false") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - Ω(output).ShouldNot(ContainSubstring("[PENDING]")) - Ω(output).Should(ContainSubstring("1 Pending")) - }) - - It("should override the programmatic focus when told to focus", func() { - session := startGinkgo(pathToTest, "--noColor", "--focus=smores") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("marshmallow")) - Ω(output).Should(ContainSubstring("chocolate")) - Ω(output).Should(ContainSubstring("smores")) - Ω(output).Should(ContainSubstring("3 Passed")) - Ω(output).Should(ContainSubstring("0 Failed")) - Ω(output).Should(ContainSubstring("0 Pending")) - Ω(output).Should(ContainSubstring("10 Skipped")) - }) - - It("should override the programmatic focus when told to skip", func() { - session := startGinkgo(pathToTest, "--noColor", "--skip=marshmallow|failing") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - 
Ω(output).ShouldNot(ContainSubstring("marshmallow")) - Ω(output).Should(ContainSubstring("chocolate")) - Ω(output).Should(ContainSubstring("smores")) - Ω(output).Should(ContainSubstring("10 Passed")) - Ω(output).Should(ContainSubstring("0 Failed")) - Ω(output).Should(ContainSubstring("1 Pending")) - Ω(output).Should(ContainSubstring("2 Skipped")) - }) - - It("should run the race detector when told to", func() { - session := startGinkgo(pathToTest, "--noColor", "--race") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("WARNING: DATA RACE")) - }) - - It("should randomize tests when told to", func() { - session := startGinkgo(pathToTest, "--noColor", "--randomizeAllSpecs", "--seed=21") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - orders := getRandomOrders(output) - Ω(orders[0]).ShouldNot(BeNumerically("<", orders[1])) - }) - - It("should skip measurements when told to", func() { - session := startGinkgo(pathToTest, "--skipMeasurements") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - Ω(output).ShouldNot(ContainSubstring("Ran 3 samples:"), "has a measurement") - Ω(output).Should(ContainSubstring("3 Skipped")) - }) - - It("should watch for slow specs", func() { - session := startGinkgo(pathToTest, "--slowSpecThreshold=0.05") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("SLOW TEST")) - Ω(output).Should(ContainSubstring("should honor -slowSpecThreshold")) - }) - - It("should pass additional arguments in", func() { - session := startGinkgo(pathToTest, "--", "--customFlag=madagascar") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - 
Ω(output).Should(ContainSubstring("CUSTOM_FLAG: madagascar")) - }) - - It("should print out full stack traces for failures when told to", func() { - session := startGinkgo(pathToTest, "--focus=a failing test", "--trace") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Full Stack Trace")) - }) - - It("should fail fast when told to", func() { - pathToTest = tmpPath("fail") - copyIn("fail_fixture", pathToTest) - session := startGinkgo(pathToTest, "--failFast") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("1 Failed")) - Ω(output).Should(ContainSubstring("15 Skipped")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/integration.go b/kit/github.com/onsi/ginkgo/integration/integration.go deleted file mode 100644 index 76ab1b7..0000000 --- a/kit/github.com/onsi/ginkgo/integration/integration.go +++ /dev/null @@ -1 +0,0 @@ -package integration diff --git a/kit/github.com/onsi/ginkgo/integration/integration_suite_test.go b/kit/github.com/onsi/ginkgo/integration/integration_suite_test.go deleted file mode 100644 index d00ebf1..0000000 --- a/kit/github.com/onsi/ginkgo/integration/integration_suite_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package integration_test - -import ( - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - - "testing" - "time" -) - -var tmpDir string -var pathToGinkgo string - -func TestIntegration(t *testing.T) { - SetDefaultEventuallyTimeout(15 * time.Second) - RegisterFailHandler(Fail) - RunSpecs(t, "Integration Suite") -} - -var _ = SynchronizedBeforeSuite(func() []byte { - pathToGinkgo, err := gexec.Build("github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/ginkgo") - Ω(err).ShouldNot(HaveOccurred()) - return []byte(pathToGinkgo) -}, func(computedPathToGinkgo []byte) { - pathToGinkgo = string(computedPathToGinkgo) -}) - -var _ = BeforeEach(func() { - var err error - tmpDir, err = ioutil.TempDir("", "ginkgo-run") - Ω(err).ShouldNot(HaveOccurred()) -}) - -var _ = AfterEach(func() { - err := os.RemoveAll(tmpDir) - Ω(err).ShouldNot(HaveOccurred()) -}) - -var _ = SynchronizedAfterSuite(func() {}, func() { - gexec.CleanupBuildArtifacts() -}) - -func tmpPath(destination string) string { - return filepath.Join(tmpDir, destination) -} - -func copyIn(fixture string, destination string) { - err := os.MkdirAll(destination, 0777) - Ω(err).ShouldNot(HaveOccurred()) - - filepath.Walk(filepath.Join("_fixtures", fixture), func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - return nil - } - - base := filepath.Base(path) - - src, err := os.Open(path) - Ω(err).ShouldNot(HaveOccurred()) - - dst, err := os.Create(filepath.Join(destination, base)) - Ω(err).ShouldNot(HaveOccurred()) - - _, err = io.Copy(dst, src) - Ω(err).ShouldNot(HaveOccurred()) - return nil - }) -} - -func ginkgoCommand(dir string, args ...string) *exec.Cmd { - cmd := exec.Command(pathToGinkgo, args...) 
- cmd.Dir = dir - cmd.Env = []string{} - for _, env := range os.Environ() { - if !strings.Contains(env, "GINKGO_REMOTE_REPORTING_SERVER") { - cmd.Env = append(cmd.Env, env) - } - } - - return cmd -} - -func startGinkgo(dir string, args ...string) *gexec.Session { - cmd := ginkgoCommand(dir, args...) - session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) - Ω(err).ShouldNot(HaveOccurred()) - return session -} diff --git a/kit/github.com/onsi/ginkgo/integration/run_test.go b/kit/github.com/onsi/ginkgo/integration/run_test.go deleted file mode 100644 index c12e90a..0000000 --- a/kit/github.com/onsi/ginkgo/integration/run_test.go +++ /dev/null @@ -1,348 +0,0 @@ -package integration_test - -import ( - "runtime" - "strings" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gbytes" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" -) - -var _ = Describe("Running Specs", func() { - var pathToTest string - - Context("when pointed at the current directory", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - }) - - It("should run the tests in the working directory", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("•••")) - Ω(output).Should(ContainSubstring("SUCCESS! 
-- 3 Passed")) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when passed an explicit package to run", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - }) - - It("should run the ginkgo style tests", func() { - session := startGinkgo(tmpDir, "--noColor", pathToTest) - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("•••")) - Ω(output).Should(ContainSubstring("SUCCESS! -- 3 Passed")) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when passed a number of packages to run", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - otherPathToTest := tmpPath("other") - copyIn("passing_ginkgo_tests", pathToTest) - copyIn("more_ginkgo_tests", otherPathToTest) - }) - - It("should run the ginkgo style tests", func() { - session := startGinkgo(tmpDir, "--noColor", "--succinct=false", "ginkgo", "./other") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when passed a number of packages to run, some of which have focused tests", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - otherPathToTest := tmpPath("other") - focusedPathToTest := tmpPath("focused") - copyIn("passing_ginkgo_tests", pathToTest) - copyIn("more_ginkgo_tests", otherPathToTest) - copyIn("focused_fixture", focusedPathToTest) - }) - - It("should exit with a status code of 2 and explain why", func() { - session := startGinkgo(tmpDir, "--noColor", "--succinct=false", "-r") - 
Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - Ω(output).Should(ContainSubstring("Detected Programmatic Focus - setting exit status to %d", types.GINKGO_FOCUS_EXIT_CODE)) - }) - }) - - Context("when told to skipPackages", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - otherPathToTest := tmpPath("other") - focusedPathToTest := tmpPath("focused") - copyIn("passing_ginkgo_tests", pathToTest) - copyIn("more_ginkgo_tests", otherPathToTest) - copyIn("focused_fixture", focusedPathToTest) - }) - - It("should skip packages that match the list", func() { - session := startGinkgo(tmpDir, "--noColor", "--skipPackage=other,focused", "-r") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Passing_ginkgo_tests Suite")) - Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite")) - Ω(output).ShouldNot(ContainSubstring("Focused_fixture Suite")) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when told to randomizeSuites", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - otherPathToTest := tmpPath("other") - copyIn("passing_ginkgo_tests", pathToTest) - copyIn("more_ginkgo_tests", otherPathToTest) - }) - - It("should skip packages that match the regexp", func() { - session := startGinkgo(tmpDir, "--noColor", "--randomizeSuites", "-r", "--seed=2") - Eventually(session).Should(gexec.Exit(0)) - - Ω(session).Should(gbytes.Say("More_ginkgo_tests Suite")) - Ω(session).Should(gbytes.Say("Passing_ginkgo_tests Suite")) - - session = startGinkgo(tmpDir, "--noColor", "--randomizeSuites", "-r", "--seed=3") - Eventually(session).Should(gexec.Exit(0)) - - 
Ω(session).Should(gbytes.Say("Passing_ginkgo_tests Suite")) - Ω(session).Should(gbytes.Say("More_ginkgo_tests Suite")) - }) - }) - - Context("when pointed at a package with xunit style tests", func() { - BeforeEach(func() { - pathToTest = tmpPath("xunit") - copyIn("xunit_tests", pathToTest) - }) - - It("should run the xunit style tests", func() { - session := startGinkgo(pathToTest) - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("--- PASS: TestAlwaysTrue")) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when pointed at a package with no tests", func() { - BeforeEach(func() { - pathToTest = tmpPath("no_tests") - copyIn("no_tests", pathToTest) - }) - - It("should fail", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(1)) - - Ω(session.Err.Contents()).Should(ContainSubstring("Found no test suites")) - }) - }) - - Context("when pointed at a package that fails to compile", func() { - BeforeEach(func() { - pathToTest = tmpPath("does_not_compile") - copyIn("does_not_compile", pathToTest) - }) - - It("should fail", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Failed to compile")) - }) - }) - - Context("when running in parallel", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - }) - - Context("with a specific number of -nodes", func() { - It("should use the specified number of nodes", func() { - session := startGinkgo(pathToTest, "--noColor", "-succinct", "-nodes=2") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 3/3 specs - 2 nodes ••• SUCCESS! 
[\d.mus]+`)) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("with -p", func() { - It("it should autocompute the number of nodes", func() { - session := startGinkgo(pathToTest, "--noColor", "-succinct", "-p") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - nodes := runtime.NumCPU() - if nodes > 4 { - nodes = nodes - 1 - } - Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 3/3 specs - %d nodes ••• SUCCESS! [\d.mus]+`, nodes)) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - }) - - Context("when streaming in parallel", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - }) - - It("should print output in realtime", func() { - session := startGinkgo(pathToTest, "--noColor", "-stream", "-nodes=2") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring(`[1] Parallel test node 1/2.`)) - Ω(output).Should(ContainSubstring(`[2] Parallel test node 2/2.`)) - Ω(output).Should(ContainSubstring(`[1] SUCCESS!`)) - Ω(output).Should(ContainSubstring(`[2] SUCCESS!`)) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when running recursively", func() { - BeforeEach(func() { - passingTest := tmpPath("A") - otherPassingTest := tmpPath("E") - copyIn("passing_ginkgo_tests", passingTest) - copyIn("more_ginkgo_tests", otherPassingTest) - }) - - Context("when all the tests pass", func() { - It("should run all the tests (in succinct mode) and succeed", func() { - session := startGinkgo(tmpDir, "--noColor", "-r") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - outputLines := strings.Split(output, "\n") - Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 3/3 specs ••• SUCCESS! 
[\d.mus]+ PASS`)) - Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS! [\d.mus]+ PASS`)) - Ω(output).Should(ContainSubstring("Test Suite Passed")) - }) - }) - - Context("when one of the packages has a failing tests", func() { - BeforeEach(func() { - failingTest := tmpPath("C") - copyIn("failing_ginkgo_tests", failingTest) - }) - - It("should fail and stop running tests", func() { - session := startGinkgo(tmpDir, "--noColor", "-r") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - outputLines := strings.Split(output, "\n") - Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 3/3 specs ••• SUCCESS! [\d.mus]+ PASS`)) - Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] Failing_ginkgo_tests Suite - 2/2 specs`)) - Ω(output).Should(ContainSubstring("• Failure")) - Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Test Suite Failed")) - - Ω(output).Should(ContainSubstring("Summarizing 1 Failure:")) - Ω(output).Should(ContainSubstring("[Fail] FailingGinkgoTests [It] should fail")) - }) - }) - - Context("when one of the packages fails to compile", func() { - BeforeEach(func() { - doesNotCompileTest := tmpPath("C") - copyIn("does_not_compile", doesNotCompileTest) - }) - - It("should fail and stop running tests", func() { - session := startGinkgo(tmpDir, "--noColor", "-r") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - outputLines := strings.Split(output, "\n") - Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 3/3 specs ••• SUCCESS! 
[\d.mus]+ PASS`)) - Ω(outputLines[1]).Should(ContainSubstring("Failed to compile C:")) - Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Test Suite Failed")) - }) - }) - - Context("when either is the case, but the keepGoing flag is set", func() { - BeforeEach(func() { - doesNotCompileTest := tmpPath("B") - copyIn("does_not_compile", doesNotCompileTest) - - failingTest := tmpPath("C") - copyIn("failing_ginkgo_tests", failingTest) - }) - - It("should soldier on", func() { - session := startGinkgo(tmpDir, "--noColor", "-r", "-keepGoing") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - outputLines := strings.Split(output, "\n") - Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 3/3 specs ••• SUCCESS! [\d.mus]+ PASS`)) - Ω(outputLines[1]).Should(ContainSubstring("Failed to compile B:")) - Ω(output).Should(MatchRegexp(`\[\d+\] Failing_ginkgo_tests Suite - 2/2 specs`)) - Ω(output).Should(ContainSubstring("• Failure")) - Ω(output).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS! 
[\d.mus]+ PASS`)) - Ω(output).Should(ContainSubstring("Test Suite Failed")) - }) - }) - }) - - Context("when told to keep going --untilItFails", func() { - BeforeEach(func() { - copyIn("eventually_failing", tmpDir) - }) - - It("should keep rerunning the tests, until a failure occurs", func() { - session := startGinkgo(tmpDir, "--untilItFails", "--noColor") - Eventually(session).Should(gexec.Exit(1)) - Ω(session).Should(gbytes.Say("This was attempt #1")) - Ω(session).Should(gbytes.Say("This was attempt #2")) - Ω(session).Should(gbytes.Say("Tests failed on attempt #3")) - - //it should change the random seed between each test - lines := strings.Split(string(session.Out.Contents()), "\n") - randomSeeds := []string{} - for _, line := range lines { - if strings.Contains(line, "Random Seed:") { - randomSeeds = append(randomSeeds, strings.Split(line, ": ")[1]) - } - } - Ω(randomSeeds[0]).ShouldNot(Equal(randomSeeds[1])) - Ω(randomSeeds[1]).ShouldNot(Equal(randomSeeds[2])) - Ω(randomSeeds[0]).ShouldNot(Equal(randomSeeds[2])) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/subcommand_test.go b/kit/github.com/onsi/ginkgo/integration/subcommand_test.go deleted file mode 100644 index 99cd581..0000000 --- a/kit/github.com/onsi/ginkgo/integration/subcommand_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - "io/ioutil" - "os" - "path/filepath" - "strings" -) - -var _ = Describe("Subcommand", func() { - Describe("ginkgo bootstrap", func() { - It("should generate a bootstrap file, as long as one does not exist", func() { - pkgPath := tmpPath("foo") - os.Mkdir(pkgPath, 0777) - session := startGinkgo(pkgPath, "bootstrap") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("foo_suite_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {")) - Ω(content).Should(ContainSubstring("RegisterFailHandler")) - Ω(content).Should(ContainSubstring("RunSpecs")) - - Ω(content).Should(ContainSubstring("\t" + `. "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`)) - Ω(content).Should(ContainSubstring("\t" + `. 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega"`)) - - session = startGinkgo(pkgPath, "bootstrap") - Eventually(session).Should(gexec.Exit(1)) - output = session.Out.Contents() - Ω(output).Should(ContainSubstring("foo_suite_test.go already exists")) - }) - - It("should import nodot declarations when told to", func() { - pkgPath := tmpPath("foo") - os.Mkdir(pkgPath, 0777) - session := startGinkgo(pkgPath, "bootstrap", "--nodot") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("foo_suite_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {")) - Ω(content).Should(ContainSubstring("RegisterFailHandler")) - Ω(content).Should(ContainSubstring("RunSpecs")) - - Ω(content).Should(ContainSubstring("var It = ginkgo.It")) - Ω(content).Should(ContainSubstring("var Ω = gomega.Ω")) - - Ω(content).Should(ContainSubstring("\t" + `"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`)) - Ω(content).Should(ContainSubstring("\t" + `"github.com/gocircuit/escher/kit/github.com/onsi/gomega"`)) - }) - }) - - Describe("nodot", func() { - It("should update the declarations in the bootstrap file", func() { - pkgPath := tmpPath("foo") - os.Mkdir(pkgPath, 0777) - - session := startGinkgo(pkgPath, "bootstrap", "--nodot") - Eventually(session).Should(gexec.Exit(0)) - - byteContent, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - - content := string(byteContent) - content = strings.Replace(content, "var It =", "var MyIt =", -1) - content = strings.Replace(content, "var Ω = gomega.Ω\n", "", -1) - - err = ioutil.WriteFile(filepath.Join(pkgPath, "foo_suite_test.go"), []byte(content), os.ModePerm) - Ω(err).ShouldNot(HaveOccurred()) - - session = startGinkgo(pkgPath, "nodot") - Eventually(session).Should(gexec.Exit(0)) - - 
byteContent, err = ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(byteContent).Should(ContainSubstring("var MyIt = ginkgo.It")) - Ω(byteContent).ShouldNot(ContainSubstring("var It = ginkgo.It")) - Ω(byteContent).Should(ContainSubstring("var Ω = gomega.Ω")) - }) - }) - - Describe("ginkgo generate", func() { - var pkgPath string - - BeforeEach(func() { - pkgPath = tmpPath("foo_bar") - os.Mkdir(pkgPath, 0777) - }) - - Context("with no arguments", func() { - It("should generate a test file named after the package", func() { - session := startGinkgo(pkgPath, "generate") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("foo_bar_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("FooBar", func() {`)) - Ω(content).Should(ContainSubstring("\t" + `. "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`)) - Ω(content).Should(ContainSubstring("\t" + `. 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega"`)) - - session = startGinkgo(pkgPath, "generate") - Eventually(session).Should(gexec.Exit(1)) - output = session.Out.Contents() - - Ω(output).Should(ContainSubstring("foo_bar_test.go already exists")) - }) - }) - - Context("with an argument of the form: foo", func() { - It("should generate a test file named after the argument", func() { - session := startGinkgo(pkgPath, "generate", "baz_buzz") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("baz_buzz_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`)) - }) - }) - - Context("with an argument of the form: foo.go", func() { - It("should generate a test file named after the argument", func() { - session := startGinkgo(pkgPath, "generate", "baz_buzz.go") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("baz_buzz_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`)) - - }) - }) - - Context("with an argument of the form: foo_test", func() { - It("should generate a test file named after the argument", func() { - session := startGinkgo(pkgPath, "generate", "baz_buzz_test") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("baz_buzz_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`)) - }) - }) - - Context("with an argument of the form: foo_test.go", func() { - It("should generate a test file named after the argument", 
func() { - session := startGinkgo(pkgPath, "generate", "baz_buzz_test.go") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("baz_buzz_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`)) - }) - }) - - Context("with multiple arguments", func() { - It("should generate a test file named after the argument", func() { - session := startGinkgo(pkgPath, "generate", "baz", "buzz") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("baz_test.go")) - Ω(output).Should(ContainSubstring("buzz_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("Baz", func() {`)) - - content, err = ioutil.ReadFile(filepath.Join(pkgPath, "buzz_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).Should(ContainSubstring(`var _ = Describe("Buzz", func() {`)) - }) - }) - - Context("with nodot", func() { - It("should not import ginkgo or gomega", func() { - session := startGinkgo(pkgPath, "generate", "--nodot") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("foo_bar_test.go")) - - content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go")) - Ω(err).ShouldNot(HaveOccurred()) - Ω(content).ShouldNot(ContainSubstring("\t" + `. "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo"`)) - Ω(content).ShouldNot(ContainSubstring("\t" + `. 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega"`)) - }) - }) - }) - - Describe("ginkgo blur", func() { - It("should unfocus tests", func() { - pathToTest := tmpPath("focused") - copyIn("focused_fixture", pathToTest) - - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("3 Passed")) - Ω(output).Should(ContainSubstring("3 Skipped")) - - session = startGinkgo(pathToTest, "blur") - Eventually(session).Should(gexec.Exit(0)) - - session = startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(0)) - output = session.Out.Contents() - Ω(output).Should(ContainSubstring("6 Passed")) - Ω(output).Should(ContainSubstring("0 Skipped")) - }) - }) - - Describe("ginkgo version", func() { - It("should print out the version info", func() { - session := startGinkgo("", "version") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(MatchRegexp(`Ginkgo Version \d+\.\d+\.\d+`)) - }) - }) - - Describe("ginkgo help", func() { - It("should print out usage information", func() { - session := startGinkgo("", "help") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Err.Contents()) - - Ω(output).Should(MatchRegexp(`Ginkgo Version \d+\.\d+\.\d+`)) - Ω(output).Should(ContainSubstring("ginkgo watch")) - Ω(output).Should(ContainSubstring("-succinct")) - Ω(output).Should(ContainSubstring("-nodes")) - Ω(output).Should(ContainSubstring("ginkgo generate")) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/suite_setup_test.go b/kit/github.com/onsi/ginkgo/integration/suite_setup_test.go deleted file mode 100644 index 11077a5..0000000 --- a/kit/github.com/onsi/ginkgo/integration/suite_setup_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - "strings" -) - -var _ = Describe("SuiteSetup", func() { - var pathToTest string - - Context("when the BeforeSuite and AfterSuite pass", func() { - BeforeEach(func() { - pathToTest = tmpPath("suite_setup") - copyIn("passing_suite_setup", pathToTest) - }) - - It("should run the BeforeSuite once, then run all the tests", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(1)) - Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(1)) - }) - - It("should run the BeforeSuite once per parallel node, then run all the tests", func() { - session := startGinkgo(pathToTest, "--noColor", "--nodes=2") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(2)) - Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(2)) - }) - }) - - Context("when the BeforeSuite fails", func() { - BeforeEach(func() { - pathToTest = tmpPath("suite_setup") - copyIn("failing_before_suite", pathToTest) - }) - - It("should run the BeforeSuite once, none of the tests, but it should run the AfterSuite", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(1)) - Ω(strings.Count(output, "Test Panicked")).Should(Equal(1)) - Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(1)) - Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS")) - }) - - It("should run the BeforeSuite once per parallel node, none of the tests, but it should run the AfterSuite for each node", func() { - session := startGinkgo(pathToTest, "--noColor", "--nodes=2") - Eventually(session).Should(gexec.Exit(1)) - output := 
string(session.Out.Contents()) - - Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(2)) - Ω(strings.Count(output, "Test Panicked")).Should(Equal(2)) - Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(2)) - Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS")) - }) - }) - - Context("when the AfterSuite fails", func() { - BeforeEach(func() { - pathToTest = tmpPath("suite_setup") - copyIn("failing_after_suite", pathToTest) - }) - - It("should run the BeforeSuite once, none of the tests, but it should run the AfterSuite", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(1)) - Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(1)) - Ω(strings.Count(output, "Test Panicked")).Should(Equal(1)) - Ω(strings.Count(output, "A TEST")).Should(Equal(2)) - }) - - It("should run the BeforeSuite once per parallel node, none of the tests, but it should run the AfterSuite for each node", func() { - session := startGinkgo(pathToTest, "--noColor", "--nodes=2") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(strings.Count(output, "BEFORE SUITE")).Should(Equal(2)) - Ω(strings.Count(output, "AFTER SUITE")).Should(Equal(2)) - Ω(strings.Count(output, "Test Panicked")).Should(Equal(2)) - Ω(strings.Count(output, "A TEST")).Should(Equal(2)) - }) - }) - - Context("With passing synchronized before and after suites", func() { - BeforeEach(func() { - pathToTest = tmpPath("suite_setup") - copyIn("synchronized_setup_tests", pathToTest) - }) - - Context("when run with one node", func() { - It("should do all the work on that one node", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("BEFORE_A_1\nBEFORE_B_1: DATA")) - 
Ω(output).Should(ContainSubstring("AFTER_A_1\nAFTER_B_1")) - }) - }) - - Context("when run across multiple nodes", func() { - It("should run the first BeforeSuite function (BEFORE_A) on node 1, the second (BEFORE_B) on all the nodes, the first AfterSuite (AFTER_A) on all the nodes, and then the second (AFTER_B) on Node 1 *after* everything else is finished", func() { - session := startGinkgo(pathToTest, "--noColor", "--nodes=3") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("BEFORE_A_1")) - Ω(output).Should(ContainSubstring("BEFORE_B_1: DATA")) - Ω(output).Should(ContainSubstring("BEFORE_B_2: DATA")) - Ω(output).Should(ContainSubstring("BEFORE_B_3: DATA")) - - Ω(output).ShouldNot(ContainSubstring("BEFORE_A_2")) - Ω(output).ShouldNot(ContainSubstring("BEFORE_A_3")) - - Ω(output).Should(ContainSubstring("AFTER_A_1")) - Ω(output).Should(ContainSubstring("AFTER_A_2")) - Ω(output).Should(ContainSubstring("AFTER_A_3")) - Ω(output).Should(ContainSubstring("AFTER_B_1")) - - Ω(output).ShouldNot(ContainSubstring("AFTER_B_2")) - Ω(output).ShouldNot(ContainSubstring("AFTER_B_3")) - }) - }) - - Context("when streaming across multiple nodes", func() { - It("should run the first BeforeSuite function (BEFORE_A) on node 1, the second (BEFORE_B) on all the nodes, the first AfterSuite (AFTER_A) on all the nodes, and then the second (AFTER_B) on Node 1 *after* everything else is finished", func() { - session := startGinkgo(pathToTest, "--noColor", "--nodes=3", "--stream") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("[1] BEFORE_A_1")) - Ω(output).Should(ContainSubstring("[1] BEFORE_B_1: DATA")) - Ω(output).Should(ContainSubstring("[2] BEFORE_B_2: DATA")) - Ω(output).Should(ContainSubstring("[3] BEFORE_B_3: DATA")) - - Ω(output).ShouldNot(ContainSubstring("BEFORE_A_2")) - Ω(output).ShouldNot(ContainSubstring("BEFORE_A_3")) - - 
Ω(output).Should(ContainSubstring("[1] AFTER_A_1")) - Ω(output).Should(ContainSubstring("[2] AFTER_A_2")) - Ω(output).Should(ContainSubstring("[3] AFTER_A_3")) - Ω(output).Should(ContainSubstring("[1] AFTER_B_1")) - - Ω(output).ShouldNot(ContainSubstring("AFTER_B_2")) - Ω(output).ShouldNot(ContainSubstring("AFTER_B_3")) - }) - }) - }) - - Context("With a failing synchronized before suite", func() { - BeforeEach(func() { - pathToTest = tmpPath("suite_setup") - copyIn("exiting_synchronized_setup_tests", pathToTest) - }) - - It("should fail and let the user know that node 1 disappeared prematurely", func() { - session := startGinkgo(pathToTest, "--noColor", "--nodes=3") - Eventually(session).Should(gexec.Exit(1)) - output := string(session.Out.Contents()) - - Ω(output).Should(ContainSubstring("Node 1 disappeared before completing BeforeSuite")) - Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to end")) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/tags_test.go b/kit/github.com/onsi/ginkgo/integration/tags_test.go deleted file mode 100644 index 64d6e77..0000000 --- a/kit/github.com/onsi/ginkgo/integration/tags_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" -) - -var _ = Describe("Tags", func() { - var pathToTest string - BeforeEach(func() { - pathToTest = tmpPath("tags") - copyIn("tags_tests", pathToTest) - }) - - It("should honor the passed in -tags flag", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(0)) - output := string(session.Out.Contents()) - Ω(output).Should(ContainSubstring("Ran 1 of 1 Specs")) - - session = startGinkgo(pathToTest, "--noColor", "-tags=complex_tests") - Eventually(session).Should(gexec.Exit(0)) - output = string(session.Out.Contents()) - Ω(output).Should(ContainSubstring("Ran 3 of 3 Specs")) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go b/kit/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go deleted file mode 100644 index a71a777..0000000 --- a/kit/github.com/onsi/ginkgo/integration/verbose_and_succinct_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package integration_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" -) - -var _ = Describe("Verbose And Succinct Mode", func() { - var pathToTest string - var otherPathToTest string - - Context("when running one package", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - }) - - It("should default to non-succinct mode", func() { - session := startGinkgo(pathToTest, "--noColor") - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - }) - }) - - Context("when running more than one package", func() { - BeforeEach(func() { - pathToTest = tmpPath("ginkgo") - copyIn("passing_ginkgo_tests", pathToTest) - otherPathToTest = tmpPath("more_ginkgo") - copyIn("more_ginkgo_tests", otherPathToTest) - }) - - Context("with no flags set", func() { - It("should default to succinct mode", func() { - session := startGinkgo(pathToTest, "--noColor", pathToTest, otherPathToTest) - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("] Passing_ginkgo_tests Suite - 3/3 specs ••• SUCCESS!")) - Ω(output).Should(ContainSubstring("] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS!")) - }) - }) - - Context("with --succinct=false", func() { - It("should not be in succinct mode", func() { - session := startGinkgo(pathToTest, "--noColor", "--succinct=false", pathToTest, otherPathToTest) - Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite")) - }) - }) - - Context("with -v", func() { - It("should not be in succinct mode, but should be verbose", func() { - session := startGinkgo(pathToTest, "--noColor", "-v", pathToTest, otherPathToTest) 
- Eventually(session).Should(gexec.Exit(0)) - output := session.Out.Contents() - - Ω(output).Should(ContainSubstring("Running Suite: Passing_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("Running Suite: More_ginkgo_tests Suite")) - Ω(output).Should(ContainSubstring("should proxy strings")) - Ω(output).Should(ContainSubstring("should always pass")) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/integration/watch_test.go b/kit/github.com/onsi/ginkgo/integration/watch_test.go deleted file mode 100644 index 092982d..0000000 --- a/kit/github.com/onsi/ginkgo/integration/watch_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package integration_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "time" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gbytes" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" -) - -var _ = Describe("Watch", func() { - var rootPath string - var pathA string - var pathB string - var pathC string - var session *gexec.Session - - BeforeEach(func() { - rootPath = tmpPath("root") - pathA = filepath.Join(rootPath, "src", "github.com", "onsi", "A") - pathB = filepath.Join(rootPath, "src", "github.com", "onsi", "B") - pathC = filepath.Join(rootPath, "src", "github.com", "onsi", "C") - - err := os.MkdirAll(pathA, 0700) - Ω(err).ShouldNot(HaveOccurred()) - - err = os.MkdirAll(pathB, 0700) - Ω(err).ShouldNot(HaveOccurred()) - - err = os.MkdirAll(pathC, 0700) - Ω(err).ShouldNot(HaveOccurred()) - - copyIn(filepath.Join("watch_fixtures", "A"), pathA) - copyIn(filepath.Join("watch_fixtures", "B"), pathB) - copyIn(filepath.Join("watch_fixtures", "C"), pathC) - }) - - startGinkgoWithGopath := func(args ...string) *gexec.Session { - cmd := ginkgoCommand(rootPath, args...) - cmd.Env = append([]string{"GOPATH=" + rootPath + ":" + os.Getenv("GOPATH")}, cmd.Env...) 
- session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) - Ω(err).ShouldNot(HaveOccurred()) - return session - } - - modifyFile := func(path string) { - time.Sleep(time.Second) - content, err := ioutil.ReadFile(path) - Ω(err).ShouldNot(HaveOccurred()) - content = append(content, []byte("//")...) - err = ioutil.WriteFile(path, content, 0666) - Ω(err).ShouldNot(HaveOccurred()) - } - - modifyCode := func(pkgToModify string) { - modifyFile(filepath.Join(rootPath, "src", "github.com", "onsi", pkgToModify, pkgToModify+".go")) - } - - modifyTest := func(pkgToModify string) { - modifyFile(filepath.Join(rootPath, "src", "github.com", "onsi", pkgToModify, pkgToModify+"_test.go")) - } - - AfterEach(func() { - if session != nil { - session.Kill().Wait() - } - }) - - It("should be set up correctly", func() { - session = startGinkgoWithGopath("-r") - Eventually(session).Should(gexec.Exit(0)) - Ω(session.Out.Contents()).Should(ContainSubstring("A Suite")) - Ω(session.Out.Contents()).Should(ContainSubstring("B Suite")) - Ω(session.Out.Contents()).Should(ContainSubstring("C Suite")) - Ω(session.Out.Contents()).Should(ContainSubstring("Ginkgo ran 3 suites")) - }) - - Context("when watching just one test suite", func() { - It("should immediately run, and should rerun when the test suite changes", func() { - session = startGinkgoWithGopath("watch", "-succinct", pathA) - Eventually(session).Should(gbytes.Say("A Suite")) - modifyCode("A") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("A Suite")) - session.Kill().Wait() - }) - }) - - Context("when watching several test suites", func() { - It("should not immediately run, but should rerun a test when its code changes", func() { - session = startGinkgoWithGopath("watch", "-succinct", "-r") - Eventually(session).Should(gbytes.Say("Identified 3 test suites")) - Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite|C Suite")) - modifyCode("A") - 
Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite")) - session.Kill().Wait() - }) - }) - - Describe("watching dependencies", func() { - Context("with a depth of 2", func() { - It("should watch down to that depth", func() { - session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=2") - Eventually(session).Should(gbytes.Say("Identified 3 test suites")) - Eventually(session).Should(gbytes.Say(`A \[2 dependencies\]`)) - Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`)) - Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`)) - - modifyCode("A") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite")) - - modifyCode("B") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("B Suite")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("C Suite")) - - modifyCode("C") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("C Suite")) - Eventually(session).Should(gbytes.Say("B Suite")) - Eventually(session).Should(gbytes.Say("A Suite")) - }) - }) - - Context("with a depth of 1", func() { - It("should watch down to that depth", func() { - session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=1") - Eventually(session).Should(gbytes.Say("Identified 3 test suites")) - Eventually(session).Should(gbytes.Say(`A \[1 dependency\]`)) - Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`)) - Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`)) - - modifyCode("A") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("B Suite|C 
Suite")) - - modifyCode("B") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("B Suite")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("C Suite")) - - modifyCode("C") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("C Suite")) - Eventually(session).Should(gbytes.Say("B Suite")) - Consistently(session).ShouldNot(gbytes.Say("A Suite")) - }) - }) - - Context("with a depth of 0", func() { - It("should not watch any dependencies", func() { - session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=0") - Eventually(session).Should(gbytes.Say("Identified 3 test suites")) - Eventually(session).Should(gbytes.Say(`A \[0 dependencies\]`)) - Eventually(session).Should(gbytes.Say(`B \[0 dependencies\]`)) - Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`)) - - modifyCode("A") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite")) - - modifyCode("B") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("B Suite")) - Consistently(session).ShouldNot(gbytes.Say("A Suite|C Suite")) - - modifyCode("C") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("C Suite")) - Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite")) - }) - }) - - It("should not trigger dependents when tests are changed", func() { - session = startGinkgoWithGopath("watch", "-succinct", "-r", "-depth=2") - Eventually(session).Should(gbytes.Say("Identified 3 test suites")) - Eventually(session).Should(gbytes.Say(`A \[2 dependencies\]`)) - Eventually(session).Should(gbytes.Say(`B \[1 dependency\]`)) - Eventually(session).Should(gbytes.Say(`C \[0 dependencies\]`)) - - modifyTest("A") - 
Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("A Suite")) - Consistently(session).ShouldNot(gbytes.Say("B Suite|C Suite")) - - modifyTest("B") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("B Suite")) - Consistently(session).ShouldNot(gbytes.Say("A Suite|C Suite")) - - modifyTest("C") - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("C Suite")) - Consistently(session).ShouldNot(gbytes.Say("A Suite|B Suite")) - }) - }) - - Describe("when new test suite is added", func() { - It("should start monitoring that test suite", func() { - session = startGinkgoWithGopath("watch", "-succinct", "-r") - - Eventually(session).Should(gbytes.Say("Watching 3 suites")) - - pathD := filepath.Join(rootPath, "src", "github.com", "onsi", "D") - - err := os.MkdirAll(pathD, 0700) - Ω(err).ShouldNot(HaveOccurred()) - - copyIn(filepath.Join("watch_fixtures", "D"), pathD) - - Eventually(session).Should(gbytes.Say("Detected 1 new suite")) - Eventually(session).Should(gbytes.Say(`D \[1 dependency\]`)) - Eventually(session).Should(gbytes.Say("D Suite")) - - modifyCode("D") - - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("D Suite")) - - modifyCode("C") - - Eventually(session).Should(gbytes.Say("Detected changes in")) - Eventually(session).Should(gbytes.Say("C Suite")) - Eventually(session).Should(gbytes.Say("D Suite")) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/kit/github.com/onsi/ginkgo/internal/codelocation/code_location.go deleted file mode 100644 index ec025db..0000000 --- a/kit/github.com/onsi/ginkgo/internal/codelocation/code_location.go +++ /dev/null @@ -1,32 +0,0 @@ -package codelocation - -import ( - "regexp" - "runtime" - "runtime/debug" - "strings" - - 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -func New(skip int) types.CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip) - return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} -} - -func PruneStack(fullStackTrace string, skip int) string { - stack := strings.Split(fullStackTrace, "\n") - if len(stack) > 2*(skip+1) { - stack = stack[2*(skip+1):] - } - prunedStack := []string{} - re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) - for i := 0; i < len(stack)/2; i++ { - if !re.Match([]byte(stack[i*2])) { - prunedStack = append(prunedStack, stack[i*2]) - prunedStack = append(prunedStack, stack[i*2+1]) - } - } - return strings.Join(prunedStack, "\n") -} diff --git a/kit/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go b/kit/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go deleted file mode 100644 index 7df5102..0000000 --- a/kit/github.com/onsi/ginkgo/internal/codelocation/code_location_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package codelocation_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestCodelocation(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "CodeLocation Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go b/kit/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go deleted file mode 100644 index aa95446..0000000 --- a/kit/github.com/onsi/ginkgo/internal/codelocation/code_location_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package codelocation_test - -import ( - "runtime" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("CodeLocation", func() { - var ( - codeLocation types.CodeLocation - expectedFileName string - expectedLineNumber int - ) - - caller0 := func() { - codeLocation = codelocation.New(1) - } - - caller1 := func() { - _, expectedFileName, expectedLineNumber, _ = runtime.Caller(0) - expectedLineNumber += 2 - caller0() - } - - BeforeEach(func() { - caller1() - }) - - It("should use the passed in skip parameter to pick out the correct file & line number", func() { - Ω(codeLocation.FileName).Should(Equal(expectedFileName)) - Ω(codeLocation.LineNumber).Should(Equal(expectedLineNumber)) - }) - - Describe("stringer behavior", func() { - It("should stringify nicely", func() { - Ω(codeLocation.String()).Should(ContainSubstring("code_location_test.go:%d", expectedLineNumber)) - }) - }) - - //There's no better way than to test this private method as it - //goes out of its way to prune out ginkgo related code in the stack trace - Describe("PruneStack", func() { - It("should remove any references to ginkgo and pkg/testing and pkg/runtime", func() { - input := `/Skip/me -Skip: skip() -/Skip/me -Skip: skip() -/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever.go:10 (0x12314) -Something: Func() -/Users/whoever/gospace/src/github.com/onsi/ginkgo/whatever_else.go:10 (0x12314) -SomethingInternalToGinkgo: Func() -/usr/goroot/pkg/strings/oops.go:10 (0x12341) -Oops: BlowUp() -/Users/whoever/gospace/src/mycode/code.go:10 (0x12341) -MyCode: Func() -/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341) -MyCodeTest: Func() -/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08) -TestFoo: RunSpecs(t, "Foo Suite") -/usr/goroot/pkg/testing/testing.go:12 (0x37f08) -TestingT: Blah() -/usr/goroot/pkg/runtime/runtime.go:12 (0x37f08) -Something: Func() -` - prunedStack := codelocation.PruneStack(input, 1) - Ω(prunedStack).Should(Equal(`/usr/goroot/pkg/strings/oops.go:10 (0x12341) -Oops: BlowUp() 
-/Users/whoever/gospace/src/mycode/code.go:10 (0x12341) -MyCode: Func() -/Users/whoever/gospace/src/mycode/code_test.go:10 (0x12341) -MyCodeTest: Func() -/Users/whoever/gospace/src/mycode/code_suite_test.go:12 (0x37f08) -TestFoo: RunSpecs(t, "Foo Suite")`)) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/containernode/container_node.go b/kit/github.com/onsi/ginkgo/internal/containernode/container_node.go deleted file mode 100644 index bb161f2..0000000 --- a/kit/github.com/onsi/ginkgo/internal/containernode/container_node.go +++ /dev/null @@ -1,127 +0,0 @@ -package containernode - -import ( - "math/rand" - "sort" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type subjectOrContainerNode struct { - containerNode *ContainerNode - subjectNode leafnodes.SubjectNode -} - -func (n subjectOrContainerNode) text() string { - if n.containerNode != nil { - return n.containerNode.Text() - } else { - return n.subjectNode.Text() - } -} - -type CollatedNodes struct { - Containers []*ContainerNode - Subject leafnodes.SubjectNode -} - -type ContainerNode struct { - text string - flag types.FlagType - codeLocation types.CodeLocation - - setupNodes []leafnodes.BasicNode - subjectAndContainerNodes []subjectOrContainerNode -} - -func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { - return &ContainerNode{ - text: text, - flag: flag, - codeLocation: codeLocation, - } -} - -func (container *ContainerNode) Shuffle(r *rand.Rand) { - sort.Sort(container) - permutation := r.Perm(len(container.subjectAndContainerNodes)) - shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) - for i, j := range permutation { - shuffledNodes[i] = container.subjectAndContainerNodes[j] - } - container.subjectAndContainerNodes = shuffledNodes -} - -func (node *ContainerNode) Collate() []CollatedNodes { - return 
node.collate([]*ContainerNode{}) -} - -func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { - collated := make([]CollatedNodes, 0) - - containers := make([]*ContainerNode, len(enclosingContainers)) - copy(containers, enclosingContainers) - containers = append(containers, node) - - for _, subjectOrContainer := range node.subjectAndContainerNodes { - if subjectOrContainer.containerNode != nil { - collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) - } else { - collated = append(collated, CollatedNodes{ - Containers: containers, - Subject: subjectOrContainer.subjectNode, - }) - } - } - - return collated -} - -func (node *ContainerNode) PushContainerNode(container *ContainerNode) { - node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) -} - -func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { - node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) -} - -func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { - node.setupNodes = append(node.setupNodes, setupNode) -} - -func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { - nodes := []leafnodes.BasicNode{} - for _, setupNode := range node.setupNodes { - if setupNode.Type() == nodeType { - nodes = append(nodes, setupNode) - } - } - return nodes -} - -func (node *ContainerNode) Text() string { - return node.text -} - -func (node *ContainerNode) CodeLocation() types.CodeLocation { - return node.codeLocation -} - -func (node *ContainerNode) Flag() types.FlagType { - return node.flag -} - -//sort.Interface - -func (node *ContainerNode) Len() int { - return len(node.subjectAndContainerNodes) -} - -func (node *ContainerNode) Less(i, j int) bool { - return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() -} - 
-func (node *ContainerNode) Swap(i, j int) { - node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] -} diff --git a/kit/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go b/kit/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go deleted file mode 100644 index 99c8c2d..0000000 --- a/kit/github.com/onsi/ginkgo/internal/containernode/container_node_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package containernode_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestContainernode(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Containernode Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/containernode/container_node_test.go b/kit/github.com/onsi/ginkgo/internal/containernode/container_node_test.go deleted file mode 100644 index 91c9c32..0000000 --- a/kit/github.com/onsi/ginkgo/internal/containernode/container_node_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package containernode_test - -import ( - "math/rand" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/containernode" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("Container Node", func() { - var ( - codeLocation types.CodeLocation - container *ContainerNode - ) - - BeforeEach(func() { - codeLocation = codelocation.New(0) - container = New("description text", types.FlagTypeFocused, codeLocation) - }) - - Describe("creating a container node", func() { - It("can answer questions about itself", func() { - Ω(container.Text()).Should(Equal("description text")) - Ω(container.Flag()).Should(Equal(types.FlagTypeFocused)) - Ω(container.CodeLocation()).Should(Equal(codeLocation)) - }) - }) - - Describe("pushing setup nodes", func() { - It("can append setup nodes of various types and fetch them by type", func() { - befA := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) - befB := leafnodes.NewBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) - aftA := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0) - aftB := leafnodes.NewAfterEachNode(func() {}, codelocation.New(0), 0, nil, 0) - jusBefA := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) - jusBefB := leafnodes.NewJustBeforeEachNode(func() {}, codelocation.New(0), 0, nil, 0) - - container.PushSetupNode(befA) - container.PushSetupNode(befB) - container.PushSetupNode(aftA) - container.PushSetupNode(aftB) - container.PushSetupNode(jusBefA) - container.PushSetupNode(jusBefB) - - subject := leafnodes.NewItNode("subject", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) - container.PushSubjectNode(subject) - - Ω(container.SetupNodesOfType(types.SpecComponentTypeBeforeEach)).Should(Equal([]leafnodes.BasicNode{befA, befB})) - Ω(container.SetupNodesOfType(types.SpecComponentTypeAfterEach)).Should(Equal([]leafnodes.BasicNode{aftA, aftB})) - 
Ω(container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach)).Should(Equal([]leafnodes.BasicNode{jusBefA, jusBefB})) - Ω(container.SetupNodesOfType(types.SpecComponentTypeIt)).Should(BeEmpty()) //subjects are not setup nodes - }) - }) - - Context("With appended containers and subject nodes", func() { - var ( - itA, itB, innerItA, innerItB leafnodes.SubjectNode - innerContainer *ContainerNode - ) - - BeforeEach(func() { - itA = leafnodes.NewItNode("Banana", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) - itB = leafnodes.NewItNode("Apple", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) - - innerItA = leafnodes.NewItNode("inner A", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) - innerItB = leafnodes.NewItNode("inner B", func() {}, types.FlagTypeNone, codelocation.New(0), 0, nil, 0) - - innerContainer = New("Orange", types.FlagTypeNone, codelocation.New(0)) - - container.PushSubjectNode(itA) - container.PushContainerNode(innerContainer) - innerContainer.PushSubjectNode(innerItA) - innerContainer.PushSubjectNode(innerItB) - container.PushSubjectNode(itB) - }) - - Describe("Collating", func() { - It("should return a collated set of containers and subject nodes in the correct order", func() { - collated := container.Collate() - Ω(collated).Should(HaveLen(4)) - - Ω(collated[0]).Should(Equal(CollatedNodes{ - Containers: []*ContainerNode{container}, - Subject: itA, - })) - - Ω(collated[1]).Should(Equal(CollatedNodes{ - Containers: []*ContainerNode{container, innerContainer}, - Subject: innerItA, - })) - - Ω(collated[2]).Should(Equal(CollatedNodes{ - Containers: []*ContainerNode{container, innerContainer}, - Subject: innerItB, - })) - - Ω(collated[3]).Should(Equal(CollatedNodes{ - Containers: []*ContainerNode{container}, - Subject: itB, - })) - }) - }) - - Describe("Shuffling", func() { - var unshuffledCollation []CollatedNodes - BeforeEach(func() { - unshuffledCollation = container.Collate() - - r := 
rand.New(rand.NewSource(17)) - container.Shuffle(r) - }) - - It("should sort, and then shuffle, the top level contents of the container", func() { - shuffledCollation := container.Collate() - Ω(shuffledCollation).Should(HaveLen(len(unshuffledCollation))) - Ω(shuffledCollation).ShouldNot(Equal(unshuffledCollation)) - - for _, entry := range unshuffledCollation { - Ω(shuffledCollation).Should(ContainElement(entry)) - } - - innerAIndex, innerBIndex := 0, 0 - for i, entry := range shuffledCollation { - if entry.Subject == innerItA { - innerAIndex = i - } else if entry.Subject == innerItB { - innerBIndex = i - } - } - - Ω(innerAIndex).Should(Equal(innerBIndex - 1)) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/failer/failer.go b/kit/github.com/onsi/ginkgo/internal/failer/failer.go deleted file mode 100644 index e2e3af6..0000000 --- a/kit/github.com/onsi/ginkgo/internal/failer/failer.go +++ /dev/null @@ -1,77 +0,0 @@ -package failer - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "sync" -) - -type Failer struct { - lock *sync.Mutex - failure types.SpecFailure - state types.SpecState -} - -func New() *Failer { - return &Failer{ - lock: &sync.Mutex{}, - state: types.SpecStatePassed, - } -} - -func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == types.SpecStatePassed { - f.state = types.SpecStatePanicked - f.failure = types.SpecFailure{ - Message: "Test Panicked", - Location: location, - ForwardedPanic: forwardedPanic, - } - } -} - -func (f *Failer) Timeout(location types.CodeLocation) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == types.SpecStatePassed { - f.state = types.SpecStateTimedOut - f.failure = types.SpecFailure{ - Message: "Timed out", - Location: location, - } - } -} - -func (f *Failer) Fail(message string, location types.CodeLocation) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == 
types.SpecStatePassed { - f.state = types.SpecStateFailed - f.failure = types.SpecFailure{ - Message: message, - Location: location, - } - } -} - -func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { - f.lock.Lock() - defer f.lock.Unlock() - - failure := f.failure - outcome := f.state - if outcome != types.SpecStatePassed { - failure.ComponentType = componentType - failure.ComponentIndex = componentIndex - failure.ComponentCodeLocation = componentCodeLocation - } - - f.state = types.SpecStatePassed - f.failure = types.SpecFailure{} - - return failure, outcome -} diff --git a/kit/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go b/kit/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go deleted file mode 100644 index e131ac1..0000000 --- a/kit/github.com/onsi/ginkgo/internal/failer/failer_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package failer_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFailer(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Failer Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/failer/failer_test.go b/kit/github.com/onsi/ginkgo/internal/failer/failer_test.go deleted file mode 100644 index da45a96..0000000 --- a/kit/github.com/onsi/ginkgo/internal/failer/failer_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package failer_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("Failer", func() { - var ( - failer *Failer - codeLocationA types.CodeLocation - codeLocationB types.CodeLocation - ) - - BeforeEach(func() { - codeLocationA = codelocation.New(0) - codeLocationB = codelocation.New(0) - failer = New() - }) - - Context("with no failures", func() { - It("should return success when drained", func() { - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - Ω(failure).Should(BeZero()) - Ω(state).Should(Equal(types.SpecStatePassed)) - }) - }) - - Describe("Fail", func() { - It("should handle failures", func() { - failer.Fail("something failed", codeLocationA) - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "something failed", - Location: codeLocationA, - ForwardedPanic: nil, - ComponentType: types.SpecComponentTypeIt, - ComponentIndex: 3, - ComponentCodeLocation: codeLocationB, - })) - Ω(state).Should(Equal(types.SpecStateFailed)) - }) - }) - - Describe("Panic", func() { - It("should handle panics", func() { - failer.Panic(codeLocationA, "some forwarded panic") - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "Test Panicked", - Location: codeLocationA, - ForwardedPanic: "some forwarded panic", - ComponentType: types.SpecComponentTypeIt, - ComponentIndex: 3, - ComponentCodeLocation: codeLocationB, - })) - Ω(state).Should(Equal(types.SpecStatePanicked)) - }) - }) - - Describe("Timeout", func() { - It("should handle timeouts", func() { - failer.Timeout(codeLocationA) - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "Timed out", - Location: 
codeLocationA, - ForwardedPanic: nil, - ComponentType: types.SpecComponentTypeIt, - ComponentIndex: 3, - ComponentCodeLocation: codeLocationB, - })) - Ω(state).Should(Equal(types.SpecStateTimedOut)) - }) - }) - - Context("when multiple failures are registered", func() { - BeforeEach(func() { - failer.Fail("something failed", codeLocationA) - failer.Fail("something else failed", codeLocationA) - }) - - It("should only report the first one when drained", func() { - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "something failed", - Location: codeLocationA, - ForwardedPanic: nil, - ComponentType: types.SpecComponentTypeIt, - ComponentIndex: 3, - ComponentCodeLocation: codeLocationB, - })) - Ω(state).Should(Equal(types.SpecStateFailed)) - }) - - It("should report subsequent failures after being drained", func() { - failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - failer.Fail("yet another thing failed", codeLocationA) - - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "yet another thing failed", - Location: codeLocationA, - ForwardedPanic: nil, - ComponentType: types.SpecComponentTypeIt, - ComponentIndex: 3, - ComponentCodeLocation: codeLocationB, - })) - Ω(state).Should(Equal(types.SpecStateFailed)) - }) - - It("should report sucess on subsequent drains if no errors occur", func() { - failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB) - Ω(failure).Should(BeZero()) - Ω(state).Should(Equal(types.SpecStatePassed)) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go deleted file mode 100644 index 49bcc32..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go +++ /dev/null @@ -1,86 +0,0 @@ 
-package leafnodes - -import ( - "math" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type benchmarker struct { - measurements map[string]*types.SpecMeasurement - orderCounter int -} - -func newBenchmarker() *benchmarker { - return &benchmarker{ - measurements: make(map[string]*types.SpecMeasurement, 0), - } -} - -func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { - t := time.Now() - body() - elapsedTime = time.Since(t) - - measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...) - measurement.Results = append(measurement.Results, elapsedTime.Seconds()) - - return -} - -func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...) - measurement.Results = append(measurement.Results, value) -} - -func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement { - measurement, ok := b.measurements[name] - if !ok { - var computedInfo interface{} - computedInfo = nil - if len(info) > 0 { - computedInfo = info[0] - } - measurement = &types.SpecMeasurement{ - Name: name, - Info: computedInfo, - Order: b.orderCounter, - SmallestLabel: smallestLabel, - LargestLabel: largestLabel, - AverageLabel: averageLabel, - Units: units, - Results: make([]float64, 0), - } - b.measurements[name] = measurement - b.orderCounter++ - } - - return measurement -} - -func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { - for _, measurement := range b.measurements { - measurement.Smallest = math.MaxFloat64 - measurement.Largest = -math.MaxFloat64 - sum := float64(0) - sumOfSquares := float64(0) - - for _, result := range measurement.Results { - if result > measurement.Largest { - measurement.Largest = result - } - if result 
< measurement.Smallest { - measurement.Smallest = result - } - sum += result - sumOfSquares += result * result - } - - n := float64(len(measurement.Results)) - measurement.Average = sum / n - measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) - } - - return b.measurements -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go deleted file mode 100644 index 377a5cd..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go +++ /dev/null @@ -1,19 +0,0 @@ -package leafnodes - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type BasicNode interface { - Type() types.SpecComponentType - Run() (types.SpecState, types.SpecFailure) - CodeLocation() types.CodeLocation -} - -type SubjectNode interface { - BasicNode - - Text() string - Flag() types.FlagType - Samples() int -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/it_node.go deleted file mode 100644 index 3bb080b..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/it_node.go +++ /dev/null @@ -1,46 +0,0 @@ -package leafnodes - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "time" -) - -type ItNode struct { - runner *runner - - flag types.FlagType - text string -} - -func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { - return &ItNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), - flag: flag, - text: text, - } -} - -func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *ItNode) Type() types.SpecComponentType { - return 
types.SpecComponentTypeIt -} - -func (node *ItNode) Text() string { - return node.text -} - -func (node *ItNode) Flag() types.FlagType { - return node.flag -} - -func (node *ItNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func (node *ItNode) Samples() int { - return 1 -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go deleted file mode 100644 index a565c1e..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/it_node_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("It Nodes", func() { - It("should report the correct type, text, flag, and code location", func() { - codeLocation := codelocation.New(0) - it := NewItNode("my it node", func() {}, types.FlagTypeFocused, codeLocation, 0, nil, 3) - Ω(it.Type()).Should(Equal(types.SpecComponentTypeIt)) - Ω(it.Flag()).Should(Equal(types.FlagTypeFocused)) - Ω(it.Text()).Should(Equal("my it node")) - Ω(it.CodeLocation()).Should(Equal(codeLocation)) - Ω(it.Samples()).Should(Equal(1)) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go deleted file mode 100644 index 84f2880..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/leaf_node_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestLeafNode(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "LeafNode Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go deleted file mode 100644 index 98b2976..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go +++ /dev/null @@ -1,61 +0,0 @@ -package leafnodes - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "reflect" -) - -type MeasureNode struct { - runner *runner - - text string - flag types.FlagType - samples int - benchmarker *benchmarker -} - -func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { - benchmarker := newBenchmarker() - - wrappedBody := func() { - reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) - } - - return &MeasureNode{ - runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), - - text: text, - flag: flag, - samples: samples, - benchmarker: benchmarker, - } -} - -func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { - return node.benchmarker.measurementsReport() -} - -func (node *MeasureNode) Type() types.SpecComponentType { - return types.SpecComponentTypeMeasure -} - -func (node *MeasureNode) Text() string { - return node.text -} - -func (node *MeasureNode) Flag() types.FlagType { - return node.flag -} - -func (node *MeasureNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func (node *MeasureNode) Samples() int { - return node.samples -} diff --git 
a/kit/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go deleted file mode 100644 index d1c67ac..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/measure_node_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "time" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("Measure Nodes", func() { - It("should report the correct type, text, flag, and code location", func() { - codeLocation := codelocation.New(0) - measure := NewMeasureNode("my measure node", func(b Benchmarker) {}, types.FlagTypeFocused, codeLocation, 10, nil, 3) - Ω(measure.Type()).Should(Equal(types.SpecComponentTypeMeasure)) - Ω(measure.Flag()).Should(Equal(types.FlagTypeFocused)) - Ω(measure.Text()).Should(Equal("my measure node")) - Ω(measure.CodeLocation()).Should(Equal(codeLocation)) - Ω(measure.Samples()).Should(Equal(10)) - }) - - Describe("benchmarking", func() { - var measure *MeasureNode - - Describe("Value", func() { - BeforeEach(func() { - measure = NewMeasureNode("the measurement", func(b Benchmarker) { - b.RecordValue("foo", 7, "info!") - b.RecordValue("foo", 2) - b.RecordValue("foo", 3) - b.RecordValue("bar", 0.3) - b.RecordValue("bar", 0.1) - b.RecordValue("bar", 0.5) - b.RecordValue("bar", 0.7) - }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3) - Ω(measure.Run()).Should(Equal(types.SpecStatePassed)) - }) - - It("records passed in values and reports on them", func() { - report := measure.MeasurementsReport() - Ω(report).Should(HaveLen(2)) - 
Ω(report["foo"].Name).Should(Equal("foo")) - Ω(report["foo"].Info).Should(Equal("info!")) - Ω(report["foo"].Order).Should(Equal(0)) - Ω(report["foo"].SmallestLabel).Should(Equal("Smallest")) - Ω(report["foo"].LargestLabel).Should(Equal(" Largest")) - Ω(report["foo"].AverageLabel).Should(Equal(" Average")) - Ω(report["foo"].Units).Should(Equal("")) - Ω(report["foo"].Results).Should(Equal([]float64{7, 2, 3})) - Ω(report["foo"].Smallest).Should(BeNumerically("==", 2)) - Ω(report["foo"].Largest).Should(BeNumerically("==", 7)) - Ω(report["foo"].Average).Should(BeNumerically("==", 4)) - Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 2.16, 0.01)) - - Ω(report["bar"].Name).Should(Equal("bar")) - Ω(report["bar"].Info).Should(BeNil()) - Ω(report["bar"].SmallestLabel).Should(Equal("Smallest")) - Ω(report["bar"].Order).Should(Equal(1)) - Ω(report["bar"].LargestLabel).Should(Equal(" Largest")) - Ω(report["bar"].AverageLabel).Should(Equal(" Average")) - Ω(report["bar"].Units).Should(Equal("")) - Ω(report["bar"].Results).Should(Equal([]float64{0.3, 0.1, 0.5, 0.7})) - Ω(report["bar"].Smallest).Should(BeNumerically("==", 0.1)) - Ω(report["bar"].Largest).Should(BeNumerically("==", 0.7)) - Ω(report["bar"].Average).Should(BeNumerically("==", 0.4)) - Ω(report["bar"].StdDeviation).Should(BeNumerically("~", 0.22, 0.01)) - }) - }) - - Describe("Time", func() { - BeforeEach(func() { - measure = NewMeasureNode("the measurement", func(b Benchmarker) { - b.Time("foo", func() { - time.Sleep(100 * time.Millisecond) - }, "info!") - b.Time("foo", func() { - time.Sleep(200 * time.Millisecond) - }) - b.Time("foo", func() { - time.Sleep(170 * time.Millisecond) - }) - }, types.FlagTypeFocused, codelocation.New(0), 1, Failer.New(), 3) - Ω(measure.Run()).Should(Equal(types.SpecStatePassed)) - }) - - It("records passed in values and reports on them", func() { - report := measure.MeasurementsReport() - Ω(report).Should(HaveLen(1)) - Ω(report["foo"].Name).Should(Equal("foo")) - 
Ω(report["foo"].Info).Should(Equal("info!")) - Ω(report["foo"].SmallestLabel).Should(Equal("Fastest Time")) - Ω(report["foo"].LargestLabel).Should(Equal("Slowest Time")) - Ω(report["foo"].AverageLabel).Should(Equal("Average Time")) - Ω(report["foo"].Units).Should(Equal("s")) - Ω(report["foo"].Results).Should(HaveLen(3)) - Ω(report["foo"].Results[0]).Should(BeNumerically("~", 0.1, 0.01)) - Ω(report["foo"].Results[1]).Should(BeNumerically("~", 0.2, 0.01)) - Ω(report["foo"].Results[2]).Should(BeNumerically("~", 0.17, 0.01)) - Ω(report["foo"].Smallest).Should(BeNumerically("~", 0.1, 0.01)) - Ω(report["foo"].Largest).Should(BeNumerically("~", 0.2, 0.01)) - Ω(report["foo"].Average).Should(BeNumerically("~", 0.16, 0.01)) - Ω(report["foo"].StdDeviation).Should(BeNumerically("~", 0.04, 0.01)) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/runner.go deleted file mode 100644 index 621046b..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/runner.go +++ /dev/null @@ -1,107 +0,0 @@ -package leafnodes - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "reflect" - "time" -) - -type runner struct { - isAsync bool - asyncFunc func(chan<- interface{}) - syncFunc func() - codeLocation types.CodeLocation - timeoutThreshold time.Duration - nodeType types.SpecComponentType - componentIndex int - failer *failer.Failer -} - -func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { - bodyType := reflect.TypeOf(body) - if bodyType.Kind() != reflect.Func { - panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) - } - - runner := &runner{ - codeLocation: 
codeLocation, - timeoutThreshold: timeout, - failer: failer, - nodeType: nodeType, - componentIndex: componentIndex, - } - - switch bodyType.NumIn() { - case 0: - runner.syncFunc = body.(func()) - return runner - case 1: - if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { - panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) - } - - wrappedBody := func(done chan<- interface{}) { - bodyValue := reflect.ValueOf(body) - bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) - } - - runner.isAsync = true - runner.asyncFunc = wrappedBody - return runner - } - - panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) -} - -func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) { - if r.isAsync { - return r.runAsync() - } else { - return r.runSync() - } -} - -func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { - done := make(chan interface{}, 1) - - go func() { - defer func() { - if e := recover(); e != nil { - r.failer.Panic(codelocation.New(2), e) - select { - case <-done: - break - default: - close(done) - } - } - }() - - r.asyncFunc(done) - }() - - select { - case <-done: - case <-time.After(r.timeoutThreshold): - r.failer.Timeout(r.codeLocation) - } - - failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) - return -} -func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { - defer func() { - if e := recover(); e != nil { - r.failer.Panic(codelocation.New(2), e) - } - - failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) - }() - - r.syncFunc() - - return -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go deleted file mode 100644 index 056ec1c..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go +++ /dev/null @@ -1,41 +0,0 @@ -package 
leafnodes - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "time" -) - -type SetupNode struct { - runner *runner -} - -func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *SetupNode) Type() types.SpecComponentType { - return node.runner.nodeType -} - -func (node *SetupNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), - } -} - -func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), - } -} - -func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), - } -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go deleted file mode 100644 index 4f51e7c..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" -) - -var _ = Describe("Setup Nodes", func() { - Describe("BeforeEachNodes", func() { - It("should report the correct type and code location", func() { - codeLocation := codelocation.New(0) - beforeEach := NewBeforeEachNode(func() {}, codeLocation, 0, nil, 3) - Ω(beforeEach.Type()).Should(Equal(types.SpecComponentTypeBeforeEach)) - Ω(beforeEach.CodeLocation()).Should(Equal(codeLocation)) - }) - }) - - Describe("AfterEachNodes", func() { - It("should report the correct type and code location", func() { - codeLocation := codelocation.New(0) - afterEach := NewAfterEachNode(func() {}, codeLocation, 0, nil, 3) - Ω(afterEach.Type()).Should(Equal(types.SpecComponentTypeAfterEach)) - Ω(afterEach.CodeLocation()).Should(Equal(codeLocation)) - }) - }) - - Describe("JustBeforeEachNodes", func() { - It("should report the correct type and code location", func() { - codeLocation := codelocation.New(0) - justBeforeEach := NewJustBeforeEachNode(func() {}, codeLocation, 0, nil, 3) - Ω(justBeforeEach.Type()).Should(Equal(types.SpecComponentTypeJustBeforeEach)) - Ω(justBeforeEach.CodeLocation()).Should(Equal(codeLocation)) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go deleted file mode 100644 index 02d0cc4..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go +++ /dev/null @@ -1,342 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "reflect" - "runtime" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type runnable interface { - Run() (outcome types.SpecState, failure types.SpecFailure) - CodeLocation() types.CodeLocation -} - -func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) { - var ( - outcome types.SpecState - failure types.SpecFailure - - failer *Failer.Failer - - componentCodeLocation types.CodeLocation - innerCodeLocation types.CodeLocation - - didRun bool - ) - - BeforeEach(func() { - failer = Failer.New() - componentCodeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - - didRun = false - }) - - Describe("synchronous functions", func() { - Context("when the function passes", func() { - BeforeEach(func() { - outcome, failure = build(func() { - didRun = true - }, 0, failer, componentCodeLocation).Run() - }) - - It("should have a succesful outcome", func() { - Ω(didRun).Should(BeTrue()) - - Ω(outcome).Should(Equal(types.SpecStatePassed)) - Ω(failure).Should(BeZero()) - }) - }) - - Context("when a failure occurs", func() { - BeforeEach(func() { - outcome, failure = build(func() { - didRun = true - failer.Fail("bam", innerCodeLocation) - panic("should not matter") - }, 0, failer, componentCodeLocation).Run() - }) - - It("should return the failure", func() { - Ω(didRun).Should(BeTrue()) - - Ω(outcome).Should(Equal(types.SpecStateFailed)) - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "bam", - Location: innerCodeLocation, - ForwardedPanic: nil, - ComponentIndex: componentIndex, - ComponentType: componentType, - ComponentCodeLocation: 
componentCodeLocation, - })) - }) - }) - - Context("when a panic occurs", func() { - BeforeEach(func() { - outcome, failure = build(func() { - didRun = true - innerCodeLocation = codelocation.New(0) - panic("ack!") - }, 0, failer, componentCodeLocation).Run() - }) - - It("should return the panic", func() { - Ω(didRun).Should(BeTrue()) - - Ω(outcome).Should(Equal(types.SpecStatePanicked)) - innerCodeLocation.LineNumber++ - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "Test Panicked", - Location: innerCodeLocation, - ForwardedPanic: "ack!", - ComponentIndex: componentIndex, - ComponentType: componentType, - ComponentCodeLocation: componentCodeLocation, - })) - }) - }) - }) -} - -func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) { - var ( - outcome types.SpecState - failure types.SpecFailure - - failer *Failer.Failer - - componentCodeLocation types.CodeLocation - innerCodeLocation types.CodeLocation - - didRun bool - ) - - BeforeEach(func() { - failer = Failer.New() - componentCodeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - - didRun = false - }) - - Describe("asynchronous functions", func() { - var timeoutDuration time.Duration - - BeforeEach(func() { - timeoutDuration = time.Duration(1 * float64(time.Second)) - }) - - Context("when running", func() { - It("should run the function as a goroutine, and block until it's done", func() { - initialNumberOfGoRoutines := runtime.NumGoroutine() - numberOfGoRoutines := 0 - - build(func(done Done) { - didRun = true - numberOfGoRoutines = runtime.NumGoroutine() - close(done) - }, timeoutDuration, failer, componentCodeLocation).Run() - - Ω(didRun).Should(BeTrue()) - Ω(numberOfGoRoutines).Should(BeNumerically(">=", initialNumberOfGoRoutines+1)) - }) - }) - - Context("when the function passes", func() { - 
BeforeEach(func() { - outcome, failure = build(func(done Done) { - didRun = true - close(done) - }, timeoutDuration, failer, componentCodeLocation).Run() - }) - - It("should have a succesful outcome", func() { - Ω(didRun).Should(BeTrue()) - Ω(outcome).Should(Equal(types.SpecStatePassed)) - Ω(failure).Should(BeZero()) - }) - }) - - Context("when the function fails", func() { - BeforeEach(func() { - outcome, failure = build(func(done Done) { - didRun = true - failer.Fail("bam", innerCodeLocation) - time.Sleep(20 * time.Millisecond) - panic("doesn't matter") - close(done) - }, 10*time.Millisecond, failer, componentCodeLocation).Run() - }) - - It("should return the failure", func() { - Ω(didRun).Should(BeTrue()) - - Ω(outcome).Should(Equal(types.SpecStateFailed)) - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "bam", - Location: innerCodeLocation, - ForwardedPanic: nil, - ComponentIndex: componentIndex, - ComponentType: componentType, - ComponentCodeLocation: componentCodeLocation, - })) - }) - }) - - Context("when the function times out", func() { - var guard chan struct{} - - BeforeEach(func() { - guard = make(chan struct{}) - outcome, failure = build(func(done Done) { - didRun = true - time.Sleep(20 * time.Millisecond) - close(guard) - panic("doesn't matter") - close(done) - }, 10*time.Millisecond, failer, componentCodeLocation).Run() - }) - - It("should return the timeout", func() { - <-guard - Ω(didRun).Should(BeTrue()) - - Ω(outcome).Should(Equal(types.SpecStateTimedOut)) - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "Timed out", - Location: componentCodeLocation, - ForwardedPanic: nil, - ComponentIndex: componentIndex, - ComponentType: componentType, - ComponentCodeLocation: componentCodeLocation, - })) - }) - }) - - Context("when the function panics", func() { - BeforeEach(func() { - outcome, failure = build(func(done Done) { - didRun = true - innerCodeLocation = codelocation.New(0) - panic("ack!") - }, 100*time.Millisecond, failer, 
componentCodeLocation).Run() - }) - - It("should return the panic", func() { - Ω(didRun).Should(BeTrue()) - - Ω(outcome).Should(Equal(types.SpecStatePanicked)) - innerCodeLocation.LineNumber++ - Ω(failure).Should(Equal(types.SpecFailure{ - Message: "Test Panicked", - Location: innerCodeLocation, - ForwardedPanic: "ack!", - ComponentIndex: componentIndex, - ComponentType: componentType, - ComponentCodeLocation: componentCodeLocation, - })) - }) - }) - }) -} - -func InvalidSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType) { - var ( - failer *Failer.Failer - componentCodeLocation types.CodeLocation - innerCodeLocation types.CodeLocation - ) - - BeforeEach(func() { - failer = Failer.New() - componentCodeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - }) - - Describe("invalid functions", func() { - Context("when passed something that's not a function", func() { - It("should panic", func() { - Ω(func() { - build("not a function", 0, failer, componentCodeLocation) - }).Should(Panic()) - }) - }) - - Context("when the function takes the wrong kind of argument", func() { - It("should panic", func() { - Ω(func() { - build(func(oops string) {}, 0, failer, componentCodeLocation) - }).Should(Panic()) - }) - }) - - Context("when the function takes more than one argument", func() { - It("should panic", func() { - Ω(func() { - build(func(done Done, oops string) {}, 0, failer, componentCodeLocation) - }).Should(Panic()) - }) - }) - }) -} - -var _ = Describe("Shared RunnableNode behavior", func() { - Describe("It Nodes", func() { - build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { - return NewItNode("", body, types.FlagTypeFocused, componentCodeLocation, timeout, failer, 3) - } - - SynchronousSharedRunnerBehaviors(build, 
types.SpecComponentTypeIt, 3) - AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3) - InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeIt) - }) - - Describe("Measure Nodes", func() { - build := func(body interface{}, _ time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { - return NewMeasureNode("", func(Benchmarker) { - reflect.ValueOf(body).Call([]reflect.Value{}) - }, types.FlagTypeFocused, componentCodeLocation, 10, failer, 3) - } - - SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeMeasure, 3) - }) - - Describe("BeforeEach Nodes", func() { - build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { - return NewBeforeEachNode(body, componentCodeLocation, timeout, failer, 3) - } - - SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3) - AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3) - InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach) - }) - - Describe("AfterEach Nodes", func() { - build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { - return NewAfterEachNode(body, componentCodeLocation, timeout, failer, 3) - } - - SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3) - AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3) - InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach) - }) - - Describe("JustBeforeEach Nodes", func() { - build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable { - return NewJustBeforeEachNode(body, componentCodeLocation, timeout, failer, 3) - } - - SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3) - AsynchronousSharedRunnerBehaviors(build, 
types.SpecComponentTypeJustBeforeEach, 3) - InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go deleted file mode 100644 index f558b5c..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go +++ /dev/null @@ -1,54 +0,0 @@ -package leafnodes - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "time" -) - -type SuiteNode interface { - Run(parallelNode int, parallelTotal int, syncHost string) bool - Passed() bool - Summary() *types.SetupSummary -} - -type simpleSuiteNode struct { - runner *runner - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - t := time.Now() - node.outcome, node.failure = node.runner.run() - node.runTime = time.Since(t) - - return node.outcome == types.SpecStatePassed -} - -func (node *simpleSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *simpleSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runner.nodeType, - CodeLocation: node.runner.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &simpleSuiteNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), - } -} - -func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &simpleSuiteNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - } 
-} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go deleted file mode 100644 index 0e7e51c..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "time" -) - -var _ = Describe("SuiteNodes", func() { - Describe("BeforeSuite nodes", func() { - var befSuite SuiteNode - var failer *Failer.Failer - var codeLocation types.CodeLocation - var innerCodeLocation types.CodeLocation - var outcome bool - - BeforeEach(func() { - failer = Failer.New() - codeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - }) - - Context("when the body passes", func() { - BeforeEach(func() { - befSuite = NewBeforeSuiteNode(func() { - time.Sleep(10 * time.Millisecond) - }, codeLocation, 0, failer) - outcome = befSuite.Run(0, 0, "") - }) - - It("should return true when run and report as passed", func() { - Ω(outcome).Should(BeTrue()) - Ω(befSuite.Passed()).Should(BeTrue()) - }) - - It("should have the correct summary", func() { - summary := befSuite.Summary() - Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) - Ω(summary.CodeLocation).Should(Equal(codeLocation)) - Ω(summary.State).Should(Equal(types.SpecStatePassed)) - Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond)) - Ω(summary.Failure).Should(BeZero()) - }) - }) - - Context("when the body fails", func() { - BeforeEach(func() { - befSuite = 
NewBeforeSuiteNode(func() { - failer.Fail("oops", innerCodeLocation) - }, codeLocation, 0, failer) - outcome = befSuite.Run(0, 0, "") - }) - - It("should return false when run and report as failed", func() { - Ω(outcome).Should(BeFalse()) - Ω(befSuite.Passed()).Should(BeFalse()) - }) - - It("should have the correct summary", func() { - summary := befSuite.Summary() - Ω(summary.State).Should(Equal(types.SpecStateFailed)) - Ω(summary.Failure.Message).Should(Equal("oops")) - Ω(summary.Failure.Location).Should(Equal(innerCodeLocation)) - Ω(summary.Failure.ForwardedPanic).Should(BeNil()) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - - Context("when the body times out", func() { - BeforeEach(func() { - befSuite = NewBeforeSuiteNode(func(done Done) { - }, codeLocation, time.Millisecond, failer) - outcome = befSuite.Run(0, 0, "") - }) - - It("should return false when run and report as failed", func() { - Ω(outcome).Should(BeFalse()) - Ω(befSuite.Passed()).Should(BeFalse()) - }) - - It("should have the correct summary", func() { - summary := befSuite.Summary() - Ω(summary.State).Should(Equal(types.SpecStateTimedOut)) - Ω(summary.Failure.ForwardedPanic).Should(BeNil()) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - - Context("when the body panics", func() { - BeforeEach(func() { - befSuite = NewBeforeSuiteNode(func() { - panic("bam") - }, codeLocation, 0, failer) - outcome = befSuite.Run(0, 0, "") - }) - - It("should return false when run and report as failed", func() { - Ω(outcome).Should(BeFalse()) - Ω(befSuite.Passed()).Should(BeFalse()) - }) - - It("should have the correct summary", func() { - summary := befSuite.Summary() - 
Ω(summary.State).Should(Equal(types.SpecStatePanicked)) - Ω(summary.Failure.ForwardedPanic).Should(Equal("bam")) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - }) - - Describe("AfterSuite nodes", func() { - var aftSuite SuiteNode - var failer *Failer.Failer - var codeLocation types.CodeLocation - var innerCodeLocation types.CodeLocation - var outcome bool - - BeforeEach(func() { - failer = Failer.New() - codeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - }) - - Context("when the body passes", func() { - BeforeEach(func() { - aftSuite = NewAfterSuiteNode(func() { - time.Sleep(10 * time.Millisecond) - }, codeLocation, 0, failer) - outcome = aftSuite.Run(0, 0, "") - }) - - It("should return true when run and report as passed", func() { - Ω(outcome).Should(BeTrue()) - Ω(aftSuite.Passed()).Should(BeTrue()) - }) - - It("should have the correct summary", func() { - summary := aftSuite.Summary() - Ω(summary.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) - Ω(summary.CodeLocation).Should(Equal(codeLocation)) - Ω(summary.State).Should(Equal(types.SpecStatePassed)) - Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond)) - Ω(summary.Failure).Should(BeZero()) - }) - }) - - Context("when the body fails", func() { - BeforeEach(func() { - aftSuite = NewAfterSuiteNode(func() { - failer.Fail("oops", innerCodeLocation) - }, codeLocation, 0, failer) - outcome = aftSuite.Run(0, 0, "") - }) - - It("should return false when run and report as failed", func() { - Ω(outcome).Should(BeFalse()) - Ω(aftSuite.Passed()).Should(BeFalse()) - }) - - It("should have the correct summary", func() { - summary := aftSuite.Summary() - Ω(summary.State).Should(Equal(types.SpecStateFailed)) - Ω(summary.Failure.Message).Should(Equal("oops")) - 
Ω(summary.Failure.Location).Should(Equal(innerCodeLocation)) - Ω(summary.Failure.ForwardedPanic).Should(BeNil()) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - - Context("when the body times out", func() { - BeforeEach(func() { - aftSuite = NewAfterSuiteNode(func(done Done) { - }, codeLocation, time.Millisecond, failer) - outcome = aftSuite.Run(0, 0, "") - }) - - It("should return false when run and report as failed", func() { - Ω(outcome).Should(BeFalse()) - Ω(aftSuite.Passed()).Should(BeFalse()) - }) - - It("should have the correct summary", func() { - summary := aftSuite.Summary() - Ω(summary.State).Should(Equal(types.SpecStateTimedOut)) - Ω(summary.Failure.ForwardedPanic).Should(BeNil()) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - - Context("when the body panics", func() { - BeforeEach(func() { - aftSuite = NewAfterSuiteNode(func() { - panic("bam") - }, codeLocation, 0, failer) - outcome = aftSuite.Run(0, 0, "") - }) - - It("should return false when run and report as failed", func() { - Ω(outcome).Should(BeFalse()) - Ω(aftSuite.Passed()).Should(BeFalse()) - }) - - It("should have the correct summary", func() { - summary := aftSuite.Summary() - Ω(summary.State).Should(Equal(types.SpecStatePanicked)) - Ω(summary.Failure.ForwardedPanic).Should(Equal("bam")) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeAfterSuite)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go 
b/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go deleted file mode 100644 index 2086e9b..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go +++ /dev/null @@ -1,89 +0,0 @@ -package leafnodes - -import ( - "encoding/json" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "io/ioutil" - "net/http" - "time" -) - -type synchronizedAfterSuiteNode struct { - runnerA *runner - runnerB *runner - - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &synchronizedAfterSuiteNode{ - runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - } -} - -func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - node.outcome, node.failure = node.runnerA.run() - - if parallelNode == 1 { - if parallelTotal > 1 { - node.waitUntilOtherNodesAreDone(syncHost) - } - - outcome, failure := node.runnerB.run() - - if node.outcome == types.SpecStatePassed { - node.outcome, node.failure = outcome, failure - } - } - - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedAfterSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runnerA.nodeType, - CodeLocation: node.runnerA.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { - for { - if 
node.canRun(syncHost) { - return - } - - time.Sleep(50 * time.Millisecond) - } -} - -func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { - resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") - if err != nil || resp.StatusCode != http.StatusOK { - return false - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return false - } - resp.Body.Close() - - afterSuiteData := types.RemoteAfterSuiteData{} - err = json.Unmarshal(body, &afterSuiteData) - if err != nil { - return false - } - - return afterSuiteData.CanRun -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go deleted file mode 100644 index c8ebb9a..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "sync" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/ghttp" - "net/http" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "time" -) - -var _ = Describe("SynchronizedAfterSuiteNode", func() { - var failer *Failer.Failer - var node SuiteNode - var codeLocation types.CodeLocation - var innerCodeLocation types.CodeLocation - var outcome bool - var server *ghttp.Server - var things []string - var lock *sync.Mutex - - BeforeEach(func() { - things = []string{} - server = ghttp.NewServer() - codeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - failer = Failer.New() - lock = &sync.Mutex{} - }) - - AfterEach(func() { - server.Close() - }) - - newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode { - return NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer) - } - - ranThing := func(thing string) { - lock.Lock() - defer lock.Unlock() - things = append(things, thing) - } - - thingsThatRan := func() []string { - lock.Lock() - defer lock.Unlock() - return things - } - - Context("when not running in parallel", func() { - Context("when all is well", func() { - BeforeEach(func() { - node = newNode(func() { - ranThing("A") - }, func() { - ranThing("B") - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should run A, then B", func() { - Ω(thingsThatRan()).Should(Equal([]string{"A", "B"})) - }) - - It("should report success", func() { - Ω(outcome).Should(BeTrue()) - Ω(node.Passed()).Should(BeTrue()) - Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) - }) - }) - - Context("when A fails", func() { - BeforeEach(func() { - node = newNode(func() { - ranThing("A") - failer.Fail("bam", innerCodeLocation) - }, func() { - ranThing("B") - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should still run B", 
func() { - Ω(thingsThatRan()).Should(Equal([]string{"A", "B"})) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) - }) - }) - - Context("when B fails", func() { - BeforeEach(func() { - node = newNode(func() { - ranThing("A") - }, func() { - ranThing("B") - failer.Fail("bam", innerCodeLocation) - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should run all the things", func() { - Ω(thingsThatRan()).Should(Equal([]string{"A", "B"})) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) - }) - }) - }) - - Context("when running in parallel", func() { - Context("as the first node", func() { - BeforeEach(func() { - server.AppendHandlers(ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"), - func(writer http.ResponseWriter, request *http.Request) { - ranThing("Request1") - }, - ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{false}), - ), ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"), - func(writer http.ResponseWriter, request *http.Request) { - ranThing("Request2") - }, - ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{false}), - ), ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/RemoteAfterSuiteData"), - func(writer http.ResponseWriter, request *http.Request) { - ranThing("Request3") - }, - ghttp.RespondWithJSONEncoded(200, types.RemoteAfterSuiteData{true}), - )) - - node = newNode(func() { - ranThing("A") - }, func() { - ranThing("B") - }) - - outcome = node.Run(1, 3, server.URL()) - }) - - It("should run A and, when the server says its time, run B", func() { - Ω(thingsThatRan()).Should(Equal([]string{"A", "Request1", "Request2", "Request3", "B"})) - }) - - It("should report success", func() { - Ω(outcome).Should(BeTrue()) - 
Ω(node.Passed()).Should(BeTrue()) - Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) - }) - }) - - Context("as any other node", func() { - BeforeEach(func() { - node = newNode(func() { - ranThing("A") - }, func() { - ranThing("B") - }) - - outcome = node.Run(2, 3, server.URL()) - }) - - It("should run A, and not run B", func() { - Ω(thingsThatRan()).Should(Equal([]string{"A"})) - }) - - It("should not talk to the server", func() { - Ω(server.ReceivedRequests()).Should(BeEmpty()) - }) - - It("should report success", func() { - Ω(outcome).Should(BeTrue()) - Ω(node.Passed()).Should(BeTrue()) - Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go deleted file mode 100644 index 8f99c76..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go +++ /dev/null @@ -1,182 +0,0 @@ -package leafnodes - -import ( - "bytes" - "encoding/json" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "io/ioutil" - "net/http" - "reflect" - "time" -) - -type synchronizedBeforeSuiteNode struct { - runnerA *runner - runnerB *runner - - data []byte - - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - node := &synchronizedBeforeSuiteNode{} - - node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) - node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) - - return node -} - -func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, 
parallelTotal int, syncHost string) bool { - t := time.Now() - defer func() { - node.runTime = time.Since(t) - }() - - if parallelNode == 1 { - node.outcome, node.failure = node.runA(parallelTotal, syncHost) - } else { - node.outcome, node.failure = node.waitForA(syncHost) - } - - if node.outcome != types.SpecStatePassed { - return false - } - node.outcome, node.failure = node.runnerB.run() - - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { - outcome, failure := node.runnerA.run() - - if parallelTotal > 1 { - state := types.RemoteBeforeSuiteStatePassed - if outcome != types.SpecStatePassed { - state = types.RemoteBeforeSuiteStateFailed - } - json := (types.RemoteBeforeSuiteData{ - Data: node.data, - State: state, - }).ToJSON() - http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) - } - - return outcome, failure -} - -func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { - failure := func(message string) types.SpecFailure { - return types.SpecFailure{ - Message: message, - Location: node.runnerA.codeLocation, - ComponentType: node.runnerA.nodeType, - ComponentIndex: node.runnerA.componentIndex, - ComponentCodeLocation: node.runnerA.codeLocation, - } - } - for { - resp, err := http.Get(syncHost + "/BeforeSuiteState") - if err != nil || resp.StatusCode != http.StatusOK { - return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return types.SpecStateFailed, failure("Failed to read BeforeSuite state") - } - resp.Body.Close() - - beforeSuiteData := types.RemoteBeforeSuiteData{} - err = json.Unmarshal(body, &beforeSuiteData) - if err != nil { - return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") - } - - switch beforeSuiteData.State { - case 
types.RemoteBeforeSuiteStatePassed: - node.data = beforeSuiteData.Data - return types.SpecStatePassed, types.SpecFailure{} - case types.RemoteBeforeSuiteStateFailed: - return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") - case types.RemoteBeforeSuiteStateDisappeared: - return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") - } - - time.Sleep(50 * time.Millisecond) - } - - return types.SpecStateFailed, failure("Shouldn't get here!") -} - -func (node *synchronizedBeforeSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runnerA.nodeType, - CodeLocation: node.runnerA.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { - typeA := reflect.TypeOf(bodyA) - if typeA.Kind() != reflect.Func { - panic("SynchronizedBeforeSuite expects a function as its first argument") - } - - takesNothing := typeA.NumIn() == 0 - takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface - returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 - - if !((takesNothing || takesADoneChannel) && returnsBytes) { - panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") - } - - if takesADoneChannel { - return func(done chan<- interface{}) { - out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) - node.data = out[0].Interface().([]byte) - } - } - - return func() { - out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) - node.data = out[0].Interface().([]byte) - } -} - -func (node *synchronizedBeforeSuiteNode) wrapB(bodyB 
interface{}) interface{} { - typeB := reflect.TypeOf(bodyB) - if typeB.Kind() != reflect.Func { - panic("SynchronizedBeforeSuite expects a function as its second argument") - } - - returnsNothing := typeB.NumOut() == 0 - takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 - takesBytesAndDone := typeB.NumIn() == 2 && - typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && - typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface - - if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { - panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") - } - - if takesBytesAndDone { - return func(done chan<- interface{}) { - reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) - } - } - - return func() { - reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) - } -} diff --git a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go b/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go deleted file mode 100644 index 66f09d6..0000000 --- a/kit/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node_test.go +++ /dev/null @@ -1,445 +0,0 @@ -package leafnodes_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/ghttp" - "net/http" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "time" -) - -var _ = Describe("SynchronizedBeforeSuiteNode", func() { - var failer *Failer.Failer - var node SuiteNode - var codeLocation types.CodeLocation - var innerCodeLocation types.CodeLocation - var outcome bool - var server *ghttp.Server - - BeforeEach(func() { - server = ghttp.NewServer() - codeLocation = codelocation.New(0) - innerCodeLocation = codelocation.New(0) - failer = Failer.New() - }) - - AfterEach(func() { - server.Close() - }) - - newNode := func(bodyA interface{}, bodyB interface{}) SuiteNode { - return NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, time.Millisecond, failer) - } - - Describe("when not running in parallel", func() { - Context("when all is well", func() { - var data []byte - BeforeEach(func() { - data = nil - - node = newNode(func() []byte { - return []byte("my data") - }, func(d []byte) { - data = d - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should run A, then B passing the output from A to B", func() { - Ω(data).Should(Equal([]byte("my data"))) - }) - - It("should report success", func() { - Ω(outcome).Should(BeTrue()) - Ω(node.Passed()).Should(BeTrue()) - Ω(node.Summary().State).Should(Equal(types.SpecStatePassed)) - }) - }) - - Context("when A fails", func() { - var ranB bool - BeforeEach(func() { - ranB = false - node = newNode(func() []byte { - failer.Fail("boom", innerCodeLocation) - return nil - }, func([]byte) { - ranB = true - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should not run B", func() { - Ω(ranB).Should(BeFalse()) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - 
Ω(node.Passed()).Should(BeFalse()) - Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) - }) - }) - - Context("when B fails", func() { - BeforeEach(func() { - node = newNode(func() []byte { - return nil - }, func([]byte) { - failer.Fail("boom", innerCodeLocation) - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - Ω(node.Summary().State).Should(Equal(types.SpecStateFailed)) - }) - }) - - Context("when A times out", func() { - var ranB bool - BeforeEach(func() { - ranB = false - node = newNode(func(Done) []byte { - time.Sleep(time.Second) - return nil - }, func([]byte) { - ranB = true - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should not run B", func() { - Ω(ranB).Should(BeFalse()) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut)) - }) - }) - - Context("when B times out", func() { - BeforeEach(func() { - node = newNode(func() []byte { - return nil - }, func([]byte, Done) { - time.Sleep(time.Second) - }) - - outcome = node.Run(1, 1, server.URL()) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - Ω(node.Summary().State).Should(Equal(types.SpecStateTimedOut)) - }) - }) - }) - - Describe("when running in parallel", func() { - var ranB bool - var parallelNode, parallelTotal int - BeforeEach(func() { - ranB = false - parallelNode, parallelTotal = 1, 3 - }) - - Context("as the first node, it runs A", func() { - var expectedState types.RemoteBeforeSuiteData - - BeforeEach(func() { - parallelNode, parallelTotal = 1, 3 - }) - - JustBeforeEach(func() { - server.AppendHandlers(ghttp.CombineHandlers( - ghttp.VerifyRequest("POST", "/BeforeSuiteState"), - ghttp.VerifyJSONRepresenting(expectedState), - )) - - outcome = node.Run(parallelNode, 
parallelTotal, server.URL()) - }) - - Context("when A succeeds", func() { - BeforeEach(func() { - expectedState = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStatePassed} - - node = newNode(func() []byte { - return []byte("my data") - }, func([]byte) { - ranB = true - }) - }) - - It("should post about A succeeding", func() { - Ω(server.ReceivedRequests()).Should(HaveLen(1)) - }) - - It("should run B", func() { - Ω(ranB).Should(BeTrue()) - }) - - It("should report success", func() { - Ω(outcome).Should(BeTrue()) - }) - }) - - Context("when A fails", func() { - BeforeEach(func() { - expectedState = types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateFailed} - - node = newNode(func() []byte { - panic("BAM") - return []byte("my data") - }, func([]byte) { - ranB = true - }) - }) - - It("should post about A failing", func() { - Ω(server.ReceivedRequests()).Should(HaveLen(1)) - }) - - It("should not run B", func() { - Ω(ranB).Should(BeFalse()) - }) - - It("should report failure", func() { - Ω(outcome).Should(BeFalse()) - }) - }) - }) - - Context("as the Nth node", func() { - var statusCode int - var response interface{} - var ranA bool - var bData []byte - - BeforeEach(func() { - ranA = false - bData = nil - - statusCode = http.StatusOK - - server.AppendHandlers(ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/BeforeSuiteState"), - ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}).ToJSON())), - ), ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/BeforeSuiteState"), - ghttp.RespondWith(http.StatusOK, string((types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}).ToJSON())), - ), ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/BeforeSuiteState"), - ghttp.RespondWithJSONEncodedPtr(&statusCode, &response), - )) - - node = newNode(func() []byte { - ranA = true - return nil - }, func(data []byte) { - bData = data - }) - - parallelNode, parallelTotal = 
2, 3 - }) - - Context("when A on node1 succeeds", func() { - BeforeEach(func() { - response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStatePassed} - outcome = node.Run(parallelNode, parallelTotal, server.URL()) - }) - - It("should not run A", func() { - Ω(ranA).Should(BeFalse()) - }) - - It("should poll for A", func() { - Ω(server.ReceivedRequests()).Should(HaveLen(3)) - }) - - It("should run B when the polling succeeds", func() { - Ω(bData).Should(Equal([]byte("my data"))) - }) - - It("should succeed", func() { - Ω(outcome).Should(BeTrue()) - Ω(node.Passed()).Should(BeTrue()) - }) - }) - - Context("when A on node1 fails", func() { - BeforeEach(func() { - response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStateFailed} - outcome = node.Run(parallelNode, parallelTotal, server.URL()) - }) - - It("should not run A", func() { - Ω(ranA).Should(BeFalse()) - }) - - It("should poll for A", func() { - Ω(server.ReceivedRequests()).Should(HaveLen(3)) - }) - - It("should not run B", func() { - Ω(bData).Should(BeNil()) - }) - - It("should fail", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - - summary := node.Summary() - Ω(summary.State).Should(Equal(types.SpecStateFailed)) - Ω(summary.Failure.Message).Should(Equal("BeforeSuite on Node 1 failed")) - Ω(summary.Failure.Location).Should(Equal(codeLocation)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - - Context("when node1 disappears", func() { - BeforeEach(func() { - response = types.RemoteBeforeSuiteData{[]byte("my data"), types.RemoteBeforeSuiteStateDisappeared} - outcome = node.Run(parallelNode, parallelTotal, server.URL()) - }) - - It("should not run A", func() { - Ω(ranA).Should(BeFalse()) - }) - - It("should poll for A", func() { - 
Ω(server.ReceivedRequests()).Should(HaveLen(3)) - }) - - It("should not run B", func() { - Ω(bData).Should(BeNil()) - }) - - It("should fail", func() { - Ω(outcome).Should(BeFalse()) - Ω(node.Passed()).Should(BeFalse()) - - summary := node.Summary() - Ω(summary.State).Should(Equal(types.SpecStateFailed)) - Ω(summary.Failure.Message).Should(Equal("Node 1 disappeared before completing BeforeSuite")) - Ω(summary.Failure.Location).Should(Equal(codeLocation)) - Ω(summary.Failure.ComponentType).Should(Equal(types.SpecComponentTypeBeforeSuite)) - Ω(summary.Failure.ComponentIndex).Should(Equal(0)) - Ω(summary.Failure.ComponentCodeLocation).Should(Equal(codeLocation)) - }) - }) - }) - }) - - Describe("construction", func() { - Describe("the first function", func() { - Context("when the first function returns a byte array", func() { - Context("and takes nothing", func() { - It("should be fine", func() { - Ω(func() { - newNode(func() []byte { return nil }, func([]byte) {}) - }).ShouldNot(Panic()) - }) - }) - - Context("and takes a done function", func() { - It("should be fine", func() { - Ω(func() { - newNode(func(Done) []byte { return nil }, func([]byte) {}) - }).ShouldNot(Panic()) - }) - }) - - Context("and takes more than one thing", func() { - It("should panic", func() { - Ω(func() { - newNode(func(Done, Done) []byte { return nil }, func([]byte) {}) - }).Should(Panic()) - }) - }) - - Context("and takes something else", func() { - It("should panic", func() { - Ω(func() { - newNode(func(bool) []byte { return nil }, func([]byte) {}) - }).Should(Panic()) - }) - }) - }) - - Context("when the first function does not return a byte array", func() { - It("should panic", func() { - Ω(func() { - newNode(func() {}, func([]byte) {}) - }).Should(Panic()) - - Ω(func() { - newNode(func() []int { return nil }, func([]byte) {}) - }).Should(Panic()) - }) - }) - }) - - Describe("the second function", func() { - Context("when the second function takes a byte array", func() { - It("should be 
fine", func() { - Ω(func() { - newNode(func() []byte { return nil }, func([]byte) {}) - }).ShouldNot(Panic()) - }) - }) - - Context("when it also takes a done channel", func() { - It("should be fine", func() { - Ω(func() { - newNode(func() []byte { return nil }, func([]byte, Done) {}) - }).ShouldNot(Panic()) - }) - }) - - Context("if it takes anything else", func() { - It("should panic", func() { - Ω(func() { - newNode(func() []byte { return nil }, func([]byte, chan bool) {}) - }).Should(Panic()) - - Ω(func() { - newNode(func() []byte { return nil }, func(string) {}) - }).Should(Panic()) - }) - }) - - Context("if it takes nothing at all", func() { - It("should panic", func() { - Ω(func() { - newNode(func() []byte { return nil }, func() {}) - }).Should(Panic()) - }) - }) - - Context("if it returns something", func() { - It("should panic", func() { - Ω(func() { - newNode(func() []byte { return nil }, func([]byte) []byte { return nil }) - }).Should(Panic()) - }) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/remote/aggregator.go b/kit/github.com/onsi/ginkgo/internal/remote/aggregator.go deleted file mode 100644 index 1630e35..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - -Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output -coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: - - ginkgo -nodes=N - -where N is the number of nodes you desire. 
-*/ -package remote - -import ( - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters/stenographer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type configAndSuite struct { - config config.GinkgoConfigType - summary *types.SuiteSummary -} - -type Aggregator struct { - nodeCount int - config config.DefaultReporterConfigType - stenographer stenographer.Stenographer - result chan bool - - suiteBeginnings chan configAndSuite - aggregatedSuiteBeginnings []configAndSuite - - beforeSuites chan *types.SetupSummary - aggregatedBeforeSuites []*types.SetupSummary - - afterSuites chan *types.SetupSummary - aggregatedAfterSuites []*types.SetupSummary - - specCompletions chan *types.SpecSummary - completedSpecs []*types.SpecSummary - - suiteEndings chan *types.SuiteSummary - aggregatedSuiteEndings []*types.SuiteSummary - specs []*types.SpecSummary - - startTime time.Time -} - -func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { - aggregator := &Aggregator{ - nodeCount: nodeCount, - result: result, - config: config, - stenographer: stenographer, - - suiteBeginnings: make(chan configAndSuite, 0), - beforeSuites: make(chan *types.SetupSummary, 0), - afterSuites: make(chan *types.SetupSummary, 0), - specCompletions: make(chan *types.SpecSummary, 0), - suiteEndings: make(chan *types.SuiteSummary, 0), - } - - go aggregator.mux() - - return aggregator -} - -func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - aggregator.suiteBeginnings <- configAndSuite{config, summary} -} - -func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.beforeSuites <- setupSummary -} - -func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.afterSuites <- 
setupSummary -} - -func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { - //noop -} - -func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { - aggregator.specCompletions <- specSummary -} - -func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { - aggregator.suiteEndings <- summary -} - -func (aggregator *Aggregator) mux() { -loop: - for { - select { - case configAndSuite := <-aggregator.suiteBeginnings: - aggregator.registerSuiteBeginning(configAndSuite) - case setupSummary := <-aggregator.beforeSuites: - aggregator.registerBeforeSuite(setupSummary) - case setupSummary := <-aggregator.afterSuites: - aggregator.registerAfterSuite(setupSummary) - case specSummary := <-aggregator.specCompletions: - aggregator.registerSpecCompletion(specSummary) - case suite := <-aggregator.suiteEndings: - finished, passed := aggregator.registerSuiteEnding(suite) - if finished { - aggregator.result <- passed - break loop - } - } - } -} - -func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { - aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) - - if len(aggregator.aggregatedSuiteBeginnings) == 1 { - aggregator.startTime = time.Now() - } - - if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { - return - } - - aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) - - numberOfSpecsToRun := 0 - totalNumberOfSpecs := 0 - for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings { - numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun - totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs - } - - aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct) - 
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { - aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { - aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { - aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) - aggregator.specs = append(aggregator.specs, specSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) flushCompletedSpecs() { - if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { - return - } - - for _, setupSummary := range aggregator.aggregatedBeforeSuites { - aggregator.announceBeforeSuite(setupSummary) - } - - for _, specSummary := range aggregator.completedSpecs { - aggregator.announceSpec(specSummary) - } - - for _, setupSummary := range aggregator.aggregatedAfterSuites { - aggregator.announceAfterSuite(setupSummary) - } - - aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} - aggregator.completedSpecs = []*types.SpecSummary{} - aggregator.aggregatedAfterSuites = []*types.SetupSummary{} -} - -func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { - aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) - if setupSummary.State != types.SpecStatePassed { - aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { - 
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) - if setupSummary.State != types.SpecStatePassed { - aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { - if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { - aggregator.stenographer.AnnounceSpecWillRun(specSummary) - } - - aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) - - switch specSummary.State { - case types.SpecStatePassed: - if specSummary.IsMeasurement { - aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) - } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { - aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) - } else { - aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) - } - - case types.SpecStatePending: - aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) - case types.SpecStateSkipped: - aggregator.stenographer.AnnounceSkippedSpec(specSummary) - case types.SpecStateTimedOut: - aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - case types.SpecStatePanicked: - aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - case types.SpecStateFailed: - aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { - aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) - if len(aggregator.aggregatedSuiteEndings) < 
aggregator.nodeCount { - return false, false - } - - aggregatedSuiteSummary := &types.SuiteSummary{} - aggregatedSuiteSummary.SuiteSucceeded = true - - for _, suiteSummary := range aggregator.aggregatedSuiteEndings { - if suiteSummary.SuiteSucceeded == false { - aggregatedSuiteSummary.SuiteSucceeded = false - } - - aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun - aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs - aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs - aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs - aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs - aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs - } - - aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) - - aggregator.stenographer.SummarizeFailures(aggregator.specs) - aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) - - return true, aggregatedSuiteSummary.SuiteSucceeded -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/aggregator_test.go b/kit/github.com/onsi/ginkgo/internal/remote/aggregator_test.go deleted file mode 100644 index 11ba162..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/aggregator_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package remote_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "time" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/remote" - st "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters/stenographer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("Aggregator", func() { - var ( - aggregator *Aggregator - reporterConfig config.DefaultReporterConfigType - stenographer *st.FakeStenographer - result chan bool - - ginkgoConfig1 config.GinkgoConfigType - ginkgoConfig2 config.GinkgoConfigType - - suiteSummary1 *types.SuiteSummary - suiteSummary2 *types.SuiteSummary - - beforeSummary *types.SetupSummary - afterSummary *types.SetupSummary - specSummary *types.SpecSummary - - suiteDescription string - ) - - BeforeEach(func() { - reporterConfig = config.DefaultReporterConfigType{ - NoColor: false, - SlowSpecThreshold: 0.1, - NoisyPendings: true, - Succinct: false, - Verbose: true, - } - stenographer = st.NewFakeStenographer() - result = make(chan bool, 1) - aggregator = NewAggregator(2, result, reporterConfig, stenographer) - - // - // now set up some fixture data - // - - ginkgoConfig1 = config.GinkgoConfigType{ - RandomSeed: 1138, - RandomizeAllSpecs: true, - ParallelNode: 1, - ParallelTotal: 2, - } - - ginkgoConfig2 = config.GinkgoConfigType{ - RandomSeed: 1138, - RandomizeAllSpecs: true, - ParallelNode: 2, - ParallelTotal: 2, - } - - suiteDescription = "My Parallel Suite" - - suiteSummary1 = &types.SuiteSummary{ - SuiteDescription: suiteDescription, - - NumberOfSpecsBeforeParallelization: 30, - NumberOfTotalSpecs: 17, - NumberOfSpecsThatWillBeRun: 15, - NumberOfPendingSpecs: 1, - NumberOfSkippedSpecs: 1, - } - - suiteSummary2 = &types.SuiteSummary{ - SuiteDescription: suiteDescription, - - NumberOfSpecsBeforeParallelization: 30, - NumberOfTotalSpecs: 13, - NumberOfSpecsThatWillBeRun: 8, - NumberOfPendingSpecs: 2, - NumberOfSkippedSpecs: 3, - } - - beforeSummary = &types.SetupSummary{ - State: types.SpecStatePassed, - CapturedOutput: "BeforeSuiteOutput", - } - - afterSummary 
= &types.SetupSummary{ - State: types.SpecStatePassed, - CapturedOutput: "AfterSuiteOutput", - } - - specSummary = &types.SpecSummary{ - State: types.SpecStatePassed, - CapturedOutput: "SpecOutput", - } - }) - - call := func(method string, args ...interface{}) st.FakeStenographerCall { - return st.NewFakeStenographerCall(method, args...) - } - - beginSuite := func() { - stenographer.Reset() - aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) - aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1) - Eventually(func() interface{} { - return len(stenographer.Calls()) - }).Should(BeNumerically(">=", 3)) - } - - Describe("Announcing the beginning of the suite", func() { - Context("When one of the parallel-suites starts", func() { - BeforeEach(func() { - aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) - }) - - It("should be silent", func() { - Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty()) - }) - }) - - Context("once all of the parallel-suites have started", func() { - BeforeEach(func() { - aggregator.SpecSuiteWillBegin(ginkgoConfig2, suiteSummary2) - aggregator.SpecSuiteWillBegin(ginkgoConfig1, suiteSummary1) - Eventually(func() interface{} { - return stenographer.Calls() - }).Should(HaveLen(3)) - }) - - It("should announce the beginning of the suite", func() { - Ω(stenographer.Calls()).Should(HaveLen(3)) - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", suiteDescription, ginkgoConfig1.RandomSeed, true, false))) - Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceNumberOfSpecs", 23, 30, false))) - Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceAggregatedParallelRun", 2, false))) - }) - }) - }) - - Describe("Announcing specs and before suites", func() { - Context("when the parallel-suites have not all started", func() { - BeforeEach(func() { - aggregator.BeforeSuiteDidRun(beforeSummary) - aggregator.AfterSuiteDidRun(afterSummary) - aggregator.SpecDidComplete(specSummary) - }) - - 
It("should not announce any specs", func() { - Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty()) - }) - - Context("when the parallel-suites subsequently start", func() { - BeforeEach(func() { - beginSuite() - }) - - It("should announce the specs, the before suites and the after suites", func() { - Eventually(func() interface{} { - return stenographer.Calls() - }).Should(ContainElement(call("AnnounceSuccesfulSpec", specSummary))) - - Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput))) - Ω(stenographer.Calls()).Should(ContainElement(call("AnnounceCapturedOutput", afterSummary.CapturedOutput))) - }) - }) - }) - - Context("When the parallel-suites have all started", func() { - BeforeEach(func() { - beginSuite() - stenographer.Reset() - }) - - Context("When a spec completes", func() { - BeforeEach(func() { - aggregator.BeforeSuiteDidRun(beforeSummary) - aggregator.SpecDidComplete(specSummary) - aggregator.AfterSuiteDidRun(afterSummary) - Eventually(func() interface{} { - return stenographer.Calls() - }).Should(HaveLen(5)) - }) - - It("should announce the captured output of the BeforeSuite", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceCapturedOutput", beforeSummary.CapturedOutput))) - }) - - It("should announce that the spec will run (when in verbose mode)", func() { - Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecWillRun", specSummary))) - }) - - It("should announce the captured stdout of the spec", func() { - Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceCapturedOutput", specSummary.CapturedOutput))) - }) - - It("should announce completion", func() { - Ω(stenographer.Calls()[3]).Should(Equal(call("AnnounceSuccesfulSpec", specSummary))) - }) - - It("should announce the captured output of the AfterSuite", func() { - Ω(stenographer.Calls()[4]).Should(Equal(call("AnnounceCapturedOutput", afterSummary.CapturedOutput))) - }) - }) - }) - }) - - 
Describe("Announcing the end of the suite", func() { - BeforeEach(func() { - beginSuite() - stenographer.Reset() - }) - - Context("When one of the parallel-suites ends", func() { - BeforeEach(func() { - aggregator.SpecSuiteDidEnd(suiteSummary2) - }) - - It("should be silent", func() { - Consistently(func() interface{} { return stenographer.Calls() }).Should(BeEmpty()) - }) - - It("should not notify the channel", func() { - Ω(result).Should(BeEmpty()) - }) - }) - - Context("once all of the parallel-suites end", func() { - BeforeEach(func() { - time.Sleep(200 * time.Millisecond) - - suiteSummary1.SuiteSucceeded = true - suiteSummary1.NumberOfPassedSpecs = 15 - suiteSummary1.NumberOfFailedSpecs = 0 - suiteSummary2.SuiteSucceeded = false - suiteSummary2.NumberOfPassedSpecs = 5 - suiteSummary2.NumberOfFailedSpecs = 3 - - aggregator.SpecSuiteDidEnd(suiteSummary2) - aggregator.SpecSuiteDidEnd(suiteSummary1) - Eventually(func() interface{} { - return stenographer.Calls() - }).Should(HaveLen(2)) - }) - - It("should announce the end of the suite", func() { - compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary) - - Ω(compositeSummary.SuiteSucceeded).Should(BeFalse()) - Ω(compositeSummary.NumberOfSpecsThatWillBeRun).Should(Equal(23)) - Ω(compositeSummary.NumberOfTotalSpecs).Should(Equal(30)) - Ω(compositeSummary.NumberOfPassedSpecs).Should(Equal(20)) - Ω(compositeSummary.NumberOfFailedSpecs).Should(Equal(3)) - Ω(compositeSummary.NumberOfPendingSpecs).Should(Equal(3)) - Ω(compositeSummary.NumberOfSkippedSpecs).Should(Equal(4)) - Ω(compositeSummary.RunTime.Seconds()).Should(BeNumerically(">", 0.2)) - }) - }) - - Context("when all the parallel-suites pass", func() { - BeforeEach(func() { - suiteSummary1.SuiteSucceeded = true - suiteSummary2.SuiteSucceeded = true - - aggregator.SpecSuiteDidEnd(suiteSummary2) - aggregator.SpecSuiteDidEnd(suiteSummary1) - Eventually(func() interface{} { - return stenographer.Calls() - }).Should(HaveLen(2)) - }) - - It("should 
report success", func() { - compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary) - - Ω(compositeSummary.SuiteSucceeded).Should(BeTrue()) - }) - - It("should notify the channel that it succeded", func(done Done) { - Ω(<-result).Should(BeTrue()) - close(done) - }) - }) - - Context("when one of the parallel-suites fails", func() { - BeforeEach(func() { - suiteSummary1.SuiteSucceeded = true - suiteSummary2.SuiteSucceeded = false - - aggregator.SpecSuiteDidEnd(suiteSummary2) - aggregator.SpecSuiteDidEnd(suiteSummary1) - Eventually(func() interface{} { - return stenographer.Calls() - }).Should(HaveLen(2)) - }) - - It("should report failure", func() { - compositeSummary := stenographer.Calls()[1].Args[0].(*types.SuiteSummary) - - Ω(compositeSummary.SuiteSucceeded).Should(BeFalse()) - }) - - It("should notify the channel that it failed", func(done Done) { - Ω(<-result).Should(BeFalse()) - close(done) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go b/kit/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go deleted file mode 100644 index a928f93..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/fake_output_interceptor_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package remote_test - -type fakeOutputInterceptor struct { - DidStartInterceptingOutput bool - DidStopInterceptingOutput bool - InterceptedOutput string -} - -func (interceptor *fakeOutputInterceptor) StartInterceptingOutput() error { - interceptor.DidStartInterceptingOutput = true - return nil -} - -func (interceptor *fakeOutputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - interceptor.DidStopInterceptingOutput = true - return interceptor.InterceptedOutput, nil -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go b/kit/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go deleted file mode 100644 index 3543c59..0000000 --- 
a/kit/github.com/onsi/ginkgo/internal/remote/fake_poster_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package remote_test - -import ( - "io" - "io/ioutil" - "net/http" -) - -type post struct { - url string - bodyType string - bodyContent []byte -} - -type fakePoster struct { - posts []post -} - -func newFakePoster() *fakePoster { - return &fakePoster{ - posts: make([]post, 0), - } -} - -func (poster *fakePoster) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { - bodyContent, _ := ioutil.ReadAll(body) - poster.posts = append(poster.posts, post{ - url: url, - bodyType: bodyType, - bodyContent: bodyContent, - }) - return nil, nil -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/kit/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go deleted file mode 100644 index 8ac7ad1..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go +++ /dev/null @@ -1,88 +0,0 @@ -package remote - -import ( - "bytes" - "encoding/json" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "io" - "net/http" -) - -//An interface to net/http's client to allow the injection of fakes under test -type Poster interface { - Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) -} - -/* -The ForwardingReporter is a Ginkgo reporter that forwards information to -a Ginkgo remote server. - -When streaming parallel test output, this repoter is automatically installed by Ginkgo. - -This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner -detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter -in place of Ginkgo's DefaultReporter. 
-*/ - -type ForwardingReporter struct { - serverHost string - poster Poster - outputInterceptor OutputInterceptor -} - -func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter { - return &ForwardingReporter{ - serverHost: serverHost, - poster: poster, - outputInterceptor: outputInterceptor, - } -} - -func (reporter *ForwardingReporter) post(path string, data interface{}) { - encoded, _ := json.Marshal(data) - buffer := bytes.NewBuffer(encoded) - reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) -} - -func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { - data := struct { - Config config.GinkgoConfigType `json:"config"` - Summary *types.SuiteSummary `json:"suite-summary"` - }{ - conf, - summary, - } - - reporter.outputInterceptor.StartInterceptingOutput() - reporter.post("/SpecSuiteWillBegin", data) -} - -func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - setupSummary.CapturedOutput = output - reporter.post("/BeforeSuiteDidRun", setupSummary) -} - -func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { - reporter.post("/SpecWillRun", specSummary) -} - -func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - specSummary.CapturedOutput = output - reporter.post("/SpecDidComplete", specSummary) -} - -func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - setupSummary.CapturedOutput = output - 
reporter.post("/AfterSuiteDidRun", setupSummary) -} - -func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.post("/SpecSuiteDidEnd", summary) -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go b/kit/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go deleted file mode 100644 index f08e05a..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/forwarding_reporter_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package remote_test - -import ( - "encoding/json" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/remote" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("ForwardingReporter", func() { - var ( - reporter *ForwardingReporter - interceptor *fakeOutputInterceptor - poster *fakePoster - suiteSummary *types.SuiteSummary - specSummary *types.SpecSummary - setupSummary *types.SetupSummary - serverHost string - ) - - BeforeEach(func() { - serverHost = "http://127.0.0.1:7788" - - poster = newFakePoster() - - interceptor = &fakeOutputInterceptor{ - InterceptedOutput: "The intercepted output!", - } - - reporter = NewForwardingReporter(serverHost, poster, interceptor) - - suiteSummary = &types.SuiteSummary{ - SuiteDescription: "My Test Suite", - } - - setupSummary = &types.SetupSummary{ - State: types.SpecStatePassed, - } - - specSummary = &types.SpecSummary{ - ComponentTexts: []string{"My", "Spec"}, - State: types.SpecStatePassed, - } - }) - - Context("When a suite begins", func() { - BeforeEach(func() { - reporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary) - }) - - It("should start intercepting output", func() { - Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) - }) - - It("should POST the SuiteSummary and Ginkgo 
Config to the Ginkgo server", func() { - Ω(poster.posts).Should(HaveLen(1)) - Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteWillBegin")) - Ω(poster.posts[0].bodyType).Should(Equal("application/json")) - - var sentData struct { - SentConfig config.GinkgoConfigType `json:"config"` - SentSuiteSummary *types.SuiteSummary `json:"suite-summary"` - } - - err := json.Unmarshal(poster.posts[0].bodyContent, &sentData) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(sentData.SentConfig).Should(Equal(config.GinkgoConfig)) - Ω(sentData.SentSuiteSummary).Should(Equal(suiteSummary)) - }) - }) - - Context("when a BeforeSuite completes", func() { - BeforeEach(func() { - reporter.BeforeSuiteDidRun(setupSummary) - }) - - It("should stop, then start intercepting output", func() { - Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue()) - Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) - }) - - It("should POST the SetupSummary to the Ginkgo server", func() { - Ω(poster.posts).Should(HaveLen(1)) - Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/BeforeSuiteDidRun")) - Ω(poster.posts[0].bodyType).Should(Equal("application/json")) - - var summary *types.SetupSummary - err := json.Unmarshal(poster.posts[0].bodyContent, &summary) - Ω(err).ShouldNot(HaveOccurred()) - setupSummary.CapturedOutput = interceptor.InterceptedOutput - Ω(summary).Should(Equal(setupSummary)) - }) - }) - - Context("when an AfterSuite completes", func() { - BeforeEach(func() { - reporter.AfterSuiteDidRun(setupSummary) - }) - - It("should stop, then start intercepting output", func() { - Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue()) - Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) - }) - - It("should POST the SetupSummary to the Ginkgo server", func() { - Ω(poster.posts).Should(HaveLen(1)) - Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/AfterSuiteDidRun")) - Ω(poster.posts[0].bodyType).Should(Equal("application/json")) - - var summary 
*types.SetupSummary - err := json.Unmarshal(poster.posts[0].bodyContent, &summary) - Ω(err).ShouldNot(HaveOccurred()) - setupSummary.CapturedOutput = interceptor.InterceptedOutput - Ω(summary).Should(Equal(setupSummary)) - }) - }) - - Context("When a spec will run", func() { - BeforeEach(func() { - reporter.SpecWillRun(specSummary) - }) - - It("should POST the SpecSummary to the Ginkgo server", func() { - Ω(poster.posts).Should(HaveLen(1)) - Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecWillRun")) - Ω(poster.posts[0].bodyType).Should(Equal("application/json")) - - var summary *types.SpecSummary - err := json.Unmarshal(poster.posts[0].bodyContent, &summary) - Ω(err).ShouldNot(HaveOccurred()) - Ω(summary).Should(Equal(specSummary)) - }) - - Context("When a spec completes", func() { - BeforeEach(func() { - specSummary.State = types.SpecStatePanicked - reporter.SpecDidComplete(specSummary) - }) - - It("should POST the SpecSummary to the Ginkgo server and include any intercepted output", func() { - Ω(poster.posts).Should(HaveLen(2)) - Ω(poster.posts[1].url).Should(Equal("http://127.0.0.1:7788/SpecDidComplete")) - Ω(poster.posts[1].bodyType).Should(Equal("application/json")) - - var summary *types.SpecSummary - err := json.Unmarshal(poster.posts[1].bodyContent, &summary) - Ω(err).ShouldNot(HaveOccurred()) - specSummary.CapturedOutput = interceptor.InterceptedOutput - Ω(summary).Should(Equal(specSummary)) - }) - - It("should stop, then start intercepting output", func() { - Ω(interceptor.DidStopInterceptingOutput).Should(BeTrue()) - Ω(interceptor.DidStartInterceptingOutput).Should(BeTrue()) - }) - }) - }) - - Context("When a suite ends", func() { - BeforeEach(func() { - reporter.SpecSuiteDidEnd(suiteSummary) - }) - - It("should POST the SuiteSummary to the Ginkgo server", func() { - Ω(poster.posts).Should(HaveLen(1)) - Ω(poster.posts[0].url).Should(Equal("http://127.0.0.1:7788/SpecSuiteDidEnd")) - 
Ω(poster.posts[0].bodyType).Should(Equal("application/json")) - - var summary *types.SuiteSummary - - err := json.Unmarshal(poster.posts[0].bodyContent, &summary) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(summary).Should(Equal(suiteSummary)) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor.go deleted file mode 100644 index 093f451..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor.go +++ /dev/null @@ -1,10 +0,0 @@ -package remote - -/* -The OutputInterceptor is used by the ForwardingReporter to -intercept and capture all stdin and stderr output during a test run. -*/ -type OutputInterceptor interface { - StartInterceptingOutput() error - StopInterceptingAndReturnOutput() (string, error) -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_test.go b/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_test.go deleted file mode 100644 index 53788ac..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package remote_test - -import ( - "fmt" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/remote" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "os" -) - -var _ = Describe("OutputInterceptor", func() { - var interceptor OutputInterceptor - - BeforeEach(func() { - interceptor = NewOutputInterceptor() - }) - - It("should capture all stdout/stderr output", func() { - err := interceptor.StartInterceptingOutput() - Ω(err).ShouldNot(HaveOccurred()) - - fmt.Fprint(os.Stdout, "STDOUT") - fmt.Fprint(os.Stderr, "STDERR") - print("PRINT") - - output, err := interceptor.StopInterceptingAndReturnOutput() - - Ω(output).Should(Equal("STDOUTSTDERRPRINT")) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should error if told to intercept output twice", func() { - err := interceptor.StartInterceptingOutput() - Ω(err).ShouldNot(HaveOccurred()) - - print("A") - - err = interceptor.StartInterceptingOutput() - Ω(err).Should(HaveOccurred()) - - print("B") - - output, err := interceptor.StopInterceptingAndReturnOutput() - - Ω(output).Should(Equal("AB")) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should allow multiple interception sessions", func() { - err := interceptor.StartInterceptingOutput() - Ω(err).ShouldNot(HaveOccurred()) - print("A") - output, err := interceptor.StopInterceptingAndReturnOutput() - Ω(output).Should(Equal("A")) - Ω(err).ShouldNot(HaveOccurred()) - - err = interceptor.StartInterceptingOutput() - Ω(err).ShouldNot(HaveOccurred()) - print("B") - output, err = interceptor.StopInterceptingAndReturnOutput() - Ω(output).Should(Equal("B")) - Ω(err).ShouldNot(HaveOccurred()) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go deleted file mode 100644 index ccda032..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build freebsd openbsd netbsd darwin linux - -package remote - -import ( - "errors" - "io/ioutil" - "os" - "syscall" -) - -func NewOutputInterceptor() 
OutputInterceptor { - return &outputInterceptor{} -} - -type outputInterceptor struct { - stdoutPlaceholder *os.File - stderrPlaceholder *os.File - redirectFile *os.File - intercepting bool -} - -func (interceptor *outputInterceptor) StartInterceptingOutput() error { - if interceptor.intercepting { - return errors.New("Already intercepting output!") - } - interceptor.intercepting = true - - var err error - - interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo") - if err != nil { - return err - } - - interceptor.stdoutPlaceholder, err = ioutil.TempFile("", "ginkgo") - if err != nil { - return err - } - - interceptor.stderrPlaceholder, err = ioutil.TempFile("", "ginkgo") - if err != nil { - return err - } - - syscall.Dup2(1, int(interceptor.stdoutPlaceholder.Fd())) - syscall.Dup2(2, int(interceptor.stderrPlaceholder.Fd())) - - syscall.Dup2(int(interceptor.redirectFile.Fd()), 1) - syscall.Dup2(int(interceptor.redirectFile.Fd()), 2) - - return nil -} - -func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - if !interceptor.intercepting { - return "", errors.New("Not intercepting output!") - } - - syscall.Dup2(int(interceptor.stdoutPlaceholder.Fd()), 1) - syscall.Dup2(int(interceptor.stderrPlaceholder.Fd()), 2) - - for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} { - f.Close() - } - - output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) - - for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} { - os.Remove(f.Name()) - } - - interceptor.intercepting = false - - return string(output), err -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go deleted file mode 100644 index c8f97d9..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go +++ /dev/null @@ -1,33 
+0,0 @@ -// +build windows - -package remote - -import ( - "errors" -) - -func NewOutputInterceptor() OutputInterceptor { - return &outputInterceptor{} -} - -type outputInterceptor struct { - intercepting bool -} - -func (interceptor *outputInterceptor) StartInterceptingOutput() error { - if interceptor.intercepting { - return errors.New("Already intercepting output!") - } - interceptor.intercepting = true - - // not working on windows... - - return nil -} - -func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - // not working on windows... - interceptor.intercepting = false - - return "", nil -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go b/kit/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go deleted file mode 100644 index 0f04d79..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/remote_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package remote_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestRemote(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Remote Spec Forwarding Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/server.go b/kit/github.com/onsi/ginkgo/internal/remote/server.go deleted file mode 100644 index 05459a4..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/server.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - -The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. -This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). 
- -*/ - -package remote - -import ( - "encoding/json" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "io/ioutil" - "net" - "net/http" - "sync" -) - -/* -Server spins up on an automatically selected port and listens for communication from the forwarding reporter. -It then forwards that communication to attached reporters. -*/ -type Server struct { - listener net.Listener - reporters []reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteData types.RemoteBeforeSuiteData - parallelTotal int -} - -//Create a new server, automatically selecting a port -func NewServer(parallelTotal int) (*Server, error) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return nil, err - } - return &Server{ - listener: listener, - lock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending}, - parallelTotal: parallelTotal, - }, nil -} - -//Start the server. 
You don't need to `go s.Start()`, just `s.Start()` -func (server *Server) Start() { - httpServer := &http.Server{} - mux := http.NewServeMux() - httpServer.Handler = mux - - //streaming endpoints - mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) - mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) - mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) - mux.HandleFunc("/SpecWillRun", server.specWillRun) - mux.HandleFunc("/SpecDidComplete", server.specDidComplete) - mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) - - //synchronization endpoints - mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) - mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) - - go httpServer.Serve(server.listener) -} - -//Stop the server -func (server *Server) Close() { - server.listener.Close() -} - -//The address the server can be reached it. Pass this into the `ForwardingReporter`. -func (server *Server) Address() string { - return "http://" + server.listener.Addr().String() -} - -// -// Streaming Endpoints -// - -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` -func (server *Server) readAll(request *http.Request) []byte { - defer request.Body.Close() - body, _ := ioutil.ReadAll(request.Body) - return body -} - -func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { - server.reporters = reporters -} - -func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - - var data struct { - Config config.GinkgoConfigType `json:"config"` - Summary *types.SuiteSummary `json:"suite-summary"` - } - - json.Unmarshal(body, &data) - - for _, reporter := range server.reporters { - reporter.SpecSuiteWillBegin(data.Config, data.Summary) - } -} - -func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var 
setupSummary *types.SetupSummary - json.Unmarshal(body, &setupSummary) - - for _, reporter := range server.reporters { - reporter.BeforeSuiteDidRun(setupSummary) - } -} - -func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var setupSummary *types.SetupSummary - json.Unmarshal(body, &setupSummary) - - for _, reporter := range server.reporters { - reporter.AfterSuiteDidRun(setupSummary) - } -} - -func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var specSummary *types.SpecSummary - json.Unmarshal(body, &specSummary) - - for _, reporter := range server.reporters { - reporter.SpecWillRun(specSummary) - } -} - -func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var specSummary *types.SpecSummary - json.Unmarshal(body, &specSummary) - - for _, reporter := range server.reporters { - reporter.SpecDidComplete(specSummary) - } -} - -func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var suiteSummary *types.SuiteSummary - json.Unmarshal(body, &suiteSummary) - - for _, reporter := range server.reporters { - reporter.SpecSuiteDidEnd(suiteSummary) - } -} - -// -// Synchronization Endpoints -// - -func (server *Server) RegisterAlive(node int, alive func() bool) { - server.lock.Lock() - defer server.lock.Unlock() - server.alives[node-1] = alive -} - -func (server *Server) nodeIsAlive(node int) bool { - server.lock.Lock() - defer server.lock.Unlock() - alive := server.alives[node-1] - if alive == nil { - return true - } - return alive() -} - -func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { - if request.Method == "POST" { - dec := json.NewDecoder(request.Body) - dec.Decode(&(server.beforeSuiteData)) - } else { - beforeSuiteData := 
server.beforeSuiteData - if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { - beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared - } - enc := json.NewEncoder(writer) - enc.Encode(beforeSuiteData) - } -} - -func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { - afterSuiteData := types.RemoteAfterSuiteData{ - CanRun: true, - } - for i := 2; i <= server.parallelTotal; i++ { - afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i) - } - - enc := json.NewEncoder(writer) - enc.Encode(afterSuiteData) -} diff --git a/kit/github.com/onsi/ginkgo/internal/remote/server_test.go b/kit/github.com/onsi/ginkgo/internal/remote/server_test.go deleted file mode 100644 index fc95dac..0000000 --- a/kit/github.com/onsi/ginkgo/internal/remote/server_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package remote_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/remote" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - - "bytes" - "encoding/json" - "net/http" -) - -var _ = Describe("Server", func() { - var ( - server *Server - ) - - BeforeEach(func() { - var err error - server, err = NewServer(3) - Ω(err).ShouldNot(HaveOccurred()) - - server.Start() - }) - - AfterEach(func() { - server.Close() - }) - - Describe("Streaming endpoints", func() { - var ( - reporterA, reporterB *reporters.FakeReporter - forwardingReporter *ForwardingReporter - - suiteSummary *types.SuiteSummary - setupSummary *types.SetupSummary - specSummary *types.SpecSummary - ) - - BeforeEach(func() { - reporterA = reporters.NewFakeReporter() - reporterB = reporters.NewFakeReporter() - - server.RegisterReporters(reporterA, reporterB) - - forwardingReporter = NewForwardingReporter(server.Address(), &http.Client{}, &fakeOutputInterceptor{}) - - suiteSummary = &types.SuiteSummary{ - SuiteDescription: "My Test Suite", - } - - setupSummary = &types.SetupSummary{ - State: types.SpecStatePassed, - } - - specSummary = &types.SpecSummary{ - ComponentTexts: []string{"My", "Spec"}, - State: types.SpecStatePassed, - } - }) - - It("should make its address available", func() { - Ω(server.Address()).Should(MatchRegexp(`http://127.0.0.1:\d{2,}`)) - }) - - Describe("/SpecSuiteWillBegin", func() { - It("should decode and forward the Ginkgo config and suite summary", func(done Done) { - forwardingReporter.SpecSuiteWillBegin(config.GinkgoConfig, suiteSummary) - Ω(reporterA.Config).Should(Equal(config.GinkgoConfig)) - Ω(reporterB.Config).Should(Equal(config.GinkgoConfig)) - Ω(reporterA.BeginSummary).Should(Equal(suiteSummary)) - Ω(reporterB.BeginSummary).Should(Equal(suiteSummary)) - close(done) - }) - }) - - Describe("/BeforeSuiteDidRun", func() { - It("should decode and 
forward the setup summary", func() { - forwardingReporter.BeforeSuiteDidRun(setupSummary) - Ω(reporterA.BeforeSuiteSummary).Should(Equal(setupSummary)) - Ω(reporterB.BeforeSuiteSummary).Should(Equal(setupSummary)) - }) - }) - - Describe("/AfterSuiteDidRun", func() { - It("should decode and forward the setup summary", func() { - forwardingReporter.AfterSuiteDidRun(setupSummary) - Ω(reporterA.AfterSuiteSummary).Should(Equal(setupSummary)) - Ω(reporterB.AfterSuiteSummary).Should(Equal(setupSummary)) - }) - }) - - Describe("/SpecWillRun", func() { - It("should decode and forward the spec summary", func(done Done) { - forwardingReporter.SpecWillRun(specSummary) - Ω(reporterA.SpecWillRunSummaries[0]).Should(Equal(specSummary)) - Ω(reporterB.SpecWillRunSummaries[0]).Should(Equal(specSummary)) - close(done) - }) - }) - - Describe("/SpecDidComplete", func() { - It("should decode and forward the spec summary", func(done Done) { - forwardingReporter.SpecDidComplete(specSummary) - Ω(reporterA.SpecSummaries[0]).Should(Equal(specSummary)) - Ω(reporterB.SpecSummaries[0]).Should(Equal(specSummary)) - close(done) - }) - }) - - Describe("/SpecSuiteDidEnd", func() { - It("should decode and forward the suite summary", func(done Done) { - forwardingReporter.SpecSuiteDidEnd(suiteSummary) - Ω(reporterA.EndSummary).Should(Equal(suiteSummary)) - Ω(reporterB.EndSummary).Should(Equal(suiteSummary)) - close(done) - }) - }) - }) - - Describe("Synchronization endpoints", func() { - Describe("GETting and POSTing BeforeSuiteState", func() { - getBeforeSuite := func() types.RemoteBeforeSuiteData { - resp, err := http.Get(server.Address() + "/BeforeSuiteState") - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - - r := types.RemoteBeforeSuiteData{} - decoder := json.NewDecoder(resp.Body) - err = decoder.Decode(&r) - Ω(err).ShouldNot(HaveOccurred()) - - return r - } - - postBeforeSuite := func(r types.RemoteBeforeSuiteData) { - resp, err := 
http.Post(server.Address()+"/BeforeSuiteState", "application/json", bytes.NewReader(r.ToJSON())) - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - } - - Context("when the first node's Alive has not been registered yet", func() { - It("should return pending", func() { - state := getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) - - state = getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) - }) - }) - - Context("when the first node is Alive but has not responded yet", func() { - BeforeEach(func() { - server.RegisterAlive(1, func() bool { - return true - }) - }) - - It("should return pending", func() { - state := getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) - - state = getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending})) - }) - }) - - Context("when the first node has responded", func() { - var state types.RemoteBeforeSuiteData - BeforeEach(func() { - server.RegisterAlive(1, func() bool { - return false - }) - - state = types.RemoteBeforeSuiteData{ - Data: []byte("my data"), - State: types.RemoteBeforeSuiteStatePassed, - } - postBeforeSuite(state) - }) - - It("should return the passed in state", func() { - returnedState := getBeforeSuite() - Ω(returnedState).Should(Equal(state)) - }) - }) - - Context("when the first node is no longer Alive and has not responded yet", func() { - BeforeEach(func() { - server.RegisterAlive(1, func() bool { - return false - }) - }) - - It("should return disappeared", func() { - state := getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateDisappeared})) - - state = getBeforeSuite() - Ω(state).Should(Equal(types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStateDisappeared})) - }) - }) - }) - - 
Describe("GETting RemoteAfterSuiteData", func() { - getRemoteAfterSuiteData := func() bool { - resp, err := http.Get(server.Address() + "/RemoteAfterSuiteData") - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - - a := types.RemoteAfterSuiteData{} - decoder := json.NewDecoder(resp.Body) - err = decoder.Decode(&a) - Ω(err).ShouldNot(HaveOccurred()) - - return a.CanRun - } - - Context("when there are unregistered nodes", func() { - BeforeEach(func() { - server.RegisterAlive(2, func() bool { - return false - }) - }) - - It("should return false", func() { - Ω(getRemoteAfterSuiteData()).Should(BeFalse()) - }) - }) - - Context("when all none-node-1 nodes are still running", func() { - BeforeEach(func() { - server.RegisterAlive(2, func() bool { - return true - }) - - server.RegisterAlive(3, func() bool { - return false - }) - }) - - It("should return false", func() { - Ω(getRemoteAfterSuiteData()).Should(BeFalse()) - }) - }) - - Context("when all none-1 nodes are done", func() { - BeforeEach(func() { - server.RegisterAlive(2, func() bool { - return false - }) - - server.RegisterAlive(3, func() bool { - return false - }) - }) - - It("should return true", func() { - Ω(getRemoteAfterSuiteData()).Should(BeTrue()) - }) - - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/spec/index_computer.go b/kit/github.com/onsi/ginkgo/internal/spec/index_computer.go deleted file mode 100644 index 5a67fc7..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/index_computer.go +++ /dev/null @@ -1,55 +0,0 @@ -package spec - -func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { - if length == 0 { - return 0, 0 - } - - // We have more nodes than tests. Trivial case. 
- if parallelTotal >= length { - if parallelNode > length { - return 0, 0 - } else { - return parallelNode - 1, 1 - } - } - - // This is the minimum amount of tests that a node will be required to run - minTestsPerNode := length / parallelTotal - - // This is the maximum amount of tests that a node will be required to run - // The algorithm guarantees that this would be equal to at least the minimum amount - // and at most one more - maxTestsPerNode := minTestsPerNode - if length%parallelTotal != 0 { - maxTestsPerNode++ - } - - // Number of nodes that will have to run the maximum amount of tests per node - numMaxLoadNodes := length % parallelTotal - - // Number of nodes that precede the current node and will have to run the maximum amount of tests per node - var numPrecedingMaxLoadNodes int - if parallelNode > numMaxLoadNodes { - numPrecedingMaxLoadNodes = numMaxLoadNodes - } else { - numPrecedingMaxLoadNodes = parallelNode - 1 - } - - // Number of nodes that precede the current node and will have to run the minimum amount of tests per node - var numPrecedingMinLoadNodes int - if parallelNode <= numMaxLoadNodes { - numPrecedingMinLoadNodes = 0 - } else { - numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 - } - - // Evaluate the test start index and number of tests to run - startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode - if parallelNode > numMaxLoadNodes { - count = minTestsPerNode - } else { - count = maxTestsPerNode - } - return -} diff --git a/kit/github.com/onsi/ginkgo/internal/spec/index_computer_test.go b/kit/github.com/onsi/ginkgo/internal/spec/index_computer_test.go deleted file mode 100644 index 3b04b74..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/index_computer_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package spec_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/spec" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("ParallelizedIndexRange", func() { - var startIndex, count int - - It("should return the correct index range for 4 tests on 2 nodes", func() { - startIndex, count = ParallelizedIndexRange(4, 2, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(4, 2, 2) - Ω(startIndex).Should(Equal(2)) - Ω(count).Should(Equal(2)) - }) - - It("should return the correct index range for 5 tests on 2 nodes", func() { - startIndex, count = ParallelizedIndexRange(5, 2, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(3)) - - startIndex, count = ParallelizedIndexRange(5, 2, 2) - Ω(startIndex).Should(Equal(3)) - Ω(count).Should(Equal(2)) - }) - - It("should return the correct index range for 5 tests on 3 nodes", func() { - startIndex, count = ParallelizedIndexRange(5, 3, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(5, 3, 2) - Ω(startIndex).Should(Equal(2)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(5, 3, 3) - Ω(startIndex).Should(Equal(4)) - Ω(count).Should(Equal(1)) - }) - - It("should return the correct index range for 5 tests on 4 nodes", func() { - startIndex, count = ParallelizedIndexRange(5, 4, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(5, 4, 2) - Ω(startIndex).Should(Equal(2)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 4, 3) - Ω(startIndex).Should(Equal(3)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 4, 4) - Ω(startIndex).Should(Equal(4)) - Ω(count).Should(Equal(1)) - }) - - It("should return the correct index range for 5 tests on 5 nodes", func() { - startIndex, count = ParallelizedIndexRange(5, 5, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(1)) - - startIndex, count = 
ParallelizedIndexRange(5, 5, 2) - Ω(startIndex).Should(Equal(1)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 5, 3) - Ω(startIndex).Should(Equal(2)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 5, 4) - Ω(startIndex).Should(Equal(3)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 5, 5) - Ω(startIndex).Should(Equal(4)) - Ω(count).Should(Equal(1)) - }) - - It("should return the correct index range for 5 tests on 6 nodes", func() { - startIndex, count = ParallelizedIndexRange(5, 6, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 6, 2) - Ω(startIndex).Should(Equal(1)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 6, 3) - Ω(startIndex).Should(Equal(2)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 6, 4) - Ω(startIndex).Should(Equal(3)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 6, 5) - Ω(startIndex).Should(Equal(4)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(5, 6, 6) - Ω(count).Should(Equal(0)) - }) - - It("should return the correct index range for 5 tests on 7 nodes", func() { - startIndex, count = ParallelizedIndexRange(5, 7, 6) - Ω(count).Should(Equal(0)) - - startIndex, count = ParallelizedIndexRange(5, 7, 7) - Ω(count).Should(Equal(0)) - }) - - It("should return the correct index range for 11 tests on 7 nodes", func() { - startIndex, count = ParallelizedIndexRange(11, 7, 1) - Ω(startIndex).Should(Equal(0)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(11, 7, 2) - Ω(startIndex).Should(Equal(2)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(11, 7, 3) - Ω(startIndex).Should(Equal(4)) - Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(11, 7, 4) - Ω(startIndex).Should(Equal(6)) - 
Ω(count).Should(Equal(2)) - - startIndex, count = ParallelizedIndexRange(11, 7, 5) - Ω(startIndex).Should(Equal(8)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(11, 7, 6) - Ω(startIndex).Should(Equal(9)) - Ω(count).Should(Equal(1)) - - startIndex, count = ParallelizedIndexRange(11, 7, 7) - Ω(startIndex).Should(Equal(10)) - Ω(count).Should(Equal(1)) - }) - -}) diff --git a/kit/github.com/onsi/ginkgo/internal/spec/spec.go b/kit/github.com/onsi/ginkgo/internal/spec/spec.go deleted file mode 100644 index e5d6576..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/spec.go +++ /dev/null @@ -1,170 +0,0 @@ -package spec - -import ( - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/containernode" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type Spec struct { - subject leafnodes.SubjectNode - focused bool - - containers []*containernode.ContainerNode - - state types.SpecState - runTime time.Duration - failure types.SpecFailure -} - -func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode) *Spec { - spec := &Spec{ - subject: subject, - containers: containers, - focused: subject.Flag() == types.FlagTypeFocused, - } - - spec.processFlag(subject.Flag()) - for i := len(containers) - 1; i >= 0; i-- { - spec.processFlag(containers[i].Flag()) - } - - return spec -} - -func (spec *Spec) processFlag(flag types.FlagType) { - if flag == types.FlagTypeFocused { - spec.focused = true - } else if flag == types.FlagTypePending { - spec.state = types.SpecStatePending - } -} - -func (spec *Spec) Skip() { - spec.state = types.SpecStateSkipped -} - -func (spec *Spec) Failed() bool { - return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut -} - -func (spec *Spec) Passed() bool { - return spec.state == types.SpecStatePassed -} - -func 
(spec *Spec) Pending() bool { - return spec.state == types.SpecStatePending -} - -func (spec *Spec) Skipped() bool { - return spec.state == types.SpecStateSkipped -} - -func (spec *Spec) Focused() bool { - return spec.focused -} - -func (spec *Spec) IsMeasurement() bool { - return spec.subject.Type() == types.SpecComponentTypeMeasure -} - -func (spec *Spec) Summary(suiteID string) *types.SpecSummary { - componentTexts := make([]string, len(spec.containers)+1) - componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) - - for i, container := range spec.containers { - componentTexts[i] = container.Text() - componentCodeLocations[i] = container.CodeLocation() - } - - componentTexts[len(spec.containers)] = spec.subject.Text() - componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() - - return &types.SpecSummary{ - IsMeasurement: spec.IsMeasurement(), - NumberOfSamples: spec.subject.Samples(), - ComponentTexts: componentTexts, - ComponentCodeLocations: componentCodeLocations, - State: spec.state, - RunTime: spec.runTime, - Failure: spec.failure, - Measurements: spec.measurementsReport(), - SuiteID: suiteID, - } -} - -func (spec *Spec) ConcatenatedString() string { - s := "" - for _, container := range spec.containers { - s += container.Text() + " " - } - - return s + spec.subject.Text() -} - -func (spec *Spec) Run() { - startTime := time.Now() - defer func() { - spec.runTime = time.Since(startTime) - }() - - for sample := 0; sample < spec.subject.Samples(); sample++ { - spec.state, spec.failure = spec.runSample(sample) - - if spec.state != types.SpecStatePassed { - return - } - } -} - -func (spec *Spec) runSample(sample int) (specState types.SpecState, specFailure types.SpecFailure) { - specState = types.SpecStatePassed - specFailure = types.SpecFailure{} - innerMostContainerIndexToUnwind := -1 - - defer func() { - for i := innerMostContainerIndexToUnwind; i >= 0; i-- { - container := spec.containers[i] - for _, afterEach := range 
container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { - afterEachState, afterEachFailure := afterEach.Run() - if afterEachState != types.SpecStatePassed && specState == types.SpecStatePassed { - specState = afterEachState - specFailure = afterEachFailure - } - } - } - }() - - for i, container := range spec.containers { - innerMostContainerIndexToUnwind = i - for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { - specState, specFailure = beforeEach.Run() - if specState != types.SpecStatePassed { - return - } - } - } - - for _, container := range spec.containers { - for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { - specState, specFailure = justBeforeEach.Run() - if specState != types.SpecStatePassed { - return - } - } - } - - specState, specFailure = spec.subject.Run() - - return -} - -func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { - if !spec.IsMeasurement() || spec.Failed() { - return map[string]*types.SpecMeasurement{} - } - - return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() -} diff --git a/kit/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go b/kit/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go deleted file mode 100644 index 61823e0..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/spec_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package spec_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestSpec(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Spec Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/spec/spec_test.go b/kit/github.com/onsi/ginkgo/internal/spec/spec_test.go deleted file mode 100644 index 9cdabb8..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/spec_test.go +++ /dev/null @@ -1,575 +0,0 @@ -package spec_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "time" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/spec" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/containernode" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var noneFlag = types.FlagTypeNone -var focusedFlag = types.FlagTypeFocused -var pendingFlag = types.FlagTypePending - -var _ = Describe("Spec", func() { - var ( - failer *Failer.Failer - codeLocation types.CodeLocation - nodesThatRan []string - spec *Spec - ) - - newBody := func(text string, fail bool) func() { - return func() { - nodesThatRan = append(nodesThatRan, text) - if fail { - failer.Fail(text, codeLocation) - } - } - } - - newIt := func(text string, flag types.FlagType, fail bool) *leafnodes.ItNode { - return leafnodes.NewItNode(text, newBody(text, fail), flag, codeLocation, 0, failer, 0) - } - - newItWithBody := func(text string, body interface{}) *leafnodes.ItNode { - return leafnodes.NewItNode(text, body, noneFlag, codeLocation, 0, failer, 0) - } - - newMeasure := func(text string, flag types.FlagType, fail bool, samples int) *leafnodes.MeasureNode { - return leafnodes.NewMeasureNode(text, func(Benchmarker) { - nodesThatRan = append(nodesThatRan, text) - if fail { - failer.Fail(text, codeLocation) - } - }, flag, codeLocation, samples, failer, 0) - } - - newBef := func(text string, fail bool) leafnodes.BasicNode { - return leafnodes.NewBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0) - } - - newAft := func(text string, fail bool) leafnodes.BasicNode { - return leafnodes.NewAfterEachNode(newBody(text, fail), codeLocation, 0, failer, 0) - } - - 
newJusBef := func(text string, fail bool) leafnodes.BasicNode { - return leafnodes.NewJustBeforeEachNode(newBody(text, fail), codeLocation, 0, failer, 0) - } - - newContainer := func(text string, flag types.FlagType, setupNodes ...leafnodes.BasicNode) *containernode.ContainerNode { - c := containernode.New(text, flag, codeLocation) - for _, node := range setupNodes { - c.PushSetupNode(node) - } - return c - } - - containers := func(containers ...*containernode.ContainerNode) []*containernode.ContainerNode { - return containers - } - - BeforeEach(func() { - failer = Failer.New() - codeLocation = codelocation.New(0) - nodesThatRan = []string{} - }) - - Describe("marking specs focused and pending", func() { - It("should satisfy various caes", func() { - cases := []struct { - ContainerFlags []types.FlagType - SubjectFlag types.FlagType - Pending bool - Focused bool - }{ - {[]types.FlagType{}, noneFlag, false, false}, - {[]types.FlagType{}, focusedFlag, false, true}, - {[]types.FlagType{}, pendingFlag, true, false}, - {[]types.FlagType{noneFlag}, noneFlag, false, false}, - {[]types.FlagType{focusedFlag}, noneFlag, false, true}, - {[]types.FlagType{pendingFlag}, noneFlag, true, false}, - {[]types.FlagType{noneFlag}, focusedFlag, false, true}, - {[]types.FlagType{focusedFlag}, focusedFlag, false, true}, - {[]types.FlagType{pendingFlag}, focusedFlag, true, true}, - {[]types.FlagType{noneFlag}, pendingFlag, true, false}, - {[]types.FlagType{focusedFlag}, pendingFlag, true, true}, - {[]types.FlagType{pendingFlag}, pendingFlag, true, false}, - {[]types.FlagType{focusedFlag, noneFlag}, noneFlag, false, true}, - {[]types.FlagType{noneFlag, focusedFlag}, noneFlag, false, true}, - {[]types.FlagType{pendingFlag, noneFlag}, noneFlag, true, false}, - {[]types.FlagType{noneFlag, pendingFlag}, noneFlag, true, false}, - {[]types.FlagType{focusedFlag, pendingFlag}, noneFlag, true, true}, - } - - for i, c := range cases { - subject := newIt("it node", c.SubjectFlag, false) - containers 
:= []*containernode.ContainerNode{} - for _, flag := range c.ContainerFlags { - containers = append(containers, newContainer("container", flag)) - } - - spec := New(subject, containers) - Ω(spec.Pending()).Should(Equal(c.Pending), "Case %d: %#v", i, c) - Ω(spec.Focused()).Should(Equal(c.Focused), "Case %d: %#v", i, c) - - if c.Pending { - Ω(spec.Summary("").State).Should(Equal(types.SpecStatePending)) - } - } - }) - }) - - Describe("Skip", func() { - It("should be skipped", func() { - spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag))) - Ω(spec.Skipped()).Should(BeFalse()) - spec.Skip() - Ω(spec.Skipped()).Should(BeTrue()) - Ω(spec.Summary("").State).Should(Equal(types.SpecStateSkipped)) - }) - }) - - Describe("IsMeasurement", func() { - It("should be true if the subject is a measurement node", func() { - spec := New(newIt("it node", noneFlag, false), containers(newContainer("container", noneFlag))) - Ω(spec.IsMeasurement()).Should(BeFalse()) - Ω(spec.Summary("").IsMeasurement).Should(BeFalse()) - Ω(spec.Summary("").NumberOfSamples).Should(Equal(1)) - - spec = New(newMeasure("measure node", noneFlag, false, 10), containers(newContainer("container", noneFlag))) - Ω(spec.IsMeasurement()).Should(BeTrue()) - Ω(spec.Summary("").IsMeasurement).Should(BeTrue()) - Ω(spec.Summary("").NumberOfSamples).Should(Equal(10)) - }) - }) - - Describe("Passed", func() { - It("should pass when the subject passed", func() { - spec := New(newIt("it node", noneFlag, false), containers()) - spec.Run() - - Ω(spec.Passed()).Should(BeTrue()) - Ω(spec.Failed()).Should(BeFalse()) - Ω(spec.Summary("").State).Should(Equal(types.SpecStatePassed)) - Ω(spec.Summary("").Failure).Should(BeZero()) - }) - }) - - Describe("Failed", func() { - It("should be failed if the failure was panic", func() { - spec := New(newItWithBody("panicky it", func() { - panic("bam") - }), containers()) - spec.Run() - Ω(spec.Passed()).Should(BeFalse()) - 
Ω(spec.Failed()).Should(BeTrue()) - Ω(spec.Summary("").State).Should(Equal(types.SpecStatePanicked)) - Ω(spec.Summary("").Failure.Message).Should(Equal("Test Panicked")) - Ω(spec.Summary("").Failure.ForwardedPanic).Should(Equal("bam")) - }) - - It("should be failed if the failure was a timeout", func() { - spec := New(newItWithBody("sleepy it", func(done Done) {}), containers()) - spec.Run() - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(spec.Summary("").State).Should(Equal(types.SpecStateTimedOut)) - Ω(spec.Summary("").Failure.Message).Should(Equal("Timed out")) - }) - - It("should be failed if the failure was... a failure", func() { - spec := New(newItWithBody("failing it", func() { - failer.Fail("bam", codeLocation) - }), containers()) - spec.Run() - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(spec.Summary("").State).Should(Equal(types.SpecStateFailed)) - Ω(spec.Summary("").Failure.Message).Should(Equal("bam")) - }) - }) - - Describe("Concatenated string", func() { - It("should concatenate the texts of the containers and the subject", func() { - spec := New( - newIt("it node", noneFlag, false), - containers( - newContainer("outer container", noneFlag), - newContainer("inner container", noneFlag), - ), - ) - - Ω(spec.ConcatenatedString()).Should(Equal("outer container inner container it node")) - }) - }) - - Describe("running it specs", func() { - Context("with just an it", func() { - Context("that succeeds", func() { - It("should run the it and report on its success", func() { - spec := New(newIt("it node", noneFlag, false), containers()) - spec.Run() - Ω(spec.Passed()).Should(BeTrue()) - Ω(spec.Failed()).Should(BeFalse()) - Ω(nodesThatRan).Should(Equal([]string{"it node"})) - }) - }) - - Context("that fails", func() { - It("should run the it and report on its success", func() { - spec := New(newIt("it node", noneFlag, true), containers()) - spec.Run() - Ω(spec.Passed()).Should(BeFalse()) - 
Ω(spec.Failed()).Should(BeTrue()) - Ω(spec.Summary("").Failure.Message).Should(Equal("it node")) - Ω(nodesThatRan).Should(Equal([]string{"it node"})) - }) - }) - }) - - Context("with a full set of setup nodes", func() { - var failingNodes map[string]bool - - BeforeEach(func() { - failingNodes = map[string]bool{} - }) - - JustBeforeEach(func() { - spec = New( - newIt("it node", noneFlag, failingNodes["it node"]), - containers( - newContainer("outer container", noneFlag, - newBef("outer bef A", failingNodes["outer bef A"]), - newBef("outer bef B", failingNodes["outer bef B"]), - newJusBef("outer jusbef A", failingNodes["outer jusbef A"]), - newJusBef("outer jusbef B", failingNodes["outer jusbef B"]), - newAft("outer aft A", failingNodes["outer aft A"]), - newAft("outer aft B", failingNodes["outer aft B"]), - ), - newContainer("inner container", noneFlag, - newBef("inner bef A", failingNodes["inner bef A"]), - newBef("inner bef B", failingNodes["inner bef B"]), - newJusBef("inner jusbef A", failingNodes["inner jusbef A"]), - newJusBef("inner jusbef B", failingNodes["inner jusbef B"]), - newAft("inner aft A", failingNodes["inner aft A"]), - newAft("inner aft B", failingNodes["inner aft B"]), - ), - ), - ) - spec.Run() - }) - - Context("that all pass", func() { - It("should walk through the nodes in the correct order", func() { - Ω(spec.Passed()).Should(BeTrue()) - Ω(spec.Failed()).Should(BeFalse()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", - "inner bef A", - "inner bef B", - "outer jusbef A", - "outer jusbef B", - "inner jusbef A", - "inner jusbef B", - "it node", - "inner aft A", - "inner aft B", - "outer aft A", - "outer aft B", - })) - }) - }) - - Context("when the subject fails", func() { - BeforeEach(func() { - failingNodes["it node"] = true - }) - - It("should run the afters", func() { - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", 
- "inner bef A", - "inner bef B", - "outer jusbef A", - "outer jusbef B", - "inner jusbef A", - "inner jusbef B", - "it node", - "inner aft A", - "inner aft B", - "outer aft A", - "outer aft B", - })) - Ω(spec.Summary("").Failure.Message).Should(Equal("it node")) - }) - }) - - Context("when an inner before fails", func() { - BeforeEach(func() { - failingNodes["inner bef A"] = true - }) - - It("should not run any other befores, but it should run the subsequent afters", func() { - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", - "inner bef A", - "inner aft A", - "inner aft B", - "outer aft A", - "outer aft B", - })) - Ω(spec.Summary("").Failure.Message).Should(Equal("inner bef A")) - }) - }) - - Context("when an outer before fails", func() { - BeforeEach(func() { - failingNodes["outer bef B"] = true - }) - - It("should not run any other befores, but it should run the subsequent afters", func() { - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", - "outer aft A", - "outer aft B", - })) - Ω(spec.Summary("").Failure.Message).Should(Equal("outer bef B")) - }) - }) - - Context("when an after fails", func() { - BeforeEach(func() { - failingNodes["inner aft B"] = true - }) - - It("should run all other afters, but mark the test as failed", func() { - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", - "inner bef A", - "inner bef B", - "outer jusbef A", - "outer jusbef B", - "inner jusbef A", - "inner jusbef B", - "it node", - "inner aft A", - "inner aft B", - "outer aft A", - "outer aft B", - })) - Ω(spec.Summary("").Failure.Message).Should(Equal("inner aft B")) - }) - }) - - Context("when a just before each fails", func() { - BeforeEach(func() { - failingNodes["outer jusbef B"] = true - 
}) - - It("should run the afters, but not the subject", func() { - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", - "inner bef A", - "inner bef B", - "outer jusbef A", - "outer jusbef B", - "inner aft A", - "inner aft B", - "outer aft A", - "outer aft B", - })) - Ω(spec.Summary("").Failure.Message).Should(Equal("outer jusbef B")) - }) - }) - - Context("when an after fails after an earlier node has failed", func() { - BeforeEach(func() { - failingNodes["it node"] = true - failingNodes["inner aft B"] = true - }) - - It("should record the earlier failure", func() { - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "outer bef A", - "outer bef B", - "inner bef A", - "inner bef B", - "outer jusbef A", - "outer jusbef B", - "inner jusbef A", - "inner jusbef B", - "it node", - "inner aft A", - "inner aft B", - "outer aft A", - "outer aft B", - })) - Ω(spec.Summary("").Failure.Message).Should(Equal("it node")) - }) - }) - }) - }) - - Describe("running measurement specs", func() { - Context("when the measurement succeeds", func() { - It("should run N samples", func() { - spec = New( - newMeasure("measure node", noneFlag, false, 3), - containers( - newContainer("container", noneFlag, - newBef("bef A", false), - newJusBef("jusbef A", false), - newAft("aft A", false), - ), - ), - ) - spec.Run() - - Ω(spec.Passed()).Should(BeTrue()) - Ω(spec.Failed()).Should(BeFalse()) - Ω(nodesThatRan).Should(Equal([]string{ - "bef A", - "jusbef A", - "measure node", - "aft A", - "bef A", - "jusbef A", - "measure node", - "aft A", - "bef A", - "jusbef A", - "measure node", - "aft A", - })) - }) - }) - - Context("when the measurement fails", func() { - It("should bail after the failure occurs", func() { - spec = New( - newMeasure("measure node", noneFlag, true, 3), - containers( - newContainer("container", noneFlag, - newBef("bef A", 
false), - newJusBef("jusbef A", false), - newAft("aft A", false), - ), - ), - ) - spec.Run() - - Ω(spec.Passed()).Should(BeFalse()) - Ω(spec.Failed()).Should(BeTrue()) - Ω(nodesThatRan).Should(Equal([]string{ - "bef A", - "jusbef A", - "measure node", - "aft A", - })) - }) - }) - }) - - Describe("Summary", func() { - var ( - subjectCodeLocation types.CodeLocation - outerContainerCodeLocation types.CodeLocation - innerContainerCodeLocation types.CodeLocation - summary *types.SpecSummary - ) - - BeforeEach(func() { - subjectCodeLocation = codelocation.New(0) - outerContainerCodeLocation = codelocation.New(0) - innerContainerCodeLocation = codelocation.New(0) - - spec = New( - leafnodes.NewItNode("it node", func() { - time.Sleep(10 * time.Millisecond) - }, noneFlag, subjectCodeLocation, 0, failer, 0), - containers( - containernode.New("outer container", noneFlag, outerContainerCodeLocation), - containernode.New("inner container", noneFlag, innerContainerCodeLocation), - ), - ) - - spec.Run() - Ω(spec.Passed()).Should(BeTrue()) - summary = spec.Summary("suite id") - }) - - It("should have the suite id", func() { - Ω(summary.SuiteID).Should(Equal("suite id")) - }) - - It("should have the component texts and code locations", func() { - Ω(summary.ComponentTexts).Should(Equal([]string{"outer container", "inner container", "it node"})) - Ω(summary.ComponentCodeLocations).Should(Equal([]types.CodeLocation{outerContainerCodeLocation, innerContainerCodeLocation, subjectCodeLocation})) - }) - - It("should have a runtime", func() { - Ω(summary.RunTime).Should(BeNumerically(">=", 10*time.Millisecond)) - }) - - It("should not be a measurement, or have a measurement summary", func() { - Ω(summary.IsMeasurement).Should(BeFalse()) - Ω(summary.Measurements).Should(BeEmpty()) - }) - }) - - Describe("Summaries for measurements", func() { - var summary *types.SpecSummary - - BeforeEach(func() { - spec = New(leafnodes.NewMeasureNode("measure node", func(b Benchmarker) { - b.RecordValue("a 
value", 7, "some info") - }, noneFlag, codeLocation, 4, failer, 0), containers()) - spec.Run() - Ω(spec.Passed()).Should(BeTrue()) - summary = spec.Summary("suite id") - }) - - It("should include the number of samples", func() { - Ω(summary.NumberOfSamples).Should(Equal(4)) - }) - - It("should be a measurement", func() { - Ω(summary.IsMeasurement).Should(BeTrue()) - }) - - It("should have the measurements report", func() { - Ω(summary.Measurements).Should(HaveKey("a value")) - - report := summary.Measurements["a value"] - Ω(report.Name).Should(Equal("a value")) - Ω(report.Info).Should(Equal("some info")) - Ω(report.Results).Should(Equal([]float64{7, 7, 7, 7})) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/spec/specs.go b/kit/github.com/onsi/ginkgo/internal/spec/specs.go deleted file mode 100644 index a3d5451..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/specs.go +++ /dev/null @@ -1,122 +0,0 @@ -package spec - -import ( - "math/rand" - "regexp" - "sort" -) - -type Specs struct { - specs []*Spec - numberOfOriginalSpecs int - hasProgrammaticFocus bool -} - -func NewSpecs(specs []*Spec) *Specs { - return &Specs{ - specs: specs, - numberOfOriginalSpecs: len(specs), - } -} - -func (e *Specs) Specs() []*Spec { - return e.specs -} - -func (e *Specs) NumberOfOriginalSpecs() int { - return e.numberOfOriginalSpecs -} - -func (e *Specs) HasProgrammaticFocus() bool { - return e.hasProgrammaticFocus -} - -func (e *Specs) Shuffle(r *rand.Rand) { - sort.Sort(e) - permutation := r.Perm(len(e.specs)) - shuffledSpecs := make([]*Spec, len(e.specs)) - for i, j := range permutation { - shuffledSpecs[i] = e.specs[j] - } - e.specs = shuffledSpecs -} - -func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { - if focusString == "" && skipString == "" { - e.applyProgrammaticFocus() - } else { - e.applyRegExpFocus(description, focusString, skipString) - } -} - -func (e *Specs) applyProgrammaticFocus() { - e.hasProgrammaticFocus = 
false - for _, spec := range e.specs { - if spec.Focused() { - e.hasProgrammaticFocus = true - break - } - } - - if e.hasProgrammaticFocus { - for _, spec := range e.specs { - if !spec.Focused() { - spec.Skip() - } - } - } -} - -func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) { - for _, spec := range e.specs { - matchesFocus := true - matchesSkip := false - - toMatch := []byte(description + " " + spec.ConcatenatedString()) - - if focusString != "" { - focusFilter := regexp.MustCompile(focusString) - matchesFocus = focusFilter.Match([]byte(toMatch)) - } - - if skipString != "" { - skipFilter := regexp.MustCompile(skipString) - matchesSkip = skipFilter.Match([]byte(toMatch)) - } - - if !matchesFocus || matchesSkip { - spec.Skip() - } - } -} - -func (e *Specs) SkipMeasurements() { - for _, spec := range e.specs { - if spec.IsMeasurement() { - spec.Skip() - } - } -} - -func (e *Specs) TrimForParallelization(total int, node int) { - startIndex, count := ParallelizedIndexRange(len(e.specs), total, node) - if count == 0 { - e.specs = make([]*Spec, 0) - } else { - e.specs = e.specs[startIndex : startIndex+count] - } -} - -//sort.Interface - -func (e *Specs) Len() int { - return len(e.specs) -} - -func (e *Specs) Less(i, j int) bool { - return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString() -} - -func (e *Specs) Swap(i, j int) { - e.specs[i], e.specs[j] = e.specs[j], e.specs[i] -} diff --git a/kit/github.com/onsi/ginkgo/internal/spec/specs_test.go b/kit/github.com/onsi/ginkgo/internal/spec/specs_test.go deleted file mode 100644 index e48ff31..0000000 --- a/kit/github.com/onsi/ginkgo/internal/spec/specs_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package spec_test - -import ( - "math/rand" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/spec" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/containernode" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("Specs", func() { - var specs *Specs - - newSpec := func(text string, flag types.FlagType) *Spec { - subject := leafnodes.NewItNode(text, func() {}, flag, codelocation.New(0), 0, nil, 0) - return New(subject, []*containernode.ContainerNode{}) - } - - newMeasureSpec := func(text string, flag types.FlagType) *Spec { - subject := leafnodes.NewMeasureNode(text, func(Benchmarker) {}, flag, codelocation.New(0), 0, nil, 0) - return New(subject, []*containernode.ContainerNode{}) - } - - newSpecs := func(args ...interface{}) *Specs { - specs := []*Spec{} - for index := 0; index < len(args)-1; index += 2 { - specs = append(specs, newSpec(args[index].(string), args[index+1].(types.FlagType))) - } - return NewSpecs(specs) - } - - specTexts := func(specs *Specs) []string { - texts := []string{} - for _, spec := range specs.Specs() { - texts = append(texts, spec.ConcatenatedString()) - } - return texts - } - - willRunTexts := func(specs *Specs) []string { - texts := []string{} - for _, spec := range specs.Specs() { - if !(spec.Skipped() || spec.Pending()) { - texts = append(texts, spec.ConcatenatedString()) - } - } - return texts - } - - skippedTexts := func(specs *Specs) []string { - texts := []string{} - for _, spec := range specs.Specs() { - if spec.Skipped() { - texts = append(texts, spec.ConcatenatedString()) - } - } - return texts - } - - pendingTexts := func(specs *Specs) []string { - texts := []string{} - for _, spec := range specs.Specs() { - if spec.Pending() { - texts = append(texts, spec.ConcatenatedString()) - } - } - return texts - } - - Describe("Shuffling specs", func() { - 
It("should shuffle the specs using the passed in randomizer", func() { - specs17 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) - specs17.Shuffle(rand.New(rand.NewSource(17))) - texts17 := specTexts(specs17) - - specs17Again := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) - specs17Again.Shuffle(rand.New(rand.NewSource(17))) - texts17Again := specTexts(specs17Again) - - specs15 := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) - specs15.Shuffle(rand.New(rand.NewSource(15))) - texts15 := specTexts(specs15) - - specsUnshuffled := newSpecs("C", noneFlag, "A", noneFlag, "B", noneFlag) - textsUnshuffled := specTexts(specsUnshuffled) - - Ω(textsUnshuffled).Should(Equal([]string{"C", "A", "B"})) - - Ω(texts17).Should(Equal(texts17Again)) - Ω(texts17).ShouldNot(Equal(texts15)) - Ω(texts17).ShouldNot(Equal(textsUnshuffled)) - Ω(texts15).ShouldNot(Equal(textsUnshuffled)) - - Ω(texts17).Should(HaveLen(3)) - Ω(texts17).Should(ContainElement("A")) - Ω(texts17).Should(ContainElement("B")) - Ω(texts17).Should(ContainElement("C")) - - Ω(texts15).Should(HaveLen(3)) - Ω(texts15).Should(ContainElement("A")) - Ω(texts15).Should(ContainElement("B")) - Ω(texts15).Should(ContainElement("C")) - }) - }) - - Describe("with no programmatic focus", func() { - BeforeEach(func() { - specs = newSpecs("A1", noneFlag, "A2", noneFlag, "B1", noneFlag, "B2", pendingFlag) - specs.ApplyFocus("", "", "") - }) - - It("should not report as having programmatic specs", func() { - Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) - }) - }) - - Describe("Applying focus/skip", func() { - var description, focusString, skipString string - - BeforeEach(func() { - description, focusString, skipString = "", "", "" - }) - - JustBeforeEach(func() { - specs = newSpecs("A1", focusedFlag, "A2", noneFlag, "B1", focusedFlag, "B2", pendingFlag) - specs.ApplyFocus(description, focusString, skipString) - }) - - Context("with neither a focus string nor a skip string", func() { - It("should apply the 
programmatic focus", func() { - Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "B1"})) - Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B2"})) - Ω(pendingTexts(specs)).Should(BeEmpty()) - }) - - It("should report as having programmatic specs", func() { - Ω(specs.HasProgrammaticFocus()).Should(BeTrue()) - }) - }) - - Context("with a focus regexp", func() { - BeforeEach(func() { - focusString = "A" - }) - - It("should override the programmatic focus", func() { - Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2"})) - Ω(skippedTexts(specs)).Should(Equal([]string{"B1", "B2"})) - Ω(pendingTexts(specs)).Should(BeEmpty()) - }) - - It("should not report as having programmatic specs", func() { - Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) - }) - }) - - Context("with a focus regexp", func() { - BeforeEach(func() { - focusString = "B" - }) - - It("should not override any pendings", func() { - Ω(willRunTexts(specs)).Should(Equal([]string{"B1"})) - Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"})) - Ω(pendingTexts(specs)).Should(Equal([]string{"B2"})) - }) - }) - - Context("with a description", func() { - BeforeEach(func() { - description = "C" - focusString = "C" - }) - - It("should include the description in the focus determination", func() { - Ω(willRunTexts(specs)).Should(Equal([]string{"A1", "A2", "B1"})) - Ω(skippedTexts(specs)).Should(BeEmpty()) - Ω(pendingTexts(specs)).Should(Equal([]string{"B2"})) - }) - }) - - Context("with a description", func() { - BeforeEach(func() { - description = "C" - skipString = "C" - }) - - It("should include the description in the focus determination", func() { - Ω(willRunTexts(specs)).Should(BeEmpty()) - Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2", "B1", "B2"})) - Ω(pendingTexts(specs)).Should(BeEmpty()) - }) - }) - - Context("with a skip regexp", func() { - BeforeEach(func() { - skipString = "A" - }) - - It("should override the programmatic focus", func() { - 
Ω(willRunTexts(specs)).Should(Equal([]string{"B1"})) - Ω(skippedTexts(specs)).Should(Equal([]string{"A1", "A2"})) - Ω(pendingTexts(specs)).Should(Equal([]string{"B2"})) - }) - - It("should not report as having programmatic specs", func() { - Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) - }) - }) - - Context("with both a focus and a skip regexp", func() { - BeforeEach(func() { - focusString = "1" - skipString = "B" - }) - - It("should AND the two", func() { - Ω(willRunTexts(specs)).Should(Equal([]string{"A1"})) - Ω(skippedTexts(specs)).Should(Equal([]string{"A2", "B1", "B2"})) - Ω(pendingTexts(specs)).Should(BeEmpty()) - }) - - It("should not report as having programmatic specs", func() { - Ω(specs.HasProgrammaticFocus()).Should(BeFalse()) - }) - }) - }) - - Describe("skipping measurements", func() { - BeforeEach(func() { - specs = NewSpecs([]*Spec{ - newSpec("A", noneFlag), - newSpec("B", noneFlag), - newSpec("C", pendingFlag), - newMeasureSpec("measurementA", noneFlag), - newMeasureSpec("measurementB", pendingFlag), - }) - }) - - It("should skip measurements", func() { - Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B", "measurementA"})) - Ω(skippedTexts(specs)).Should(BeEmpty()) - Ω(pendingTexts(specs)).Should(Equal([]string{"C", "measurementB"})) - - specs.SkipMeasurements() - - Ω(willRunTexts(specs)).Should(Equal([]string{"A", "B"})) - Ω(skippedTexts(specs)).Should(Equal([]string{"measurementA", "measurementB"})) - Ω(pendingTexts(specs)).Should(Equal([]string{"C"})) - }) - }) - - Describe("when running tests in parallel", func() { - It("should select out a subset of the tests", func() { - specsNode1 := newSpecs("A", noneFlag, "B", noneFlag, "C", noneFlag, "D", noneFlag, "E", noneFlag) - specsNode2 := newSpecs("A", noneFlag, "B", noneFlag, "C", noneFlag, "D", noneFlag, "E", noneFlag) - specsNode3 := newSpecs("A", noneFlag, "B", noneFlag, "C", noneFlag, "D", noneFlag, "E", noneFlag) - - specsNode1.TrimForParallelization(3, 1) - 
specsNode2.TrimForParallelization(3, 2) - specsNode3.TrimForParallelization(3, 3) - - Ω(willRunTexts(specsNode1)).Should(Equal([]string{"A", "B"})) - Ω(willRunTexts(specsNode2)).Should(Equal([]string{"C", "D"})) - Ω(willRunTexts(specsNode3)).Should(Equal([]string{"E"})) - - Ω(specsNode1.Specs()).Should(HaveLen(2)) - Ω(specsNode2.Specs()).Should(HaveLen(2)) - Ω(specsNode3.Specs()).Should(HaveLen(1)) - - Ω(specsNode1.NumberOfOriginalSpecs()).Should(Equal(5)) - Ω(specsNode2.NumberOfOriginalSpecs()).Should(Equal(5)) - Ω(specsNode3.NumberOfOriginalSpecs()).Should(Equal(5)) - }) - - Context("when way too many nodes are used", func() { - It("should return 0 specs", func() { - specsNode1 := newSpecs("A", noneFlag, "B", noneFlag) - specsNode2 := newSpecs("A", noneFlag, "B", noneFlag) - specsNode3 := newSpecs("A", noneFlag, "B", noneFlag) - - specsNode1.TrimForParallelization(3, 1) - specsNode2.TrimForParallelization(3, 2) - specsNode3.TrimForParallelization(3, 3) - - Ω(willRunTexts(specsNode1)).Should(Equal([]string{"A"})) - Ω(willRunTexts(specsNode2)).Should(Equal([]string{"B"})) - Ω(willRunTexts(specsNode3)).Should(BeEmpty()) - - Ω(specsNode1.Specs()).Should(HaveLen(1)) - Ω(specsNode2.Specs()).Should(HaveLen(1)) - Ω(specsNode3.Specs()).Should(HaveLen(0)) - - Ω(specsNode1.NumberOfOriginalSpecs()).Should(Equal(2)) - Ω(specsNode2.NumberOfOriginalSpecs()).Should(Equal(2)) - Ω(specsNode3.NumberOfOriginalSpecs()).Should(Equal(2)) - }) - }) - - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/kit/github.com/onsi/ginkgo/internal/specrunner/random_id.go deleted file mode 100644 index a0b8b62..0000000 --- a/kit/github.com/onsi/ginkgo/internal/specrunner/random_id.go +++ /dev/null @@ -1,15 +0,0 @@ -package specrunner - -import ( - "crypto/rand" - "fmt" -) - -func randomID() string { - b := make([]byte, 8) - _, err := rand.Read(b) - if err != nil { - return "" - } - return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) -} diff --git 
a/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go deleted file mode 100644 index 68a67cf..0000000 --- a/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ /dev/null @@ -1,286 +0,0 @@ -package specrunner - -import ( - "fmt" - "os" - "os/signal" - "sync" - "syscall" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/spec" - Writer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/writer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - - "time" -) - -type SpecRunner struct { - description string - beforeSuiteNode leafnodes.SuiteNode - specs *spec.Specs - afterSuiteNode leafnodes.SuiteNode - reporters []reporters.Reporter - startTime time.Time - suiteID string - runningSpec *spec.Spec - writer Writer.WriterInterface - config config.GinkgoConfigType - interrupted bool - lock *sync.Mutex -} - -func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { - return &SpecRunner{ - description: description, - beforeSuiteNode: beforeSuiteNode, - specs: specs, - afterSuiteNode: afterSuiteNode, - reporters: reporters, - writer: writer, - config: config, - suiteID: randomID(), - lock: &sync.Mutex{}, - } -} - -func (runner *SpecRunner) Run() bool { - runner.reportSuiteWillBegin() - go runner.registerForInterrupts() - - suitePassed := runner.runBeforeSuite() - - if suitePassed { - suitePassed = runner.runSpecs() - } - - runner.blockForeverIfInterrupted() - - suitePassed = runner.runAfterSuite() && suitePassed - - runner.reportSuiteDidEnd(suitePassed) - - return 
suitePassed -} - -func (runner *SpecRunner) runBeforeSuite() bool { - if runner.beforeSuiteNode == nil || runner.wasInterrupted() { - return true - } - - runner.writer.Truncate() - conf := runner.config - passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) - if !passed { - runner.writer.DumpOut() - } - runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) - return passed -} - -func (runner *SpecRunner) runAfterSuite() bool { - if runner.afterSuiteNode == nil { - return true - } - - runner.writer.Truncate() - conf := runner.config - passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) - if !passed { - runner.writer.DumpOut() - } - runner.reportAfterSuite(runner.afterSuiteNode.Summary()) - return passed -} - -func (runner *SpecRunner) runSpecs() bool { - suiteFailed := false - skipRemainingSpecs := false - for _, spec := range runner.specs.Specs() { - if runner.wasInterrupted() { - return suiteFailed - } - if skipRemainingSpecs { - spec.Skip() - } - runner.reportSpecWillRun(spec) - - if !spec.Skipped() && !spec.Pending() { - runner.runningSpec = spec - spec.Run() - runner.runningSpec = nil - if spec.Failed() { - suiteFailed = true - } - } else if spec.Pending() && runner.config.FailOnPending { - suiteFailed = true - } - - runner.reportSpecDidComplete(spec) - - if spec.Failed() && runner.config.FailFast { - skipRemainingSpecs = true - } - } - - return !suiteFailed -} - -func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { - if runner.runningSpec == nil { - return nil, false - } - - return runner.runningSpec.Summary(runner.suiteID), true -} - -func (runner *SpecRunner) registerForInterrupts() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - - <-c - signal.Stop(c) - runner.markInterrupted() - go runner.registerForHardInterrupts() - if runner.afterSuiteNode != nil { - fmt.Fprintln(os.Stderr, "\nReceived interrupt. 
Running AfterSuite...\n^C again to terminate immediately") - runner.runAfterSuite() - } - runner.reportSuiteDidEnd(false) - os.Exit(1) -} - -func (runner *SpecRunner) registerForHardInterrupts() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - - <-c - fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.") - os.Exit(1) -} - -func (runner *SpecRunner) blockForeverIfInterrupted() { - runner.lock.Lock() - interrupted := runner.interrupted - runner.lock.Unlock() - - if interrupted { - select {} - } -} - -func (runner *SpecRunner) markInterrupted() { - runner.lock.Lock() - defer runner.lock.Unlock() - runner.interrupted = true -} - -func (runner *SpecRunner) wasInterrupted() bool { - runner.lock.Lock() - defer runner.lock.Unlock() - return runner.interrupted -} - -func (runner *SpecRunner) reportSuiteWillBegin() { - runner.startTime = time.Now() - summary := runner.summary(true) - for _, reporter := range runner.reporters { - reporter.SpecSuiteWillBegin(runner.config, summary) - } -} - -func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { - for _, reporter := range runner.reporters { - reporter.BeforeSuiteDidRun(summary) - } -} - -func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { - for _, reporter := range runner.reporters { - reporter.AfterSuiteDidRun(summary) - } -} - -func (runner *SpecRunner) reportSpecWillRun(spec *spec.Spec) { - runner.writer.Truncate() - - summary := spec.Summary(runner.suiteID) - for _, reporter := range runner.reporters { - reporter.SpecWillRun(summary) - } -} - -func (runner *SpecRunner) reportSpecDidComplete(spec *spec.Spec) { - summary := spec.Summary(runner.suiteID) - for i := len(runner.reporters) - 1; i >= 1; i-- { - runner.reporters[i].SpecDidComplete(summary) - } - - if spec.Failed() { - runner.writer.DumpOut() - } - - runner.reporters[0].SpecDidComplete(summary) -} - -func (runner *SpecRunner) reportSuiteDidEnd(success bool) { - summary := 
runner.summary(success) - summary.RunTime = time.Since(runner.startTime) - for _, reporter := range runner.reporters { - reporter.SpecSuiteDidEnd(summary) - } -} - -func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) { - count = 0 - - for _, spec := range runner.specs.Specs() { - if filter(spec) { - count++ - } - } - - return count -} - -func (runner *SpecRunner) summary(success bool) *types.SuiteSummary { - numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { - return !ex.Skipped() && !ex.Pending() - }) - - numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { - return ex.Pending() - }) - - numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { - return ex.Skipped() - }) - - numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { - return ex.Passed() - }) - - numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { - return ex.Failed() - }) - - if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() { - numberOfFailedSpecs = numberOfSpecsThatWillBeRun - } - - return &types.SuiteSummary{ - SuiteDescription: runner.description, - SuiteSucceeded: success, - SuiteID: runner.suiteID, - - NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(), - NumberOfTotalSpecs: len(runner.specs.Specs()), - NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, - NumberOfPendingSpecs: numberOfPendingSpecs, - NumberOfSkippedSpecs: numberOfSkippedSpecs, - NumberOfPassedSpecs: numberOfPassedSpecs, - NumberOfFailedSpecs: numberOfFailedSpecs, - } -} diff --git a/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go b/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go deleted file mode 100644 index c3f63cc..0000000 --- a/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package specrunner_test - -import 
( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestSpecRunner(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Spec Runner Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go b/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go deleted file mode 100644 index ff56269..0000000 --- a/kit/github.com/onsi/ginkgo/internal/specrunner/spec_runner_test.go +++ /dev/null @@ -1,561 +0,0 @@ -package specrunner_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/specrunner" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/containernode" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/spec" - Writer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/writer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" -) - -var noneFlag = types.FlagTypeNone -var focusedFlag = types.FlagTypeFocused -var pendingFlag = types.FlagTypePending - -var _ = Describe("Spec Runner", func() { - var ( - reporter1 *reporters.FakeReporter - reporter2 *reporters.FakeReporter - failer *Failer.Failer - writer *Writer.FakeGinkgoWriter - - thingsThatRan []string - - runner *SpecRunner - ) - - newBefSuite := func(text string, fail bool) leafnodes.SuiteNode { - return leafnodes.NewBeforeSuiteNode(func() { - writer.AddEvent(text) - thingsThatRan = 
append(thingsThatRan, text) - if fail { - failer.Fail(text, codelocation.New(0)) - } - }, codelocation.New(0), 0, failer) - } - - newAftSuite := func(text string, fail bool) leafnodes.SuiteNode { - return leafnodes.NewAfterSuiteNode(func() { - writer.AddEvent(text) - thingsThatRan = append(thingsThatRan, text) - if fail { - failer.Fail(text, codelocation.New(0)) - } - }, codelocation.New(0), 0, failer) - } - - newSpec := func(text string, flag types.FlagType, fail bool) *spec.Spec { - subject := leafnodes.NewItNode(text, func() { - writer.AddEvent(text) - thingsThatRan = append(thingsThatRan, text) - if fail { - failer.Fail(text, codelocation.New(0)) - } - }, flag, codelocation.New(0), 0, failer, 0) - - return spec.New(subject, []*containernode.ContainerNode{}) - } - - newSpecWithBody := func(text string, body interface{}) *spec.Spec { - subject := leafnodes.NewItNode(text, body, noneFlag, codelocation.New(0), 0, failer, 0) - - return spec.New(subject, []*containernode.ContainerNode{}) - } - - newRunner := func(config config.GinkgoConfigType, beforeSuiteNode leafnodes.SuiteNode, afterSuiteNode leafnodes.SuiteNode, specs ...*spec.Spec) *SpecRunner { - return New("description", beforeSuiteNode, spec.NewSpecs(specs), afterSuiteNode, []reporters.Reporter{reporter1, reporter2}, writer, config) - } - - BeforeEach(func() { - reporter1 = reporters.NewFakeReporter() - reporter2 = reporters.NewFakeReporter() - writer = Writer.NewFake() - failer = Failer.New() - - thingsThatRan = []string{} - }) - - Describe("Running and Reporting", func() { - var specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec *spec.Spec - var willRunCalls, didCompleteCalls []string - - BeforeEach(func() { - willRunCalls = []string{} - didCompleteCalls = []string{} - specA = newSpec("spec A", noneFlag, false) - pendingSpec = newSpec("pending spec", pendingFlag, false) - anotherPendingSpec = newSpec("another pending spec", pendingFlag, false) - failedSpec = newSpec("failed spec", 
noneFlag, true) - specB = newSpec("spec B", noneFlag, false) - skippedSpec = newSpec("skipped spec", noneFlag, false) - skippedSpec.Skip() - - reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) { - willRunCalls = append(willRunCalls, "Reporter1") - } - reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) { - willRunCalls = append(willRunCalls, "Reporter2") - } - - reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { - didCompleteCalls = append(didCompleteCalls, "Reporter1") - } - reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { - didCompleteCalls = append(didCompleteCalls, "Reporter2") - } - - runner = newRunner(config.GinkgoConfigType{RandomSeed: 17}, newBefSuite("BefSuite", false), newAftSuite("AftSuite", false), specA, pendingSpec, anotherPendingSpec, failedSpec, specB, skippedSpec) - runner.Run() - }) - - It("should skip skipped/pending tests", func() { - Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "spec A", "failed spec", "spec B", "AftSuite"})) - }) - - It("should report to any attached reporters", func() { - Ω(reporter1.Config).Should(Equal(reporter2.Config)) - Ω(reporter1.BeginSummary).Should(Equal(reporter2.BeginSummary)) - Ω(reporter1.SpecWillRunSummaries).Should(Equal(reporter2.SpecWillRunSummaries)) - Ω(reporter1.SpecSummaries).Should(Equal(reporter2.SpecSummaries)) - Ω(reporter1.EndSummary).Should(Equal(reporter2.EndSummary)) - }) - - It("should report that a spec did end in reverse order", func() { - Ω(willRunCalls[0:4]).Should(Equal([]string{"Reporter1", "Reporter2", "Reporter1", "Reporter2"})) - Ω(didCompleteCalls[0:4]).Should(Equal([]string{"Reporter2", "Reporter1", "Reporter2", "Reporter1"})) - }) - - It("should report the passed in config", func() { - Ω(reporter1.Config.RandomSeed).Should(BeNumerically("==", 17)) - }) - - It("should report the beginning of the suite", func() { - Ω(reporter1.BeginSummary.SuiteDescription).Should(Equal("description")) - 
Ω(reporter1.BeginSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) - Ω(reporter1.BeginSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) - Ω(reporter1.BeginSummary.NumberOfTotalSpecs).Should(Equal(6)) - Ω(reporter1.BeginSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3)) - Ω(reporter1.BeginSummary.NumberOfPendingSpecs).Should(Equal(2)) - Ω(reporter1.BeginSummary.NumberOfSkippedSpecs).Should(Equal(1)) - }) - - It("should report the end of the suite", func() { - Ω(reporter1.EndSummary.SuiteDescription).Should(Equal("description")) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteID).Should(MatchRegexp("[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}")) - Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(6)) - Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(6)) - Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(3)) - Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(2)) - Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(1)) - Ω(reporter1.EndSummary.NumberOfPassedSpecs).Should(Equal(2)) - Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1)) - }) - }) - - Describe("reporting on specs", func() { - var proceed chan bool - var ready chan bool - var finished chan bool - BeforeEach(func() { - ready = make(chan bool) - proceed = make(chan bool) - finished = make(chan bool) - skippedSpec := newSpec("SKIP", noneFlag, false) - skippedSpec.Skip() - - runner = newRunner( - config.GinkgoConfigType{}, - newBefSuite("BefSuite", false), - newAftSuite("AftSuite", false), - skippedSpec, - newSpec("PENDING", pendingFlag, false), - newSpecWithBody("RUN", func() { - close(ready) - <-proceed - }), - ) - go func() { - runner.Run() - close(finished) - }() - }) - - It("should report about pending/skipped specs", func() { - <-ready - Ω(reporter1.SpecWillRunSummaries).Should(HaveLen(3)) - - 
Ω(reporter1.SpecWillRunSummaries[0].ComponentTexts[0]).Should(Equal("SKIP")) - Ω(reporter1.SpecWillRunSummaries[1].ComponentTexts[0]).Should(Equal("PENDING")) - Ω(reporter1.SpecWillRunSummaries[2].ComponentTexts[0]).Should(Equal("RUN")) - - Ω(reporter1.SpecSummaries[0].ComponentTexts[0]).Should(Equal("SKIP")) - Ω(reporter1.SpecSummaries[1].ComponentTexts[0]).Should(Equal("PENDING")) - Ω(reporter1.SpecSummaries).Should(HaveLen(2)) - - close(proceed) - <-finished - - Ω(reporter1.SpecSummaries).Should(HaveLen(3)) - Ω(reporter1.SpecSummaries[2].ComponentTexts[0]).Should(Equal("RUN")) - }) - }) - - Describe("Running BeforeSuite & AfterSuite", func() { - var success bool - var befSuite leafnodes.SuiteNode - var aftSuite leafnodes.SuiteNode - Context("with a nil BeforeSuite & AfterSuite", func() { - BeforeEach(func() { - runner = newRunner( - config.GinkgoConfigType{}, - nil, - nil, - newSpec("A", noneFlag, false), - newSpec("B", noneFlag, false), - ) - success = runner.Run() - }) - - It("should not report about the BeforeSuite", func() { - Ω(reporter1.BeforeSuiteSummary).Should(BeNil()) - }) - - It("should not report about the AfterSuite", func() { - Ω(reporter1.AfterSuiteSummary).Should(BeNil()) - }) - - It("should run the specs", func() { - Ω(thingsThatRan).Should(Equal([]string{"A", "B"})) - }) - }) - - Context("when the BeforeSuite & AfterSuite pass", func() { - BeforeEach(func() { - befSuite = newBefSuite("BefSuite", false) - aftSuite = newBefSuite("AftSuite", false) - runner = newRunner( - config.GinkgoConfigType{}, - befSuite, - aftSuite, - newSpec("A", noneFlag, false), - newSpec("B", noneFlag, false), - ) - success = runner.Run() - }) - - It("should run the BeforeSuite, the AfterSuite and the specs", func() { - Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"})) - }) - - It("should report about the BeforeSuite", func() { - Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary())) - }) - - It("should report about the AfterSuite", 
func() { - Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) - }) - - It("should report success", func() { - Ω(success).Should(BeTrue()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) - Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0)) - }) - - It("should not dump the writer", func() { - Ω(writer.EventStream).ShouldNot(ContainElement("DUMP")) - }) - }) - - Context("when the BeforeSuite fails", func() { - BeforeEach(func() { - befSuite = newBefSuite("BefSuite", true) - aftSuite = newBefSuite("AftSuite", false) - - skipped := newSpec("Skipped", noneFlag, false) - skipped.Skip() - - runner = newRunner( - config.GinkgoConfigType{}, - befSuite, - aftSuite, - newSpec("A", noneFlag, false), - newSpec("B", noneFlag, false), - newSpec("Pending", pendingFlag, false), - skipped, - ) - success = runner.Run() - }) - - It("should not run the specs, but it should run the AfterSuite", func() { - Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "AftSuite"})) - }) - - It("should report about the BeforeSuite", func() { - Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary())) - }) - - It("should report about the AfterSuite", func() { - Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) - }) - - It("should report failure", func() { - Ω(success).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(2)) - Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(2)) - }) - - It("should dump the writer", func() { - Ω(writer.EventStream).Should(ContainElement("DUMP")) - }) - }) - - Context("when some other test fails", func() { - BeforeEach(func() { - aftSuite = newBefSuite("AftSuite", false) - - runner = newRunner( - config.GinkgoConfigType{}, - nil, - aftSuite, - newSpec("A", noneFlag, true), - ) - success = runner.Run() - }) - - It("should still run the AfterSuite", func() { - Ω(thingsThatRan).Should(Equal([]string{"A", "AftSuite"})) - 
}) - - It("should report about the AfterSuite", func() { - Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) - }) - - It("should report failure", func() { - Ω(success).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(1)) - Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(1)) - }) - }) - - Context("when the AfterSuite fails", func() { - BeforeEach(func() { - befSuite = newBefSuite("BefSuite", false) - aftSuite = newBefSuite("AftSuite", true) - runner = newRunner( - config.GinkgoConfigType{}, - befSuite, - aftSuite, - newSpec("A", noneFlag, false), - newSpec("B", noneFlag, false), - ) - success = runner.Run() - }) - - It("should run everything", func() { - Ω(thingsThatRan).Should(Equal([]string{"BefSuite", "A", "B", "AftSuite"})) - }) - - It("should report about the BeforeSuite", func() { - Ω(reporter1.BeforeSuiteSummary).Should(Equal(befSuite.Summary())) - }) - - It("should report about the AfterSuite", func() { - Ω(reporter1.AfterSuiteSummary).Should(Equal(aftSuite.Summary())) - }) - - It("should report failure", func() { - Ω(success).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - Ω(reporter1.EndSummary.NumberOfFailedSpecs).Should(Equal(0)) - }) - - It("should dump the writer", func() { - Ω(writer.EventStream).Should(ContainElement("DUMP")) - }) - }) - }) - - Describe("When instructed to fail fast", func() { - BeforeEach(func() { - conf := config.GinkgoConfigType{ - FailFast: true, - } - runner = newRunner(conf, nil, newAftSuite("after-suite", false), newSpec("passing", noneFlag, false), newSpec("failing", noneFlag, true), newSpec("dont-see", noneFlag, true), newSpec("dont-see", noneFlag, true)) - }) - - It("should return false, report failure, and not run anything past the failing test", func() { - Ω(runner.Run()).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - 
Ω(thingsThatRan).Should(Equal([]string{"passing", "failing", "after-suite"})) - }) - - It("should announce the subsequent specs as skipped", func() { - runner.Run() - Ω(reporter1.SpecSummaries).Should(HaveLen(4)) - Ω(reporter1.SpecSummaries[2].State).Should(Equal(types.SpecStateSkipped)) - Ω(reporter1.SpecSummaries[3].State).Should(Equal(types.SpecStateSkipped)) - }) - - It("should mark all subsequent specs as skipped", func() { - runner.Run() - Ω(reporter1.EndSummary.NumberOfSkippedSpecs).Should(Equal(2)) - }) - }) - - Describe("Marking failure and success", func() { - Context("when all tests pass", func() { - BeforeEach(func() { - runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false)) - }) - - It("should return true and report success", func() { - Ω(runner.Run()).Should(BeTrue()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeTrue()) - }) - }) - - Context("when a test fails", func() { - BeforeEach(func() { - runner = newRunner(config.GinkgoConfigType{}, nil, nil, newSpec("failing", noneFlag, true), newSpec("pending", pendingFlag, false)) - }) - - It("should return false and report failure", func() { - Ω(runner.Run()).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - }) - }) - - Context("when there is a pending test, but pendings count as failures", func() { - BeforeEach(func() { - runner = newRunner(config.GinkgoConfigType{FailOnPending: true}, nil, nil, newSpec("passing", noneFlag, false), newSpec("pending", pendingFlag, false)) - }) - - It("should return false and report failure", func() { - Ω(runner.Run()).Should(BeFalse()) - Ω(reporter1.EndSummary.SuiteSucceeded).Should(BeFalse()) - }) - }) - }) - - Describe("Managing the writer", func() { - BeforeEach(func() { - runner = newRunner( - config.GinkgoConfigType{}, - nil, - nil, - newSpec("A", noneFlag, false), - newSpec("B", noneFlag, true), - newSpec("C", noneFlag, false), - ) - 
reporter1.SpecWillRunStub = func(specSummary *types.SpecSummary) { - writer.AddEvent("R1.WillRun") - } - reporter2.SpecWillRunStub = func(specSummary *types.SpecSummary) { - writer.AddEvent("R2.WillRun") - } - reporter1.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { - writer.AddEvent("R1.DidComplete") - } - reporter2.SpecDidCompleteStub = func(specSummary *types.SpecSummary) { - writer.AddEvent("R2.DidComplete") - } - runner.Run() - }) - - It("should truncate between tests, but only dump if a test fails", func() { - Ω(writer.EventStream).Should(Equal([]string{ - "TRUNCATE", - "R1.WillRun", - "R2.WillRun", - "A", - "R2.DidComplete", - "R1.DidComplete", - "TRUNCATE", - "R1.WillRun", - "R2.WillRun", - "B", - "R2.DidComplete", - "DUMP", - "R1.DidComplete", - "TRUNCATE", - "R1.WillRun", - "R2.WillRun", - "C", - "R2.DidComplete", - "R1.DidComplete", - })) - }) - }) - - Describe("CurrentSpecSummary", func() { - It("should return the spec summary for the currently running spec", func() { - var summary *types.SpecSummary - runner = newRunner( - config.GinkgoConfigType{}, - nil, - nil, - newSpec("A", noneFlag, false), - newSpecWithBody("B", func() { - var ok bool - summary, ok = runner.CurrentSpecSummary() - Ω(ok).Should(BeTrue()) - }), - newSpec("C", noneFlag, false), - ) - runner.Run() - - Ω(summary.ComponentTexts).Should(Equal([]string{"B"})) - - summary, ok := runner.CurrentSpecSummary() - Ω(summary).Should(BeNil()) - Ω(ok).Should(BeFalse()) - }) - }) - - Context("When running tests in parallel", func() { - It("reports the correct number of specs before parallelization", func() { - specs := spec.NewSpecs([]*spec.Spec{ - newSpec("A", noneFlag, false), - newSpec("B", pendingFlag, false), - newSpec("C", noneFlag, false), - }) - specs.TrimForParallelization(2, 1) - runner = New("description", nil, specs, nil, []reporters.Reporter{reporter1, reporter2}, writer, config.GinkgoConfigType{}) - runner.Run() - - 
Ω(reporter1.EndSummary.NumberOfSpecsBeforeParallelization).Should(Equal(3)) - Ω(reporter1.EndSummary.NumberOfTotalSpecs).Should(Equal(2)) - Ω(reporter1.EndSummary.NumberOfSpecsThatWillBeRun).Should(Equal(1)) - Ω(reporter1.EndSummary.NumberOfPendingSpecs).Should(Equal(1)) - }) - }) - - Describe("generating a suite id", func() { - It("should generate an id randomly", func() { - runnerA := newRunner(config.GinkgoConfigType{}, nil, nil) - runnerA.Run() - IDA := reporter1.BeginSummary.SuiteID - - runnerB := newRunner(config.GinkgoConfigType{}, nil, nil) - runnerB.Run() - IDB := reporter1.BeginSummary.SuiteID - - IDRegexp := "[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}" - Ω(IDA).Should(MatchRegexp(IDRegexp)) - Ω(IDB).Should(MatchRegexp(IDRegexp)) - - Ω(IDA).ShouldNot(Equal(IDB)) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/suite/suite.go b/kit/github.com/onsi/ginkgo/internal/suite/suite.go deleted file mode 100644 index 4cb3375..0000000 --- a/kit/github.com/onsi/ginkgo/internal/suite/suite.go +++ /dev/null @@ -1,170 +0,0 @@ -package suite - -import ( - "math/rand" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/containernode" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/leafnodes" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/spec" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/specrunner" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/writer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type ginkgoTestingT interface { - Fail() -} - -type Suite struct { - topLevelContainer *containernode.ContainerNode - currentContainer *containernode.ContainerNode - containerIndex int - beforeSuiteNode leafnodes.SuiteNode 
- afterSuiteNode leafnodes.SuiteNode - runner *specrunner.SpecRunner - failer *failer.Failer - running bool -} - -func New(failer *failer.Failer) *Suite { - topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) - - return &Suite{ - topLevelContainer: topLevelContainer, - currentContainer: topLevelContainer, - failer: failer, - containerIndex: 1, - } -} - -func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { - if config.ParallelTotal < 1 { - panic("ginkgo.parallel.total must be >= 1") - } - - if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { - panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") - } - - r := rand.New(rand.NewSource(config.RandomSeed)) - suite.topLevelContainer.Shuffle(r) - specs := suite.generateSpecs(description, config) - suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config) - - suite.running = true - success := suite.runner.Run() - if !success { - t.Fail() - } - return success, specs.HasProgrammaticFocus() -} - -func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs { - specsSlice := []*spec.Spec{} - for _, collatedNodes := range suite.topLevelContainer.Collate() { - specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers)) - } - - specs := spec.NewSpecs(specsSlice) - - if config.RandomizeAllSpecs { - specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) - } - - specs.ApplyFocus(description, config.FocusString, config.SkipString) - - if config.SkipMeasurements { - specs.SkipMeasurements() - } - - if config.ParallelTotal > 1 { - specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode) - } - - return specs -} - -func (suite *Suite) CurrentRunningSpecSummary() 
(*types.SpecSummary, bool) { - return suite.runner.CurrentSpecSummary() -} - -func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.beforeSuiteNode != nil { - panic("You may only call BeforeSuite once!") - } - suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.afterSuiteNode != nil { - panic("You may only call AfterSuite once!") - } - suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.beforeSuiteNode != nil { - panic("You may only call BeforeSuite once!") - } - suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.afterSuiteNode != nil { - panic("You may only call AfterSuite once!") - } - suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) -} - -func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { - container := containernode.New(text, flag, codeLocation) - suite.currentContainer.PushContainerNode(container) - - previousContainer := suite.currentContainer - suite.currentContainer = container - suite.containerIndex++ - - body() - - suite.containerIndex-- - suite.currentContainer = previousContainer -} - -func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - 
suite.failer.Fail("You may only call It from within another It/Measure/BeforeEach/JustBeforeEach/AfterEach", codeLocation) - } - suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { - if suite.running { - suite.failer.Fail("You may only call Measure from within another It/Measure/BeforeEach/JustBeforeEach/AfterEach", codeLocation) - } - suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call BeforeEach from within another It/Measure/BeforeEach/JustBeforeEach/AfterEach", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call JustBeforeEach from within another It/Measure/BeforeEach/JustBeforeEach/AfterEach", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} - -func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { - if suite.running { - suite.failer.Fail("You may only call AfterEach from within another It/Measure/BeforeEach/JustBeforeEach/AfterEach", codeLocation) - } - suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) -} diff --git 
a/kit/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go b/kit/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go deleted file mode 100644 index bd1abbb..0000000 --- a/kit/github.com/onsi/ginkgo/internal/suite/suite_suite_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package suite_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func Test(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Suite") -} - -var numBeforeSuiteRuns = 0 -var numAfterSuiteRuns = 0 - -var _ = BeforeSuite(func() { - numBeforeSuiteRuns++ -}) - -var _ = AfterSuite(func() { - numAfterSuiteRuns++ - Ω(numBeforeSuiteRuns).Should(Equal(1)) - Ω(numAfterSuiteRuns).Should(Equal(1)) -}) - -//Fakes -type fakeTestingT struct { - didFail bool -} - -func (fakeT *fakeTestingT) Fail() { - fakeT.didFail = true -} diff --git a/kit/github.com/onsi/ginkgo/internal/suite/suite_test.go b/kit/github.com/onsi/ginkgo/internal/suite/suite_test.go deleted file mode 100644 index 934ddc8..0000000 --- a/kit/github.com/onsi/ginkgo/internal/suite/suite_test.go +++ /dev/null @@ -1,360 +0,0 @@ -package suite_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/suite" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "math/rand" - "time" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - Failer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/failer" - Writer "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/writer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -var _ = Describe("Suite", func() { - var ( - specSuite *Suite - fakeT *fakeTestingT - fakeR *reporters.FakeReporter - writer *Writer.FakeGinkgoWriter - failer *Failer.Failer - ) - - BeforeEach(func() { - writer = Writer.NewFake() - fakeT = &fakeTestingT{} - fakeR = reporters.NewFakeReporter() - failer = Failer.New() - specSuite = New(failer) - }) - - Describe("running a suite", func() { - var ( - runOrder []string - randomizeAllSpecs bool - randomSeed int64 - focusString string - parallelNode int - parallelTotal int - runResult bool - hasProgrammaticFocus bool - ) - - var f = func(runText string) func() { - return func() { - runOrder = append(runOrder, runText) - } - } - - BeforeEach(func() { - randomizeAllSpecs = false - randomSeed = 11 - parallelNode = 1 - parallelTotal = 1 - focusString = "" - - runOrder = make([]string, 0) - specSuite.SetBeforeSuiteNode(f("BeforeSuite"), codelocation.New(0), 0) - specSuite.PushBeforeEachNode(f("top BE"), codelocation.New(0), 0) - specSuite.PushJustBeforeEachNode(f("top JBE"), codelocation.New(0), 0) - specSuite.PushAfterEachNode(f("top AE"), codelocation.New(0), 0) - - specSuite.PushContainerNode("container", func() { - specSuite.PushBeforeEachNode(f("BE"), codelocation.New(0), 0) - specSuite.PushJustBeforeEachNode(f("JBE"), codelocation.New(0), 0) - specSuite.PushAfterEachNode(f("AE"), codelocation.New(0), 0) - specSuite.PushItNode("it", f("IT"), types.FlagTypeNone, codelocation.New(0), 0) - - 
specSuite.PushContainerNode("inner container", func() { - specSuite.PushItNode("inner it", f("inner IT"), types.FlagTypeNone, codelocation.New(0), 0) - }, types.FlagTypeNone, codelocation.New(0)) - }, types.FlagTypeNone, codelocation.New(0)) - - specSuite.PushContainerNode("container 2", func() { - specSuite.PushBeforeEachNode(f("BE 2"), codelocation.New(0), 0) - specSuite.PushItNode("it 2", f("IT 2"), types.FlagTypeNone, codelocation.New(0), 0) - }, types.FlagTypeNone, codelocation.New(0)) - - specSuite.PushItNode("top level it", f("top IT"), types.FlagTypeNone, codelocation.New(0), 0) - - specSuite.SetAfterSuiteNode(f("AfterSuite"), codelocation.New(0), 0) - }) - - JustBeforeEach(func() { - runResult, hasProgrammaticFocus = specSuite.Run(fakeT, "suite description", []reporters.Reporter{fakeR}, writer, config.GinkgoConfigType{ - RandomSeed: randomSeed, - RandomizeAllSpecs: randomizeAllSpecs, - FocusString: focusString, - ParallelNode: parallelNode, - ParallelTotal: parallelTotal, - }) - }) - - It("provides the config and suite description to the reporter", func() { - Ω(fakeR.Config.RandomSeed).Should(Equal(int64(randomSeed))) - Ω(fakeR.Config.RandomizeAllSpecs).Should(Equal(randomizeAllSpecs)) - Ω(fakeR.BeginSummary.SuiteDescription).Should(Equal("suite description")) - }) - - It("reports that the BeforeSuite node ran", func() { - Ω(fakeR.BeforeSuiteSummary).ShouldNot(BeNil()) - }) - - It("reports that the AfterSuite node ran", func() { - Ω(fakeR.AfterSuiteSummary).ShouldNot(BeNil()) - }) - - It("provides information about the current test", func() { - description := CurrentGinkgoTestDescription() - Ω(description.ComponentTexts).Should(Equal([]string{"Suite", "running a suite", "provides information about the current test"})) - Ω(description.FullTestText).Should(Equal("Suite running a suite provides information about the current test")) - Ω(description.TestText).Should(Equal("provides information about the current test")) - 
Ω(description.IsMeasurement).Should(BeFalse()) - Ω(description.FileName).Should(ContainSubstring("suite_test.go")) - Ω(description.LineNumber).Should(BeNumerically(">", 50)) - Ω(description.LineNumber).Should(BeNumerically("<", 150)) - }) - - Measure("should run measurements", func(b Benchmarker) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - runtime := b.Time("sleeping", func() { - sleepTime := time.Duration(r.Float64() * 0.01 * float64(time.Second)) - time.Sleep(sleepTime) - }) - Ω(runtime.Seconds()).Should(BeNumerically("<=", 0.015)) - Ω(runtime.Seconds()).Should(BeNumerically(">=", 0)) - - randomValue := r.Float64() * 10.0 - b.RecordValue("random value", randomValue) - Ω(randomValue).Should(BeNumerically("<=", 10.0)) - Ω(randomValue).Should(BeNumerically(">=", 0.0)) - }, 10) - - It("creates a node hierarchy, converts it to a spec collection, and runs it", func() { - Ω(runOrder).Should(Equal([]string{ - "BeforeSuite", - "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE", - "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE", - "top BE", "BE 2", "top JBE", "IT 2", "top AE", - "top BE", "top JBE", "top IT", "top AE", - "AfterSuite", - })) - }) - - Context("when told to randomize all specs", func() { - BeforeEach(func() { - randomizeAllSpecs = true - }) - - It("does", func() { - Ω(runOrder).Should(Equal([]string{ - "BeforeSuite", - "top BE", "top JBE", "top IT", "top AE", - "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE", - "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE", - "top BE", "BE 2", "top JBE", "IT 2", "top AE", - "AfterSuite", - })) - }) - }) - - Describe("with ginkgo.parallel.total > 1", func() { - BeforeEach(func() { - parallelTotal = 2 - randomizeAllSpecs = true - }) - - Context("for one worker", func() { - BeforeEach(func() { - parallelNode = 1 - }) - - It("should run a subset of tests", func() { - Ω(runOrder).Should(Equal([]string{ - "BeforeSuite", - "top BE", "top JBE", "top IT", "top AE", - "top BE", 
"BE", "top JBE", "JBE", "inner IT", "AE", "top AE", - "AfterSuite", - })) - }) - }) - - Context("for another worker", func() { - BeforeEach(func() { - parallelNode = 2 - }) - - It("should run a (different) subset of tests", func() { - Ω(runOrder).Should(Equal([]string{ - "BeforeSuite", - "top BE", "BE", "top JBE", "JBE", "IT", "AE", "top AE", - "top BE", "BE 2", "top JBE", "IT 2", "top AE", - "AfterSuite", - })) - }) - }) - }) - - Context("when provided with a filter", func() { - BeforeEach(func() { - focusString = `inner|\d` - }) - - It("converts the filter to a regular expression and uses it to filter the running specs", func() { - Ω(runOrder).Should(Equal([]string{ - "BeforeSuite", - "top BE", "BE", "top JBE", "JBE", "inner IT", "AE", "top AE", - "top BE", "BE 2", "top JBE", "IT 2", "top AE", - "AfterSuite", - })) - }) - - It("should not report a programmatic focus", func() { - Ω(hasProgrammaticFocus).Should(BeFalse()) - }) - }) - - Context("with a programatically focused spec", func() { - BeforeEach(func() { - specSuite.PushItNode("focused it", f("focused it"), types.FlagTypeFocused, codelocation.New(0), 0) - }) - - It("should only run the focused test", func() { - Ω(runOrder).Should(Equal([]string{ - "BeforeSuite", - "top BE", "top JBE", "focused it", "top AE", - "AfterSuite", - })) - }) - - It("should report a programmatic focus", func() { - Ω(hasProgrammaticFocus).Should(BeTrue()) - }) - }) - - Context("when the specs pass", func() { - It("doesn't report a failure", func() { - Ω(fakeT.didFail).Should(BeFalse()) - }) - - It("should return true", func() { - Ω(runResult).Should(BeTrue()) - }) - }) - - Context("when a spec fails", func() { - var location types.CodeLocation - BeforeEach(func() { - specSuite.PushItNode("top level it", func() { - location = codelocation.New(0) - failer.Fail("oops!", location) - }, types.FlagTypeNone, codelocation.New(0), 0) - }) - - It("should return false", func() { - Ω(runResult).Should(BeFalse()) - }) - - It("reports a failure", 
func() { - Ω(fakeT.didFail).Should(BeTrue()) - }) - - It("generates the correct failure data", func() { - Ω(fakeR.SpecSummaries[0].Failure.Message).Should(Equal("oops!")) - Ω(fakeR.SpecSummaries[0].Failure.Location).Should(Equal(location)) - }) - }) - - Context("when runnable nodes are nested within other runnable nodes", func() { - Context("when an It is nested", func() { - BeforeEach(func() { - specSuite.PushItNode("top level it", func() { - specSuite.PushItNode("nested it", f("oops"), types.FlagTypeNone, codelocation.New(0), 0) - }, types.FlagTypeNone, codelocation.New(0), 0) - }) - - It("should fail", func() { - Ω(fakeT.didFail).Should(BeTrue()) - }) - }) - - Context("when a Measure is nested", func() { - BeforeEach(func() { - specSuite.PushItNode("top level it", func() { - specSuite.PushMeasureNode("nested measure", func(Benchmarker) {}, types.FlagTypeNone, codelocation.New(0), 10) - }, types.FlagTypeNone, codelocation.New(0), 0) - }) - - It("should fail", func() { - Ω(fakeT.didFail).Should(BeTrue()) - }) - }) - - Context("when a BeforeEach is nested", func() { - BeforeEach(func() { - specSuite.PushItNode("top level it", func() { - specSuite.PushBeforeEachNode(f("nested bef"), codelocation.New(0), 0) - }, types.FlagTypeNone, codelocation.New(0), 0) - }) - - It("should fail", func() { - Ω(fakeT.didFail).Should(BeTrue()) - }) - }) - - Context("when a JustBeforeEach is nested", func() { - BeforeEach(func() { - specSuite.PushItNode("top level it", func() { - specSuite.PushJustBeforeEachNode(f("nested jbef"), codelocation.New(0), 0) - }, types.FlagTypeNone, codelocation.New(0), 0) - }) - - It("should fail", func() { - Ω(fakeT.didFail).Should(BeTrue()) - }) - }) - - Context("when a AfterEach is nested", func() { - BeforeEach(func() { - specSuite.PushItNode("top level it", func() { - specSuite.PushAfterEachNode(f("nested aft"), codelocation.New(0), 0) - }, types.FlagTypeNone, codelocation.New(0), 0) - }) - - It("should fail", func() { - 
Ω(fakeT.didFail).Should(BeTrue()) - }) - }) - }) - }) - - Describe("BeforeSuite", func() { - Context("when setting BeforeSuite more than once", func() { - It("should panic", func() { - specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0) - - Ω(func() { - specSuite.SetBeforeSuiteNode(func() {}, codelocation.New(0), 0) - }).Should(Panic()) - - }) - }) - }) - - Describe("AfterSuite", func() { - Context("when setting AfterSuite more than once", func() { - It("should panic", func() { - specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0) - - Ω(func() { - specSuite.SetAfterSuiteNode(func() {}, codelocation.New(0), 0) - }).Should(Panic()) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/kit/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go deleted file mode 100644 index a2b9af8..0000000 --- a/kit/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go +++ /dev/null @@ -1,76 +0,0 @@ -package testingtproxy - -import ( - "fmt" - "io" -) - -type failFunc func(message string, callerSkip ...int) - -func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { - return &ginkgoTestingTProxy{ - fail: fail, - offset: offset, - writer: writer, - } -} - -type ginkgoTestingTProxy struct { - fail failFunc - offset int - writer io.Writer -} - -func (t *ginkgoTestingTProxy) Error(args ...interface{}) { - t.fail(fmt.Sprintln(args...), t.offset) -} - -func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { - t.fail(fmt.Sprintf(format, args...), t.offset) -} - -func (t *ginkgoTestingTProxy) Fail() { - t.fail("failed", t.offset) -} - -func (t *ginkgoTestingTProxy) FailNow() { - t.fail("failed", t.offset) -} - -func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { - t.fail(fmt.Sprintln(args...), t.offset) -} - -func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { - t.fail(fmt.Sprintf(format, args...), t.offset) -} - -func (t 
*ginkgoTestingTProxy) Log(args ...interface{}) { - fmt.Fprintln(t.writer, args...) -} - -func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { - fmt.Fprintf(t.writer, format, args...) -} - -func (t *ginkgoTestingTProxy) Failed() bool { - return false -} - -func (t *ginkgoTestingTProxy) Parallel() { -} - -func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { - fmt.Println(args...) -} - -func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { - fmt.Printf(format, args...) -} - -func (t *ginkgoTestingTProxy) SkipNow() { -} - -func (t *ginkgoTestingTProxy) Skipped() bool { - return false -} diff --git a/kit/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/kit/github.com/onsi/ginkgo/internal/writer/fake_writer.go deleted file mode 100644 index 56e8d92..0000000 --- a/kit/github.com/onsi/ginkgo/internal/writer/fake_writer.go +++ /dev/null @@ -1,23 +0,0 @@ -package writer - -type FakeGinkgoWriter struct { - EventStream []string -} - -func NewFake() *FakeGinkgoWriter { - return &FakeGinkgoWriter{ - EventStream: []string{}, - } -} - -func (writer *FakeGinkgoWriter) AddEvent(event string) { - writer.EventStream = append(writer.EventStream, event) -} - -func (writer *FakeGinkgoWriter) Truncate() { - writer.EventStream = append(writer.EventStream, "TRUNCATE") -} - -func (writer *FakeGinkgoWriter) DumpOut() { - writer.EventStream = append(writer.EventStream, "DUMP") -} diff --git a/kit/github.com/onsi/ginkgo/internal/writer/writer.go b/kit/github.com/onsi/ginkgo/internal/writer/writer.go deleted file mode 100644 index 72fdb33..0000000 --- a/kit/github.com/onsi/ginkgo/internal/writer/writer.go +++ /dev/null @@ -1,59 +0,0 @@ -package writer - -import ( - "bytes" - "io" - "sync" -) - -type WriterInterface interface { - Truncate() - DumpOut() -} - -type Writer struct { - buffer *bytes.Buffer - outWriter io.Writer - lock *sync.Mutex - stream bool -} - -func New(outWriter io.Writer) *Writer { - return &Writer{ - buffer: &bytes.Buffer{}, 
- lock: &sync.Mutex{}, - outWriter: outWriter, - stream: true, - } -} - -func (w *Writer) SetStream(stream bool) { - w.lock.Lock() - defer w.lock.Unlock() - w.stream = stream -} - -func (w *Writer) Write(b []byte) (n int, err error) { - w.lock.Lock() - defer w.lock.Unlock() - - if w.stream { - return w.outWriter.Write(b) - } else { - return w.buffer.Write(b) - } -} - -func (w *Writer) Truncate() { - w.lock.Lock() - defer w.lock.Unlock() - w.buffer.Reset() -} - -func (w *Writer) DumpOut() { - w.lock.Lock() - defer w.lock.Unlock() - if !w.stream { - w.buffer.WriteTo(w.outWriter) - } -} diff --git a/kit/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go b/kit/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go deleted file mode 100644 index 02899b5..0000000 --- a/kit/github.com/onsi/ginkgo/internal/writer/writer_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package writer_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestWriter(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Writer Suite") -} diff --git a/kit/github.com/onsi/ginkgo/internal/writer/writer_test.go b/kit/github.com/onsi/ginkgo/internal/writer/writer_test.go deleted file mode 100644 index c9c4246..0000000 --- a/kit/github.com/onsi/ginkgo/internal/writer/writer_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package writer_test - -import ( - "bytes" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/writer" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("Writer", func() { - var writer *Writer - var out *bytes.Buffer - - BeforeEach(func() { - out = &bytes.Buffer{} - writer = New(out) - }) - - It("should stream directly to the outbuffer by default", func() { - writer.Write([]byte("foo")) - Ω(out.String()).Should(Equal("foo")) - }) - - Context("when told not to stream", func() { - BeforeEach(func() { - writer.SetStream(false) - }) - - It("should only write to the buffer when told to DumpOut", func() { - writer.Write([]byte("foo")) - Ω(out.String()).Should(BeEmpty()) - writer.DumpOut() - Ω(out.String()).Should(Equal("foo")) - }) - - It("should truncate the internal buffer when told to truncate", func() { - writer.Write([]byte("foo")) - writer.Truncate() - writer.DumpOut() - Ω(out.String()).Should(BeEmpty()) - - writer.Write([]byte("foo")) - writer.DumpOut() - Ω(out.String()).Should(Equal("foo")) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/reporters/default_reporter.go b/kit/github.com/onsi/ginkgo/reporters/default_reporter.go deleted file mode 100644 index 00d1146..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/default_reporter.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Ginkgo's Default Reporter - -A number of command line flags are available to tweak Ginkgo's default output. 
- -These are documented [here](http://onsi.github.io/ginkgo/#running_tests) -*/ -package reporters - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters/stenographer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type DefaultReporter struct { - config config.DefaultReporterConfigType - stenographer stenographer.Stenographer - specSummaries []*types.SpecSummary -} - -func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { - return &DefaultReporter{ - config: config, - stenographer: stenographer, - } -} - -func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) - if config.ParallelTotal > 1 { - reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct) - } - reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) -} - -func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) - } -} - -func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) - } -} - -func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { - if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != 
types.SpecStatePending && specSummary.State != types.SpecStateSkipped { - reporter.stenographer.AnnounceSpecWillRun(specSummary) - } -} - -func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { - switch specSummary.State { - case types.SpecStatePassed: - if specSummary.IsMeasurement { - reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) - } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { - reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) - } else { - reporter.stenographer.AnnounceSuccesfulSpec(specSummary) - } - case types.SpecStatePending: - reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) - case types.SpecStateSkipped: - reporter.stenographer.AnnounceSkippedSpec(specSummary) - case types.SpecStateTimedOut: - reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - case types.SpecStatePanicked: - reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - case types.SpecStateFailed: - reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - } - - reporter.specSummaries = append(reporter.specSummaries, specSummary) -} - -func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.stenographer.SummarizeFailures(reporter.specSummaries) - reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) -} diff --git a/kit/github.com/onsi/ginkgo/reporters/default_reporter_test.go b/kit/github.com/onsi/ginkgo/reporters/default_reporter_test.go deleted file mode 100644 index 48f2374..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/default_reporter_test.go +++ /dev/null @@ -1,396 +0,0 @@ -package reporters_test - -import ( - "time" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - st "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters/stenographer" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("DefaultReporter", func() { - var ( - reporter *reporters.DefaultReporter - reporterConfig config.DefaultReporterConfigType - stenographer *st.FakeStenographer - - ginkgoConfig config.GinkgoConfigType - suite *types.SuiteSummary - spec *types.SpecSummary - ) - - BeforeEach(func() { - stenographer = st.NewFakeStenographer() - reporterConfig = config.DefaultReporterConfigType{ - NoColor: false, - SlowSpecThreshold: 0.1, - NoisyPendings: true, - Verbose: true, - FullTrace: true, - } - - reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) - }) - - call := func(method string, args ...interface{}) st.FakeStenographerCall { - return st.NewFakeStenographerCall(method, args...) 
- } - - Describe("SpecSuiteWillBegin", func() { - BeforeEach(func() { - suite = &types.SuiteSummary{ - SuiteDescription: "A Sweet Suite", - NumberOfTotalSpecs: 10, - NumberOfSpecsThatWillBeRun: 8, - } - - ginkgoConfig = config.GinkgoConfigType{ - RandomSeed: 1138, - RandomizeAllSpecs: true, - } - }) - - Context("when a serial (non-parallel) suite begins", func() { - BeforeEach(func() { - ginkgoConfig.ParallelTotal = 1 - - reporter.SpecSuiteWillBegin(ginkgoConfig, suite) - }) - - It("should announce the suite, then announce the number of specs", func() { - Ω(stenographer.Calls()).Should(HaveLen(2)) - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", "A Sweet Suite", ginkgoConfig.RandomSeed, true, false))) - Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceNumberOfSpecs", 8, 10, false))) - }) - }) - - Context("when a parallel suite begins", func() { - BeforeEach(func() { - ginkgoConfig.ParallelTotal = 2 - ginkgoConfig.ParallelNode = 1 - suite.NumberOfSpecsBeforeParallelization = 20 - - reporter.SpecSuiteWillBegin(ginkgoConfig, suite) - }) - - It("should announce the suite, announce that it's a parallel run, then announce the number of specs", func() { - Ω(stenographer.Calls()).Should(HaveLen(3)) - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuite", "A Sweet Suite", ginkgoConfig.RandomSeed, true, false))) - Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceParallelRun", 1, 2, 10, 20, false))) - Ω(stenographer.Calls()[2]).Should(Equal(call("AnnounceNumberOfSpecs", 8, 10, false))) - }) - }) - }) - - Describe("BeforeSuiteDidRun", func() { - Context("when the BeforeSuite passes", func() { - It("should announce nothing", func() { - reporter.BeforeSuiteDidRun(&types.SetupSummary{ - State: types.SpecStatePassed, - }) - - Ω(stenographer.Calls()).Should(BeEmpty()) - }) - }) - - Context("when the BeforeSuite fails", func() { - It("should announce the failure", func() { - summary := &types.SetupSummary{ - State: types.SpecStateFailed, - } - 
reporter.BeforeSuiteDidRun(summary) - - Ω(stenographer.Calls()).Should(HaveLen(1)) - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceBeforeSuiteFailure", summary, false, true))) - }) - }) - }) - - Describe("AfterSuiteDidRun", func() { - Context("when the AfterSuite passes", func() { - It("should announce nothing", func() { - reporter.AfterSuiteDidRun(&types.SetupSummary{ - State: types.SpecStatePassed, - }) - - Ω(stenographer.Calls()).Should(BeEmpty()) - }) - }) - - Context("when the AfterSuite fails", func() { - It("should announce the failure", func() { - summary := &types.SetupSummary{ - State: types.SpecStateFailed, - } - reporter.AfterSuiteDidRun(summary) - - Ω(stenographer.Calls()).Should(HaveLen(1)) - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceAfterSuiteFailure", summary, false, true))) - }) - }) - }) - - Describe("SpecWillRun", func() { - Context("When running in verbose mode", func() { - Context("and the spec will run", func() { - BeforeEach(func() { - spec = &types.SpecSummary{} - reporter.SpecWillRun(spec) - }) - - It("should announce that the spec will run", func() { - Ω(stenographer.Calls()).Should(HaveLen(1)) - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecWillRun", spec))) - }) - }) - - Context("and the spec will not run", func() { - Context("because it is pending", func() { - BeforeEach(func() { - spec = &types.SpecSummary{ - State: types.SpecStatePending, - } - reporter.SpecWillRun(spec) - }) - - It("should announce nothing", func() { - Ω(stenographer.Calls()).Should(BeEmpty()) - }) - }) - - Context("because it is skipped", func() { - BeforeEach(func() { - spec = &types.SpecSummary{ - State: types.SpecStateSkipped, - } - reporter.SpecWillRun(spec) - }) - - It("should announce nothing", func() { - Ω(stenographer.Calls()).Should(BeEmpty()) - }) - }) - }) - }) - - Context("When running in verbose & succinct mode", func() { - BeforeEach(func() { - reporterConfig.Succinct = true - reporter = 
reporters.NewDefaultReporter(reporterConfig, stenographer) - spec = &types.SpecSummary{} - reporter.SpecWillRun(spec) - }) - - It("should announce nothing", func() { - Ω(stenographer.Calls()).Should(BeEmpty()) - }) - }) - - Context("When not running in verbose mode", func() { - BeforeEach(func() { - reporterConfig.Verbose = false - reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) - spec = &types.SpecSummary{} - reporter.SpecWillRun(spec) - }) - - It("should announce nothing", func() { - Ω(stenographer.Calls()).Should(BeEmpty()) - }) - }) - }) - - Describe("SpecDidComplete", func() { - JustBeforeEach(func() { - reporter.SpecDidComplete(spec) - }) - - BeforeEach(func() { - spec = &types.SpecSummary{} - }) - - Context("When the spec passed", func() { - BeforeEach(func() { - spec.State = types.SpecStatePassed - }) - - Context("When the spec was a measurement", func() { - BeforeEach(func() { - spec.IsMeasurement = true - }) - - It("should announce the measurement", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulMeasurement", spec, false))) - }) - }) - - Context("When the spec is slow", func() { - BeforeEach(func() { - spec.RunTime = time.Second - }) - - It("should announce that it was slow", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSlowSpec", spec, false))) - }) - }) - - Context("Otherwise", func() { - It("should announce the succesful spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSpec", spec))) - }) - }) - }) - - Context("When the spec is pending", func() { - BeforeEach(func() { - spec.State = types.SpecStatePending - }) - - It("should announce the pending spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, true))) - }) - }) - - Context("When the spec is skipped", func() { - BeforeEach(func() { - spec.State = types.SpecStateSkipped - }) - - It("should announce the skipped spec", func() { - 
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec))) - }) - }) - - Context("When the spec timed out", func() { - BeforeEach(func() { - spec.State = types.SpecStateTimedOut - }) - - It("should announce the timedout spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecTimedOut", spec, false, true))) - }) - }) - - Context("When the spec panicked", func() { - BeforeEach(func() { - spec.State = types.SpecStatePanicked - }) - - It("should announce the panicked spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecPanicked", spec, false, true))) - }) - }) - - Context("When the spec failed", func() { - BeforeEach(func() { - spec.State = types.SpecStateFailed - }) - - It("should announce the failed spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecFailed", spec, false, true))) - }) - }) - - Context("in succinct mode", func() { - BeforeEach(func() { - reporterConfig.Succinct = true - reporter = reporters.NewDefaultReporter(reporterConfig, stenographer) - }) - - Context("When the spec passed", func() { - BeforeEach(func() { - spec.State = types.SpecStatePassed - }) - - Context("When the spec was a measurement", func() { - BeforeEach(func() { - spec.IsMeasurement = true - }) - - It("should announce the measurement", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulMeasurement", spec, true))) - }) - }) - - Context("When the spec is slow", func() { - BeforeEach(func() { - spec.RunTime = time.Second - }) - - It("should announce that it was slow", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSlowSpec", spec, true))) - }) - }) - - Context("Otherwise", func() { - It("should announce the succesful spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSuccesfulSpec", spec))) - }) - }) - }) - - Context("When the spec is pending", func() { - BeforeEach(func() { - spec.State = types.SpecStatePending - }) - - It("should 
announce the pending spec, but never noisily", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false))) - }) - }) - - Context("When the spec is skipped", func() { - BeforeEach(func() { - spec.State = types.SpecStateSkipped - }) - - It("should announce the skipped spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec))) - }) - }) - - Context("When the spec timed out", func() { - BeforeEach(func() { - spec.State = types.SpecStateTimedOut - }) - - It("should announce the timedout spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecTimedOut", spec, true, true))) - }) - }) - - Context("When the spec panicked", func() { - BeforeEach(func() { - spec.State = types.SpecStatePanicked - }) - - It("should announce the panicked spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecPanicked", spec, true, true))) - }) - }) - - Context("When the spec failed", func() { - BeforeEach(func() { - spec.State = types.SpecStateFailed - }) - - It("should announce the failed spec", func() { - Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSpecFailed", spec, true, true))) - }) - }) - }) - }) - - Describe("SpecSuiteDidEnd", func() { - BeforeEach(func() { - suite = &types.SuiteSummary{} - reporter.SpecSuiteDidEnd(suite) - }) - - It("should announce the spec run's completion", func() { - Ω(stenographer.Calls()[1]).Should(Equal(call("AnnounceSpecRunCompletion", suite, false))) - }) - }) -}) diff --git a/kit/github.com/onsi/ginkgo/reporters/fake_reporter.go b/kit/github.com/onsi/ginkgo/reporters/fake_reporter.go deleted file mode 100644 index 878f82f..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/fake_reporter.go +++ /dev/null @@ -1,59 +0,0 @@ -package reporters - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -//FakeReporter is useful for testing purposes -type 
FakeReporter struct { - Config config.GinkgoConfigType - - BeginSummary *types.SuiteSummary - BeforeSuiteSummary *types.SetupSummary - SpecWillRunSummaries []*types.SpecSummary - SpecSummaries []*types.SpecSummary - AfterSuiteSummary *types.SetupSummary - EndSummary *types.SuiteSummary - - SpecWillRunStub func(specSummary *types.SpecSummary) - SpecDidCompleteStub func(specSummary *types.SpecSummary) -} - -func NewFakeReporter() *FakeReporter { - return &FakeReporter{ - SpecWillRunSummaries: make([]*types.SpecSummary, 0), - SpecSummaries: make([]*types.SpecSummary, 0), - } -} - -func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - fakeR.Config = config - fakeR.BeginSummary = summary -} - -func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - fakeR.BeforeSuiteSummary = setupSummary -} - -func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { - if fakeR.SpecWillRunStub != nil { - fakeR.SpecWillRunStub(specSummary) - } - fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) -} - -func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { - if fakeR.SpecDidCompleteStub != nil { - fakeR.SpecDidCompleteStub(specSummary) - } - fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) -} - -func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - fakeR.AfterSuiteSummary = setupSummary -} - -func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fakeR.EndSummary = summary -} diff --git a/kit/github.com/onsi/ginkgo/reporters/junit_reporter.go b/kit/github.com/onsi/ginkgo/reporters/junit_reporter.go deleted file mode 100644 index 1557d8f..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - -JUnit XML Reporter for Ginkgo - -For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output - -*/ - -package reporters 
- -import ( - "encoding/xml" - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "os" - "strings" -) - -type JUnitTestSuite struct { - XMLName xml.Name `xml:"testsuite"` - TestCases []JUnitTestCase `xml:"testcase"` - Tests int `xml:"tests,attr"` - Failures int `xml:"failures,attr"` - Time float64 `xml:"time,attr"` -} - -type JUnitTestCase struct { - Name string `xml:"name,attr"` - ClassName string `xml:"classname,attr"` - FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` - Skipped *JUnitSkipped `xml:"skipped,omitempty"` - Time float64 `xml:"time,attr"` -} - -type JUnitFailureMessage struct { - Type string `xml:"type,attr"` - Message string `xml:",chardata"` -} - -type JUnitSkipped struct { - XMLName xml.Name `xml:"skipped"` -} - -type JUnitReporter struct { - suite JUnitTestSuite - filename string - testSuiteName string -} - -//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename. 
-func NewJUnitReporter(filename string) *JUnitReporter { - return &JUnitReporter{ - filename: filename, - } -} - -func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.suite = JUnitTestSuite{ - Tests: summary.NumberOfSpecsThatWillBeRun, - TestCases: []JUnitTestCase{}, - } - reporter.testSuiteName = summary.SuiteDescription -} - -func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { -} - -func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("BeforeSuite", setupSummary) -} - -func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("AfterSuite", setupSummary) -} - -func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - testCase := JUnitTestCase{ - Name: name, - ClassName: reporter.testSuiteName, - } - - testCase.FailureMessage = &JUnitFailureMessage{ - Type: reporter.failureTypeForState(setupSummary.State), - Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message), - } - testCase.Time = setupSummary.RunTime.Seconds() - reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) - } -} - -func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { - testCase := JUnitTestCase{ - Name: strings.Join(specSummary.ComponentTexts[1:], " "), - ClassName: reporter.testSuiteName, - } - if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - testCase.FailureMessage = &JUnitFailureMessage{ - Type: reporter.failureTypeForState(specSummary.State), - Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message), - } - } - if specSummary.State == 
types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - testCase.Skipped = &JUnitSkipped{} - } - testCase.Time = specSummary.RunTime.Seconds() - reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) -} - -func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.suite.Time = summary.RunTime.Seconds() - reporter.suite.Failures = summary.NumberOfFailedSpecs - file, err := os.Create(reporter.filename) - if err != nil { - fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) - } - defer file.Close() - file.WriteString(xml.Header) - encoder := xml.NewEncoder(file) - encoder.Indent(" ", " ") - err = encoder.Encode(reporter.suite) - if err != nil { - fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) - } -} - -func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { - switch state { - case types.SpecStateFailed: - return "Failure" - case types.SpecStateTimedOut: - return "Timeout" - case types.SpecStatePanicked: - return "Panic" - default: - return "" - } -} diff --git a/kit/github.com/onsi/ginkgo/reporters/junit_reporter_test.go b/kit/github.com/onsi/ginkgo/reporters/junit_reporter_test.go deleted file mode 100644 index a300946..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/junit_reporter_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package reporters_test - -import ( - "encoding/xml" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "io/ioutil" - "time" -) - -var _ = Describe("JUnit Reporter", func() { - var ( - outputFile string - reporter Reporter - ) - - readOutputFile := func() reporters.JUnitTestSuite { - bytes, err := ioutil.ReadFile(outputFile) - Ω(err).ShouldNot(HaveOccurred()) - var suite reporters.JUnitTestSuite - err = xml.Unmarshal(bytes, &suite) - Ω(err).ShouldNot(HaveOccurred()) - return suite - } - - BeforeEach(func() { - outputFile = "/tmp/test.xml" - reporter = reporters.NewJUnitReporter(outputFile) - - reporter.SpecSuiteWillBegin(config.GinkgoConfigType{}, &types.SuiteSummary{ - SuiteDescription: "My test suite", - NumberOfSpecsThatWillBeRun: 1, - }) - }) - - Describe("a passing test", func() { - BeforeEach(func() { - beforeSuite := &types.SetupSummary{ - State: types.SpecStatePassed, - } - reporter.BeforeSuiteDidRun(beforeSuite) - - afterSuite := &types.SetupSummary{ - State: types.SpecStatePassed, - } - reporter.AfterSuiteDidRun(afterSuite) - - spec := &types.SpecSummary{ - ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, - State: types.SpecStatePassed, - RunTime: 5 * time.Second, - } - reporter.SpecWillRun(spec) - reporter.SpecDidComplete(spec) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 0, - RunTime: 10 * time.Second, - }) - }) - - It("should record the test as passing", func() { - output := readOutputFile() - Ω(output.Tests).Should(Equal(1)) - Ω(output.Failures).Should(Equal(0)) - Ω(output.Time).Should(Equal(10.0)) - Ω(output.TestCases).Should(HaveLen(1)) - Ω(output.TestCases[0].Name).Should(Equal("A B C")) - Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) - Ω(output.TestCases[0].FailureMessage).Should(BeNil()) - Ω(output.TestCases[0].Skipped).Should(BeNil()) - Ω(output.TestCases[0].Time).Should(Equal(5.0)) - }) - }) - - Describe("when the BeforeSuite fails", func() { - var beforeSuite *types.SetupSummary - - BeforeEach(func() { 
- beforeSuite = &types.SetupSummary{ - State: types.SpecStateFailed, - RunTime: 3 * time.Second, - Failure: types.SpecFailure{ - Message: "failed to setup", - ComponentCodeLocation: codelocation.New(0), - }, - } - reporter.BeforeSuiteDidRun(beforeSuite) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 1, - RunTime: 10 * time.Second, - }) - }) - - It("should record the test as having failed", func() { - output := readOutputFile() - Ω(output.Tests).Should(Equal(1)) - Ω(output.Failures).Should(Equal(1)) - Ω(output.Time).Should(Equal(10.0)) - Ω(output.TestCases[0].Name).Should(Equal("BeforeSuite")) - Ω(output.TestCases[0].Time).Should(Equal(3.0)) - Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) - Ω(output.TestCases[0].FailureMessage.Type).Should(Equal("Failure")) - Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("failed to setup")) - Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(beforeSuite.Failure.ComponentCodeLocation.String())) - Ω(output.TestCases[0].Skipped).Should(BeNil()) - }) - }) - - Describe("when the AfterSuite fails", func() { - var afterSuite *types.SetupSummary - - BeforeEach(func() { - afterSuite = &types.SetupSummary{ - State: types.SpecStateFailed, - RunTime: 3 * time.Second, - Failure: types.SpecFailure{ - Message: "failed to setup", - ComponentCodeLocation: codelocation.New(0), - }, - } - reporter.AfterSuiteDidRun(afterSuite) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 1, - RunTime: 10 * time.Second, - }) - }) - - It("should record the test as having failed", func() { - output := readOutputFile() - Ω(output.Tests).Should(Equal(1)) - Ω(output.Failures).Should(Equal(1)) - Ω(output.Time).Should(Equal(10.0)) - Ω(output.TestCases[0].Name).Should(Equal("AfterSuite")) - Ω(output.TestCases[0].Time).Should(Equal(3.0)) - Ω(output.TestCases[0].ClassName).Should(Equal("My test 
suite")) - Ω(output.TestCases[0].FailureMessage.Type).Should(Equal("Failure")) - Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("failed to setup")) - Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(afterSuite.Failure.ComponentCodeLocation.String())) - Ω(output.TestCases[0].Skipped).Should(BeNil()) - }) - }) - - specStateCases := []struct { - state types.SpecState - message string - }{ - {types.SpecStateFailed, "Failure"}, - {types.SpecStateTimedOut, "Timeout"}, - {types.SpecStatePanicked, "Panic"}, - } - - for _, specStateCase := range specStateCases { - specStateCase := specStateCase - Describe("a failing test", func() { - var spec *types.SpecSummary - BeforeEach(func() { - spec = &types.SpecSummary{ - ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, - State: specStateCase.state, - RunTime: 5 * time.Second, - Failure: types.SpecFailure{ - ComponentCodeLocation: codelocation.New(0), - Message: "I failed", - }, - } - reporter.SpecWillRun(spec) - reporter.SpecDidComplete(spec) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 1, - RunTime: 10 * time.Second, - }) - }) - - It("should record test as failing", func() { - output := readOutputFile() - Ω(output.Tests).Should(Equal(1)) - Ω(output.Failures).Should(Equal(1)) - Ω(output.Time).Should(Equal(10.0)) - Ω(output.TestCases[0].Name).Should(Equal("A B C")) - Ω(output.TestCases[0].ClassName).Should(Equal("My test suite")) - Ω(output.TestCases[0].FailureMessage.Type).Should(Equal(specStateCase.message)) - Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring("I failed")) - Ω(output.TestCases[0].FailureMessage.Message).Should(ContainSubstring(spec.Failure.ComponentCodeLocation.String())) - Ω(output.TestCases[0].Skipped).Should(BeNil()) - }) - }) - } - - for _, specStateCase := range []types.SpecState{types.SpecStatePending, types.SpecStateSkipped} { - specStateCase := specStateCase - Describe("a 
skipped test", func() { - var spec *types.SpecSummary - BeforeEach(func() { - spec = &types.SpecSummary{ - ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, - State: specStateCase, - RunTime: 5 * time.Second, - } - reporter.SpecWillRun(spec) - reporter.SpecDidComplete(spec) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 0, - RunTime: 10 * time.Second, - }) - }) - - It("should record test as failing", func() { - output := readOutputFile() - Ω(output.Tests).Should(Equal(1)) - Ω(output.Failures).Should(Equal(0)) - Ω(output.Time).Should(Equal(10.0)) - Ω(output.TestCases[0].Name).Should(Equal("A B C")) - Ω(output.TestCases[0].Skipped).ShouldNot(BeNil()) - }) - }) - } -}) diff --git a/kit/github.com/onsi/ginkgo/reporters/reporter.go b/kit/github.com/onsi/ginkgo/reporters/reporter.go deleted file mode 100644 index 50a2728..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/reporter.go +++ /dev/null @@ -1,15 +0,0 @@ -package reporters - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -type Reporter interface { - SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) - BeforeSuiteDidRun(setupSummary *types.SetupSummary) - SpecWillRun(specSummary *types.SpecSummary) - SpecDidComplete(specSummary *types.SpecSummary) - AfterSuiteDidRun(setupSummary *types.SetupSummary) - SpecSuiteDidEnd(summary *types.SuiteSummary) -} diff --git a/kit/github.com/onsi/ginkgo/reporters/reporters_suite_test.go b/kit/github.com/onsi/ginkgo/reporters/reporters_suite_test.go deleted file mode 100644 index 693692f..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/reporters_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package reporters_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestReporters(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Reporters Suite") -} diff --git a/kit/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/kit/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go deleted file mode 100644 index ce5433a..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go +++ /dev/null @@ -1,64 +0,0 @@ -package stenographer - -import ( - "fmt" - "strings" -) - -func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { - var out string - - if len(args) > 0 { - out = fmt.Sprintf(format, args...) - } else { - out = format - } - - if s.color { - return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) - } else { - return out - } -} - -func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { - fmt.Println(text) - fmt.Println(strings.Repeat(bannerCharacter, len(text))) -} - -func (s *consoleStenographer) printNewLine() { - fmt.Println("") -} - -func (s *consoleStenographer) printDelimiter() { - fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30))) -} - -func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { - fmt.Print(s.indent(indentation, format, args...)) -} - -func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { - fmt.Println(s.indent(indentation, format, args...)) -} - -func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { - var text string - - if len(args) > 0 { - text = fmt.Sprintf(format, args...) 
- } else { - text = format - } - - stringArray := strings.Split(text, "\n") - padding := "" - if indentation >= 0 { - padding = strings.Repeat(" ", indentation) - } - for i, s := range stringArray { - stringArray[i] = fmt.Sprintf("%s%s", padding, s) - } - - return strings.Join(stringArray, "\n") -} diff --git a/kit/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/kit/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go deleted file mode 100644 index fb3990a..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go +++ /dev/null @@ -1,138 +0,0 @@ -package stenographer - -import ( - "sync" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { - return FakeStenographerCall{ - Method: method, - Args: args, - } -} - -type FakeStenographer struct { - calls []FakeStenographerCall - lock *sync.Mutex -} - -type FakeStenographerCall struct { - Method string - Args []interface{} -} - -func NewFakeStenographer() *FakeStenographer { - stenographer := &FakeStenographer{ - lock: &sync.Mutex{}, - } - stenographer.Reset() - return stenographer -} - -func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - return stenographer.calls -} - -func (stenographer *FakeStenographer) Reset() { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - stenographer.calls = make([]FakeStenographerCall, 0) -} - -func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - results := make([]FakeStenographerCall, 0) - for _, call := range stenographer.calls { - if call.Method == method { - results = append(results, call) - } - } - - return results -} - -func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { - 
stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) -} - -func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { - stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) -} - -func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { - stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) -} - -func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { - stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct) -} - -func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { - stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { - stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSpecWillRun", spec) -} - -func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) -} -func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { - stenographer.registerCall("AnnounceCapturedOutput", output) -} - -func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec 
*types.SpecSummary) { - stenographer.registerCall("AnnounceSuccesfulSpec", spec) -} - -func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) -} - -func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { - stenographer.registerCall("AnnouncePendingSpec", spec, noisy) -} - -func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSkippedSpec", spec) -} - -func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { - stenographer.registerCall("SummarizeFailures", summaries) -} diff --git a/kit/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/kit/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go deleted file mode 100644 index dd31c45..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ /dev/null @@ -1,503 +0,0 @@ -/* -The stenographer is used by Ginkgo's reporters to generate output. - -Move along, nothing to see here. 
-*/ - -package stenographer - -import ( - "fmt" - "strings" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" -) - -const defaultStyle = "\x1b[0m" -const boldStyle = "\x1b[1m" -const redColor = "\x1b[91m" -const greenColor = "\x1b[32m" -const yellowColor = "\x1b[33m" -const cyanColor = "\x1b[36m" -const grayColor = "\x1b[90m" -const lightGrayColor = "\x1b[37m" - -type cursorStateType int - -const ( - cursorStateTop cursorStateType = iota - cursorStateStreaming - cursorStateMidBlock - cursorStateEndBlock -) - -type Stenographer interface { - AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) - AnnounceAggregatedParallelRun(nodes int, succinct bool) - AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) - AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) - AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) - - AnnounceSpecWillRun(spec *types.SpecSummary) - AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) - AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) - - AnnounceCapturedOutput(output string) - - AnnounceSuccesfulSpec(spec *types.SpecSummary) - AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) - AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) - - AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) - AnnounceSkippedSpec(spec *types.SpecSummary) - - AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) - AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) - AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) - - SummarizeFailures(summaries []*types.SpecSummary) -} - -func New(color bool) Stenographer { - return &consoleStenographer{ - color: color, - cursorState: cursorStateTop, - } -} - -type consoleStenographer struct { - color bool - cursorState 
cursorStateType -} - -var alternatingColors = []string{defaultStyle, grayColor} - -func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { - if succinct { - s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) - return - } - s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") - s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) - if randomizingAll { - s.print(0, " - Will randomize all specs") - } - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { - if succinct { - s.print(0, "- node #%d ", node) - return - } - s.println(0, - "Parallel test node %s/%s. Assigned %s of %s specs.", - s.colorize(boldStyle, "%d", node), - s.colorize(boldStyle, "%d", nodes), - s.colorize(boldStyle, "%d", specsToRun), - s.colorize(boldStyle, "%d", totalSpecs), - ) - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { - if succinct { - s.print(0, "- %d nodes ", nodes) - return - } - s.println(0, - "Running in parallel across %s nodes", - s.colorize(boldStyle, "%d", nodes), - ) - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { - if succinct { - s.print(0, "- %d/%d specs ", specsToRun, total) - s.stream() - return - } - s.println(0, - "Will run %s of %s specs", - s.colorize(boldStyle, "%d", specsToRun), - s.colorize(boldStyle, "%d", total), - ) - - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { - if succinct && summary.SuiteSucceeded { - s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) - return - } - s.printNewLine() - color := greenColor - if !summary.SuiteSucceeded { - color = redColor - } - s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in 
%.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) - - status := "" - if summary.SuiteSucceeded { - status = s.colorize(boldStyle+greenColor, "SUCCESS!") - } else { - status = s.colorize(boldStyle+redColor, "FAIL!") - } - - s.print(0, - "%s -- %s | %s | %s | %s ", - status, - s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), - s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs), - s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), - s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), - ) -} - -func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { - s.startBlock() - for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { - s.print(0, s.colorize(alternatingColors[i%2], text)+" ") - } - - indentation := 0 - if len(spec.ComponentTexts) > 2 { - indentation = 1 - s.printNewLine() - } - index := len(spec.ComponentTexts) - 1 - s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) - s.printNewLine() - s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) - s.printNewLine() - s.midBlock() -} - -func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) -} - -func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.startBlock() - var message string - switch summary.State { - case types.SpecStateFailed: - message = "Failure" - case types.SpecStatePanicked: - message = "Panic" - case types.SpecStateTimedOut: - message = "Timeout" - } - - 
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, true, true) - - s.printNewLine() - s.printFailure(indentation, summary.State, summary.Failure, fullTrace) - - s.endBlock() -} - -func (s *consoleStenographer) AnnounceCapturedOutput(output string) { - if output == "" { - return - } - - s.startBlock() - s.println(0, output) - s.midBlock() -} - -func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { - s.print(0, s.colorize(greenColor, "•")) - s.stream() -} - -func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { - s.printBlockWithMessage( - s.colorize(greenColor, "• [SLOW TEST:%.3f seconds]", spec.RunTime.Seconds()), - "", - spec, - succinct, - ) -} - -func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { - s.printBlockWithMessage( - s.colorize(greenColor, "• [MEASUREMENT]"), - s.measurementReport(spec, succinct), - spec, - succinct, - ) -} - -func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { - if noisy { - s.printBlockWithMessage( - s.colorize(yellowColor, "P [PENDING]"), - "", - spec, - false, - ) - } else { - s.print(0, s.colorize(yellowColor, "P")) - s.stream() - } -} - -func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary) { - s.print(0, s.colorize(cyanColor, "S")) - s.stream() -} - -func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure("•... Timeout", spec, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure("•! 
Panic", spec, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure("• Failure", spec, succinct, fullTrace) -} - -func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { - failingSpecs := []*types.SpecSummary{} - - for _, summary := range summaries { - if summary.HasFailureState() { - failingSpecs = append(failingSpecs, summary) - } - } - - if len(failingSpecs) == 0 { - return - } - - s.printNewLine() - s.printNewLine() - plural := "s" - if len(failingSpecs) == 1 { - plural = "" - } - s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) - for _, summary := range failingSpecs { - s.printNewLine() - if summary.HasFailureState() { - if summary.TimedOut() { - s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) - } else if summary.Panicked() { - s.print(0, s.colorize(redColor+boldStyle, "[Panic!] ")) - } else if summary.Failed() { - s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) - } - s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, true, true) - s.printNewLine() - s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) - } - } -} - -func (s *consoleStenographer) startBlock() { - if s.cursorState == cursorStateStreaming { - s.printNewLine() - s.printDelimiter() - } else if s.cursorState == cursorStateMidBlock { - s.printNewLine() - } -} - -func (s *consoleStenographer) midBlock() { - s.cursorState = cursorStateMidBlock -} - -func (s *consoleStenographer) endBlock() { - s.printDelimiter() - s.cursorState = cursorStateEndBlock -} - -func (s *consoleStenographer) stream() { - s.cursorState = cursorStateStreaming -} - -func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { - s.startBlock() - s.println(0, header) 
- - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, false, succinct) - - if message != "" { - s.printNewLine() - s.println(indentation, message) - } - - s.endBlock() -} - -func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.startBlock() - s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, spec.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, true, succinct) - - s.printNewLine() - s.printFailure(indentation, spec.State, spec.Failure, fullTrace) - s.endBlock() -} - -func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { - if state == types.SpecStatePanicked { - s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) - s.println(indentation, s.colorize(redColor, "%v", failure.ForwardedPanic)) - s.println(indentation, failure.Location.String()) - s.printNewLine() - s.println(indentation, s.colorize(redColor, "Full Stack Trace")) - s.println(indentation, failure.Location.FullStackTrace) - } else { - s.println(indentation, s.colorize(redColor, failure.Message)) - s.printNewLine() - s.println(indentation, failure.Location.String()) - if fullTrace { - s.printNewLine() - s.println(indentation, s.colorize(redColor, "Full Stack Trace")) - s.println(indentation, failure.Location.FullStackTrace) - } - } -} - -func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, failure bool, succinct bool) int { - startIndex := 1 - indentation := 0 - - if len(componentTexts) == 1 { - startIndex = 0 - } - - for i := startIndex; i < len(componentTexts); i++ { - if failure && i == 
failedComponentIndex { - blockType := "" - switch failedComponentType { - case types.SpecComponentTypeBeforeSuite: - blockType = "BeforeSuite" - case types.SpecComponentTypeAfterSuite: - blockType = "AfterSuite" - case types.SpecComponentTypeBeforeEach: - blockType = "BeforeEach" - case types.SpecComponentTypeJustBeforeEach: - blockType = "JustBeforeEach" - case types.SpecComponentTypeAfterEach: - blockType = "AfterEach" - case types.SpecComponentTypeIt: - blockType = "It" - case types.SpecComponentTypeMeasure: - blockType = "Measurement" - } - if succinct { - s.print(0, s.colorize(redColor+boldStyle, "[%s] %s ", blockType, componentTexts[i])) - } else { - s.println(indentation, s.colorize(redColor+boldStyle, "%s [%s]", componentTexts[i], blockType)) - s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) - } - } else { - if succinct { - s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) - } else { - s.println(indentation, componentTexts[i]) - s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) - } - } - indentation++ - } - - return indentation -} - -func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, failure bool, succinct bool) int { - indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, failure, succinct) - - if succinct { - if len(componentTexts) > 0 { - s.printNewLine() - s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) - } - s.printNewLine() - indentation = 1 - } else { - indentation-- - } - - return indentation -} - -func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { - orderedKeys := make([]string, len(measurements)) - for key, measurement := range measurements { - 
orderedKeys[measurement.Order] = key - } - return orderedKeys -} - -func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { - if len(spec.Measurements) == 0 { - return "Found no measurements" - } - - message := []string{} - orderedKeys := s.orderedMeasurementKeys(spec.Measurements) - - if succinct { - message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) - for _, key := range orderedKeys { - measurement := spec.Measurements[key] - message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", - s.colorize(boldStyle, "%s", measurement.Name), - measurement.SmallestLabel, - s.colorize(greenColor, "%.3f", measurement.Smallest), - measurement.Units, - measurement.AverageLabel, - s.colorize(cyanColor, "%.3f", measurement.Average), - measurement.Units, - s.colorize(cyanColor, "%.3f", measurement.StdDeviation), - measurement.Units, - measurement.LargestLabel, - s.colorize(redColor, "%.3f", measurement.Largest), - measurement.Units, - )) - } - } else { - message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) - for _, key := range orderedKeys { - measurement := spec.Measurements[key] - info := "" - if measurement.Info != nil { - message = append(message, fmt.Sprintf("%v", measurement.Info)) - } - - message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s", - s.colorize(boldStyle, "%s", measurement.Name), - info, - measurement.SmallestLabel, - s.colorize(greenColor, "%.3f", measurement.Smallest), - measurement.Units, - measurement.LargestLabel, - s.colorize(redColor, "%.3f", measurement.Largest), - measurement.Units, - measurement.AverageLabel, - s.colorize(cyanColor, "%.3f", measurement.Average), - measurement.Units, - s.colorize(cyanColor, "%.3f", measurement.StdDeviation), - measurement.Units, - )) - } - } - - return strings.Join(message, "\n") -} diff --git 
a/kit/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/kit/github.com/onsi/ginkgo/reporters/teamcity_reporter.go deleted file mode 100644 index 76c1dd1..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - -TeamCity Reporter for Ginkgo - -Makes use of TeamCity's support for Service Messages -http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests -*/ - -package reporters - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - "io" - "strings" -) - -const ( - messageId = "##teamcity" -) - -type TeamCityReporter struct { - writer io.Writer - testSuiteName string -} - -func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { - return &TeamCityReporter{ - writer: writer, - } -} - -func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.testSuiteName = escape(summary.SuiteDescription) - fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) -} - -func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("BeforeSuite", setupSummary) -} - -func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("AfterSuite", setupSummary) -} - -func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - testName := escape(name) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) - message := escape(setupSummary.Failure.ComponentCodeLocation.String()) - details := escape(setupSummary.Failure.Message) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, 
testName, message, details) - durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) - } -} - -func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { - testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) -} - -func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { - testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - - if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - message := escape(specSummary.Failure.ComponentCodeLocation.String()) - details := escape(specSummary.Failure.Message) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) - } - if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) - } - - durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) -} - -func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName) -} - -func escape(output string) string { - output = strings.Replace(output, "|", "||", -1) - output = strings.Replace(output, "'", "|'", -1) - output = strings.Replace(output, "\n", "|n", -1) - output = strings.Replace(output, "\r", "|r", -1) - output = strings.Replace(output, "[", "|[", -1) - output = strings.Replace(output, "]", "|]", -1) - return output -} diff --git 
a/kit/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go b/kit/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go deleted file mode 100644 index 0d3278e..0000000 --- a/kit/github.com/onsi/ginkgo/reporters/teamcity_reporter_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package reporters_test - -import ( - "bytes" - "fmt" - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/internal/codelocation" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/reporters" - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "time" -) - -var _ = Describe("TeamCity Reporter", func() { - var ( - buffer bytes.Buffer - reporter Reporter - ) - - BeforeEach(func() { - buffer.Truncate(0) - reporter = reporters.NewTeamCityReporter(&buffer) - reporter.SpecSuiteWillBegin(config.GinkgoConfigType{}, &types.SuiteSummary{ - SuiteDescription: "Foo's test suite", - NumberOfSpecsThatWillBeRun: 1, - }) - }) - - Describe("a passing test", func() { - BeforeEach(func() { - beforeSuite := &types.SetupSummary{ - State: types.SpecStatePassed, - } - reporter.BeforeSuiteDidRun(beforeSuite) - - afterSuite := &types.SetupSummary{ - State: types.SpecStatePassed, - } - reporter.AfterSuiteDidRun(afterSuite) - - spec := &types.SpecSummary{ - ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, - State: types.SpecStatePassed, - RunTime: 5 * time.Second, - } - reporter.SpecWillRun(spec) - reporter.SpecDidComplete(spec) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 0, - RunTime: 10 * time.Second, - }) - }) - - It("should record the test as passing", func() { - actual := buffer.String() - expected := - "##teamcity[testSuiteStarted name='Foo|'s test suite']" + - "##teamcity[testStarted name='A B C']" + - "##teamcity[testFinished name='A 
B C' duration='5000']" + - "##teamcity[testSuiteFinished name='Foo|'s test suite']" - Ω(actual).Should(Equal(expected)) - }) - }) - - Describe("when the BeforeSuite fails", func() { - var beforeSuite *types.SetupSummary - - BeforeEach(func() { - beforeSuite = &types.SetupSummary{ - State: types.SpecStateFailed, - RunTime: 3 * time.Second, - Failure: types.SpecFailure{ - Message: "failed to setup\n", - ComponentCodeLocation: codelocation.New(0), - }, - } - reporter.BeforeSuiteDidRun(beforeSuite) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 1, - RunTime: 10 * time.Second, - }) - }) - - It("should record the test as having failed", func() { - actual := buffer.String() - expected := fmt.Sprintf( - "##teamcity[testSuiteStarted name='Foo|'s test suite']"+ - "##teamcity[testStarted name='BeforeSuite']"+ - "##teamcity[testFailed name='BeforeSuite' message='%s' details='failed to setup|n']"+ - "##teamcity[testFinished name='BeforeSuite' duration='3000']"+ - "##teamcity[testSuiteFinished name='Foo|'s test suite']", beforeSuite.Failure.ComponentCodeLocation.String(), - ) - Ω(actual).Should(Equal(expected)) - }) - }) - - Describe("when the AfterSuite fails", func() { - var afterSuite *types.SetupSummary - - BeforeEach(func() { - afterSuite = &types.SetupSummary{ - State: types.SpecStateFailed, - RunTime: 3 * time.Second, - Failure: types.SpecFailure{ - Message: "failed to setup\n", - ComponentCodeLocation: codelocation.New(0), - }, - } - reporter.AfterSuiteDidRun(afterSuite) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 1, - RunTime: 10 * time.Second, - }) - }) - - It("should record the test as having failed", func() { - actual := buffer.String() - expected := fmt.Sprintf( - "##teamcity[testSuiteStarted name='Foo|'s test suite']"+ - "##teamcity[testStarted name='AfterSuite']"+ - "##teamcity[testFailed name='AfterSuite' message='%s' details='failed to 
setup|n']"+ - "##teamcity[testFinished name='AfterSuite' duration='3000']"+ - "##teamcity[testSuiteFinished name='Foo|'s test suite']", afterSuite.Failure.ComponentCodeLocation.String(), - ) - Ω(actual).Should(Equal(expected)) - }) - }) - specStateCases := []struct { - state types.SpecState - message string - }{ - {types.SpecStateFailed, "Failure"}, - {types.SpecStateTimedOut, "Timeout"}, - {types.SpecStatePanicked, "Panic"}, - } - - for _, specStateCase := range specStateCases { - specStateCase := specStateCase - Describe("a failing test", func() { - var spec *types.SpecSummary - BeforeEach(func() { - spec = &types.SpecSummary{ - ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, - State: specStateCase.state, - RunTime: 5 * time.Second, - Failure: types.SpecFailure{ - ComponentCodeLocation: codelocation.New(0), - Message: "I failed", - }, - } - reporter.SpecWillRun(spec) - reporter.SpecDidComplete(spec) - - reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 1, - RunTime: 10 * time.Second, - }) - }) - - It("should record test as failing", func() { - actual := buffer.String() - expected := - fmt.Sprintf("##teamcity[testSuiteStarted name='Foo|'s test suite']"+ - "##teamcity[testStarted name='A B C']"+ - "##teamcity[testFailed name='A B C' message='%s' details='I failed']"+ - "##teamcity[testFinished name='A B C' duration='5000']"+ - "##teamcity[testSuiteFinished name='Foo|'s test suite']", spec.Failure.ComponentCodeLocation.String()) - Ω(actual).Should(Equal(expected)) - }) - }) - } - - for _, specStateCase := range []types.SpecState{types.SpecStatePending, types.SpecStateSkipped} { - specStateCase := specStateCase - Describe("a skipped test", func() { - var spec *types.SpecSummary - BeforeEach(func() { - spec = &types.SpecSummary{ - ComponentTexts: []string{"[Top Level]", "A", "B", "C"}, - State: specStateCase, - RunTime: 5 * time.Second, - } - reporter.SpecWillRun(spec) - reporter.SpecDidComplete(spec) - - 
reporter.SpecSuiteDidEnd(&types.SuiteSummary{ - NumberOfSpecsThatWillBeRun: 1, - NumberOfFailedSpecs: 0, - RunTime: 10 * time.Second, - }) - }) - - It("should record test as ignored", func() { - actual := buffer.String() - expected := - "##teamcity[testSuiteStarted name='Foo|'s test suite']" + - "##teamcity[testStarted name='A B C']" + - "##teamcity[testIgnored name='A B C']" + - "##teamcity[testFinished name='A B C' duration='5000']" + - "##teamcity[testSuiteFinished name='Foo|'s test suite']" - Ω(actual).Should(Equal(expected)) - }) - }) - } -}) diff --git a/kit/github.com/onsi/ginkgo/types/code_location.go b/kit/github.com/onsi/ginkgo/types/code_location.go deleted file mode 100644 index 935a89e..0000000 --- a/kit/github.com/onsi/ginkgo/types/code_location.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -import ( - "fmt" -) - -type CodeLocation struct { - FileName string - LineNumber int - FullStackTrace string -} - -func (codeLocation CodeLocation) String() string { - return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) -} diff --git a/kit/github.com/onsi/ginkgo/types/synchronization.go b/kit/github.com/onsi/ginkgo/types/synchronization.go deleted file mode 100644 index fdd6ed5..0000000 --- a/kit/github.com/onsi/ginkgo/types/synchronization.go +++ /dev/null @@ -1,30 +0,0 @@ -package types - -import ( - "encoding/json" -) - -type RemoteBeforeSuiteState int - -const ( - RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota - - RemoteBeforeSuiteStatePending - RemoteBeforeSuiteStatePassed - RemoteBeforeSuiteStateFailed - RemoteBeforeSuiteStateDisappeared -) - -type RemoteBeforeSuiteData struct { - Data []byte - State RemoteBeforeSuiteState -} - -func (r RemoteBeforeSuiteData) ToJSON() []byte { - data, _ := json.Marshal(r) - return data -} - -type RemoteAfterSuiteData struct { - CanRun bool -} diff --git a/kit/github.com/onsi/ginkgo/types/types.go b/kit/github.com/onsi/ginkgo/types/types.go deleted file mode 100644 index 
4a3b213..0000000 --- a/kit/github.com/onsi/ginkgo/types/types.go +++ /dev/null @@ -1,141 +0,0 @@ -package types - -import ( - "time" -) - -const GINKGO_FOCUS_EXIT_CODE = 197 - -type SuiteSummary struct { - SuiteDescription string - SuiteSucceeded bool - SuiteID string - - NumberOfSpecsBeforeParallelization int - NumberOfTotalSpecs int - NumberOfSpecsThatWillBeRun int - NumberOfPendingSpecs int - NumberOfSkippedSpecs int - NumberOfPassedSpecs int - NumberOfFailedSpecs int - RunTime time.Duration -} - -type SpecSummary struct { - ComponentTexts []string - ComponentCodeLocations []CodeLocation - - State SpecState - RunTime time.Duration - Failure SpecFailure - IsMeasurement bool - NumberOfSamples int - Measurements map[string]*SpecMeasurement - - CapturedOutput string - SuiteID string -} - -func (s SpecSummary) HasFailureState() bool { - return s.State == SpecStateTimedOut || s.State == SpecStatePanicked || s.State == SpecStateFailed -} - -func (s SpecSummary) TimedOut() bool { - return s.State == SpecStateTimedOut -} - -func (s SpecSummary) Panicked() bool { - return s.State == SpecStatePanicked -} - -func (s SpecSummary) Failed() bool { - return s.State == SpecStateFailed -} - -func (s SpecSummary) Passed() bool { - return s.State == SpecStatePassed -} - -func (s SpecSummary) Skipped() bool { - return s.State == SpecStateSkipped -} - -func (s SpecSummary) Pending() bool { - return s.State == SpecStatePending -} - -type SetupSummary struct { - ComponentType SpecComponentType - CodeLocation CodeLocation - - State SpecState - RunTime time.Duration - Failure SpecFailure - - CapturedOutput string - SuiteID string -} - -type SpecFailure struct { - Message string - Location CodeLocation - ForwardedPanic interface{} - - ComponentIndex int - ComponentType SpecComponentType - ComponentCodeLocation CodeLocation -} - -type SpecMeasurement struct { - Name string - Info interface{} - Order int - - Results []float64 - - Smallest float64 - Largest float64 - Average float64 - 
StdDeviation float64 - - SmallestLabel string - LargestLabel string - AverageLabel string - Units string -} - -type SpecState uint - -const ( - SpecStateInvalid SpecState = iota - - SpecStatePending - SpecStateSkipped - SpecStatePassed - SpecStateFailed - SpecStatePanicked - SpecStateTimedOut -) - -type SpecComponentType uint - -const ( - SpecComponentTypeInvalid SpecComponentType = iota - - SpecComponentTypeContainer - SpecComponentTypeBeforeSuite - SpecComponentTypeAfterSuite - SpecComponentTypeBeforeEach - SpecComponentTypeJustBeforeEach - SpecComponentTypeAfterEach - SpecComponentTypeIt - SpecComponentTypeMeasure -) - -type FlagType uint - -const ( - FlagTypeNone FlagType = iota - FlagTypeFocused - FlagTypePending -) diff --git a/kit/github.com/onsi/ginkgo/types/types_suite_test.go b/kit/github.com/onsi/ginkgo/types/types_suite_test.go deleted file mode 100644 index d425807..0000000 --- a/kit/github.com/onsi/ginkgo/types/types_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package types_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestTypes(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Types Suite") -} diff --git a/kit/github.com/onsi/ginkgo/types/types_test.go b/kit/github.com/onsi/ginkgo/types/types_test.go deleted file mode 100644 index e006caa..0000000 --- a/kit/github.com/onsi/ginkgo/types/types_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package types_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/types" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var specStates = []SpecState{ - SpecStatePassed, - SpecStateTimedOut, - SpecStatePanicked, - SpecStateFailed, - SpecStatePending, - SpecStateSkipped, -} - -func verifySpecSummary(caller func(SpecSummary) bool, trueStates ...SpecState) { - summary := SpecSummary{} - trueStateLookup := map[SpecState]bool{} - for _, state := range trueStates { - trueStateLookup[state] = true - summary.State = state - Ω(caller(summary)).Should(BeTrue()) - } - - for _, state := range specStates { - if trueStateLookup[state] { - continue - } - summary.State = state - Ω(caller(summary)).Should(BeFalse()) - } -} - -var _ = Describe("Types", func() { - Describe("SpecSummary", func() { - It("knows when it is in a failure-like state", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.HasFailureState() - }, SpecStateTimedOut, SpecStatePanicked, SpecStateFailed) - }) - - It("knows when it passed", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.Passed() - }, SpecStatePassed) - }) - - It("knows when it has failed", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.Failed() - }, SpecStateFailed) - }) - - It("knows when it has panicked", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.Panicked() - }, SpecStatePanicked) - }) - - It("knows when it has timed out", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.TimedOut() - }, SpecStateTimedOut) - }) - - It("knows when it is pending", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.Pending() - }, SpecStatePending) - }) - - It("knows when it is skipped", func() { - verifySpecSummary(func(summary SpecSummary) bool { - return summary.Skipped() - }, SpecStateSkipped) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/.gitignore b/kit/github.com/onsi/gomega/.gitignore deleted file mode 100644 index 
614bfc2..0000000 --- a/kit/github.com/onsi/gomega/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -*.test -. -.git.genus diff --git a/kit/github.com/onsi/gomega/.travis.yml b/kit/github.com/onsi/gomega/.travis.yml deleted file mode 100644 index 2ecdf95..0000000 --- a/kit/github.com/onsi/gomega/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.3 - -install: - - go get -v ./... - - go get github.com/onsi/ginkgo - - go install github.com/onsi/ginkgo/ginkgo - -script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race diff --git a/kit/github.com/onsi/gomega/CHANGELOG.md b/kit/github.com/onsi/gomega/CHANGELOG.md deleted file mode 100644 index cbcdd08..0000000 --- a/kit/github.com/onsi/gomega/CHANGELOG.md +++ /dev/null @@ -1,49 +0,0 @@ -## 1.0 (8/2/2014) - -No changes. Dropping "beta" from the version number. - -## 1.0.0-beta (7/8/2014) -Breaking Changes: - -- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead. -- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher - -New Test-Support Features: - -- `ghttp`: supports testing http clients - - Provides a flexible fake http server - - Provides a collection of chainable http handlers that perform assertions. -- `gbytes`: supports making ordered assertions against streams of data - - Provides a `gbytes.Buffer` - - Provides a `Say` matcher to perform ordered assertions against output data -- `gexec`: supports testing external processes - - Provides support for building Go binaries - - Wraps and starts `exec.Cmd` commands - - Makes it easy to assert against stdout and stderr - - Makes it easy to send signals and wait for processes to exit - - Provides an `Exit` matcher to assert against exit code. 
- -DSL Changes: - -- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs. -- The default timeouts for `Eventually` and `Consistently` are now configurable. - -New Matchers: - -- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map. -- `BeTemporally`: like `BeNumerically` but for `time.Time` -- `HaveKeyWithValue`: asserts a map has a given key with the given value. - -Updated Matchers: - -- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher. -- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed. - -Misc: - -- Start using semantic versioning -- Start maintaining changelog - -Major refactor: - -- Pull out Gomega's internal to `internal` diff --git a/kit/github.com/onsi/gomega/MIT.LICENSE b/kit/github.com/onsi/gomega/MIT.LICENSE deleted file mode 100644 index 941ee5b..0000000 --- a/kit/github.com/onsi/gomega/MIT.LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2013 Onsi Fakhouri - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/kit/github.com/onsi/gomega/README.md b/kit/github.com/onsi/gomega/README.md deleted file mode 100644 index 9520451..0000000 --- a/kit/github.com/onsi/gomega/README.md +++ /dev/null @@ -1,17 +0,0 @@ -![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) - -[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega) - -Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided_matchers). - -To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). - -## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang - -Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/) - -## License - -Gomega is MIT-Licensed - -The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license. diff --git a/kit/github.com/onsi/gomega/format/format.go b/kit/github.com/onsi/gomega/format/format.go deleted file mode 100644 index ec9c91a..0000000 --- a/kit/github.com/onsi/gomega/format/format.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information. 
-*/ -package format - -import ( - "fmt" - "reflect" - "strings" -) - -// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects -var MaxDepth = uint(10) - -/* -By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. - -Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead. - -Note that GoString and String don't always have all the information you need to understand why a test failed! -*/ -var UseStringerRepresentation = false - -//The default indentation string emitted by the format package -var Indent = " " - -var longFormThreshold = 20 - -/* -Generates a formatted matcher success/failure message of the form: - - Expected - - - - -If expected is omited, then the message looks like: - - Expected - - -*/ -func Message(actual interface{}, message string, expected ...interface{}) string { - if len(expected) == 0 { - return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) - } else { - return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1)) - } -} - -/* -Pretty prints the passed in object at the passed in indentation level. - -Object recurses into deeply nested objects emitting pretty-printed representations of their components. - -Modify format.MaxDepth to control how deep the recursion is allowed to go -Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of -recursing into the object. -*/ -func Object(object interface{}, indentation uint) string { - indent := strings.Repeat(Indent, int(indentation)) - value := reflect.ValueOf(object) - return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) -} - -/* -IndentString takes a string and indents each line by the specified amount. 
-*/ -func IndentString(s string, indentation uint) string { - components := strings.Split(s, "\n") - result := "" - indent := strings.Repeat(Indent, int(indentation)) - for i, component := range components { - result += indent + component - if i < len(components)-1 { - result += "\n" - } - } - - return result -} - -func formatType(object interface{}) string { - t := reflect.TypeOf(object) - if t == nil { - return "nil" - } - switch t.Kind() { - case reflect.Chan: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) - case reflect.Ptr: - return fmt.Sprintf("%T | %p", object, object) - case reflect.Slice: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) - case reflect.Map: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d", object, v.Len()) - default: - return fmt.Sprintf("%T", object) - } -} - -func formatValue(value reflect.Value, indentation uint) string { - if indentation > MaxDepth { - return "..." 
- } - - if isNilValue(value) { - return "nil" - } - - if UseStringerRepresentation { - if value.CanInterface() { - obj := value.Interface() - switch x := obj.(type) { - case fmt.GoStringer: - return x.GoString() - case fmt.Stringer: - return x.String() - } - } - } - - switch value.Kind() { - case reflect.Bool: - return fmt.Sprintf("%v", value.Bool()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return fmt.Sprintf("%v", value.Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return fmt.Sprintf("%v", value.Uint()) - case reflect.Uintptr: - return fmt.Sprintf("0x%x", value.Uint()) - case reflect.Float32, reflect.Float64: - return fmt.Sprintf("%v", value.Float()) - case reflect.Complex64, reflect.Complex128: - return fmt.Sprintf("%v", value.Complex()) - case reflect.Chan: - return fmt.Sprintf("0x%x", value.Pointer()) - case reflect.Func: - return fmt.Sprintf("0x%x", value.Pointer()) - case reflect.Ptr: - return formatValue(value.Elem(), indentation) - case reflect.Slice: - if value.Type().Elem().Kind() == reflect.Uint8 { - return formatString(value.Bytes(), indentation) - } - return formatSlice(value, indentation) - case reflect.String: - return formatString(value.String(), indentation) - case reflect.Array: - return formatSlice(value, indentation) - case reflect.Map: - return formatMap(value, indentation) - case reflect.Struct: - return formatStruct(value, indentation) - case reflect.Interface: - return formatValue(value.Elem(), indentation) - default: - if value.CanInterface() { - return fmt.Sprintf("%#v", value.Interface()) - } else { - return fmt.Sprintf("%#v", value) - } - } -} - -func formatString(object interface{}, indentation uint) string { - if indentation == 1 { - s := fmt.Sprintf("%s", object) - components := strings.Split(s, "\n") - result := "" - for i, component := range components { - if i == 0 { - result += component - } else { - result += Indent + component - } - if i < 
len(components)-1 { - result += "\n" - } - } - - return fmt.Sprintf("%s", result) - } else { - return fmt.Sprintf("%q", object) - } -} - -func formatSlice(v reflect.Value, indentation uint) string { - l := v.Len() - result := make([]string, l) - longest := 0 - for i := 0; i < l; i++ { - result[i] = formatValue(v.Index(i), indentation+1) - if len(result[i]) > longest { - longest = len(result[i]) - } - } - - if longest > longFormThreshold { - indenter := strings.Repeat(Indent, int(indentation)) - return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) - } else { - return fmt.Sprintf("[%s]", strings.Join(result, ", ")) - } -} - -func formatMap(v reflect.Value, indentation uint) string { - l := v.Len() - result := make([]string, l) - - longest := 0 - for i, key := range v.MapKeys() { - value := v.MapIndex(key) - result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1)) - if len(result[i]) > longest { - longest = len(result[i]) - } - } - - if longest > longFormThreshold { - indenter := strings.Repeat(Indent, int(indentation)) - return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) - } else { - return fmt.Sprintf("{%s}", strings.Join(result, ", ")) - } -} - -func formatStruct(v reflect.Value, indentation uint) string { - t := v.Type() - - l := v.NumField() - result := []string{} - longest := 0 - for i := 0; i < l; i++ { - structField := t.Field(i) - fieldEntry := v.Field(i) - representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1)) - result = append(result, representation) - if len(representation) > longest { - longest = len(representation) - } - } - if longest > longFormThreshold { - indenter := strings.Repeat(Indent, int(indentation)) - return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) - } else { - return fmt.Sprintf("{%s}", 
strings.Join(result, ", ")) - } -} - -func isNilValue(a reflect.Value) bool { - switch a.Kind() { - case reflect.Invalid: - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return a.IsNil() - } - - return false -} - -func isNil(a interface{}) bool { - if a == nil { - return true - } - - switch reflect.TypeOf(a).Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return reflect.ValueOf(a).IsNil() - } - - return false -} diff --git a/kit/github.com/onsi/gomega/format/format_suite_test.go b/kit/github.com/onsi/gomega/format/format_suite_test.go deleted file mode 100644 index 46b6431..0000000 --- a/kit/github.com/onsi/gomega/format/format_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package format_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestFormat(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Format Suite") -} diff --git a/kit/github.com/onsi/gomega/format/format_test.go b/kit/github.com/onsi/gomega/format/format_test.go deleted file mode 100644 index 40a20f1..0000000 --- a/kit/github.com/onsi/gomega/format/format_test.go +++ /dev/null @@ -1,449 +0,0 @@ -package format_test - -import ( - "fmt" - "strings" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -//recursive struct - -type StringAlias string -type ByteAlias []byte -type IntAlias int - -type AStruct struct { - Exported string -} - -type SimpleStruct struct { - Name string - Enumeration int - Veritas bool - Data []byte - secret uint32 -} - -type ComplexStruct struct { - Strings []string - SimpleThings []*SimpleStruct - DataMaps map[int]ByteAlias -} - -type SecretiveStruct struct { - boolValue bool - intValue int - uintValue uint - uintptrValue uintptr - floatValue float32 - complexValue complex64 - chanValue chan bool - funcValue func() - pointerValue *int - sliceValue []string - byteSliceValue []byte - stringValue string - arrValue [3]int - byteArrValue [3]byte - mapValue map[string]int - structValue AStruct - interfaceValue interface{} -} - -type GoStringer struct { -} - -func (g GoStringer) GoString() string { - return "go-string" -} - -func (g GoStringer) String() string { - return "string" -} - -type Stringer struct { -} - -func (g Stringer) String() string { - return "string" -} - -var _ = Describe("Format", func() { - match := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { - if len(args) > 0 { - valueRepresentation = fmt.Sprintf(valueRepresentation, args...) - } - return Equal(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) - } - - matchRegexp := func(typeRepresentation string, valueRepresentation string, args ...interface{}) types.GomegaMatcher { - if len(args) > 0 { - valueRepresentation = fmt.Sprintf(valueRepresentation, args...) 
- } - return MatchRegexp(fmt.Sprintf("%s<%s>: %s", Indent, typeRepresentation, valueRepresentation)) - } - - hashMatchingRegexp := func(entries ...string) string { - entriesSwitch := "(" + strings.Join(entries, "|") + ")" - arr := make([]string, len(entries)) - for i := range arr { - arr[i] = entriesSwitch - } - return "{" + strings.Join(arr, ", ") + "}" - } - - Describe("Message", func() { - Context("with only an actual value", func() { - It("should print out an indented formatted representation of the value and the message", func() { - Ω(Message(3, "to be three.")).Should(Equal("Expected\n : 3\nto be three.")) - }) - }) - - Context("with an actual and an expected value", func() { - It("should print out an indented formatted representatino of both values, and the message", func() { - Ω(Message(3, "to equal", 4)).Should(Equal("Expected\n : 3\nto equal\n : 4")) - }) - }) - }) - - Describe("IndentString", func() { - It("should indent the string", func() { - Ω(IndentString("foo\n bar\nbaz", 2)).Should(Equal(" foo\n bar\n baz")) - }) - }) - - Describe("Object", func() { - Describe("formatting boolean values", func() { - It("should give the type and format values correctly", func() { - Ω(Object(true, 1)).Should(match("bool", "true")) - Ω(Object(false, 1)).Should(match("bool", "false")) - }) - }) - - Describe("formatting numbers", func() { - It("should give the type and format values correctly", func() { - Ω(Object(int(3), 1)).Should(match("int", "3")) - Ω(Object(int8(3), 1)).Should(match("int8", "3")) - Ω(Object(int16(3), 1)).Should(match("int16", "3")) - Ω(Object(int32(3), 1)).Should(match("int32", "3")) - Ω(Object(int64(3), 1)).Should(match("int64", "3")) - - Ω(Object(uint(3), 1)).Should(match("uint", "3")) - Ω(Object(uint8(3), 1)).Should(match("uint8", "3")) - Ω(Object(uint16(3), 1)).Should(match("uint16", "3")) - Ω(Object(uint32(3), 1)).Should(match("uint32", "3")) - Ω(Object(uint64(3), 1)).Should(match("uint64", "3")) - }) - - It("should handle uintptr 
differently", func() { - Ω(Object(uintptr(3), 1)).Should(match("uintptr", "0x3")) - }) - }) - - Describe("formatting channels", func() { - It("should give the type and format values correctly", func() { - c := make(chan<- bool, 3) - c <- true - c <- false - Ω(Object(c, 1)).Should(match("chan<- bool | len:2, cap:3", "%v", c)) - }) - }) - - Describe("formatting strings", func() { - It("should give the type and format values correctly", func() { - s := "a\nb\nc" - Ω(Object(s, 1)).Should(match("string", `a - b - c`)) - }) - }) - - Describe("formatting []byte slices", func() { - It("should present them as strings", func() { - b := []byte("a\nb\nc") - Ω(Object(b, 1)).Should(matchRegexp(`\[\]uint8 \| len:5, cap:\d+`, `a - b - c`)) - }) - }) - - Describe("formatting functions", func() { - It("should give the type and format values correctly", func() { - f := func(a string, b []int) ([]byte, error) { - return []byte("abc"), nil - } - Ω(Object(f, 1)).Should(match("func(string, []int) ([]uint8, error)", "%v", f)) - }) - }) - - Describe("formatting pointers", func() { - It("should give the type and dereference the value to format it correctly", func() { - a := 3 - Ω(Object(&a, 1)).Should(match(fmt.Sprintf("*int | %p", &a), "3")) - }) - - Context("when there are pointers to pointers...", func() { - It("should recursively deference the pointer until it gets to a value", func() { - a := 3 - var b *int - var c **int - var d ***int - b = &a - c = &b - d = &c - - Ω(Object(d, 1)).Should(match(fmt.Sprintf("***int | %p", d), "3")) - }) - }) - - Context("when the pointer points to nil", func() { - It("should say nil and not explode", func() { - var a *AStruct - Ω(Object(a, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) - }) - }) - }) - - Describe("formatting arrays", func() { - It("should give the type and format values correctly", func() { - w := [3]string{"Jed Bartlet", "Toby Ziegler", "CJ Cregg"} - Ω(Object(w, 1)).Should(match("[3]string", `["Jed Bartlet", "Toby Ziegler", "CJ 
Cregg"]`)) - }) - - Context("with byte arrays", func() { - It("should give the type and format values correctly", func() { - w := [3]byte{17, 28, 19} - Ω(Object(w, 1)).Should(match("[3]uint8", `[17, 28, 19]`)) - }) - }) - }) - - Describe("formatting slices", func() { - It("should include the length and capacity in the type information", func() { - s := make([]bool, 3, 4) - Ω(Object(s, 1)).Should(match("[]bool | len:3, cap:4", "[false, false, false]")) - }) - - Context("when the slice contains long entries", func() { - It("should format the entries with newlines", func() { - w := []string{"Josiah Edward Bartlet", "Toby Ziegler", "CJ Cregg"} - expected := `[ - "Josiah Edward Bartlet", - "Toby Ziegler", - "CJ Cregg", - ]` - Ω(Object(w, 1)).Should(match("[]string | len:3, cap:3", expected)) - }) - }) - }) - - Describe("formatting maps", func() { - It("should include the length in the type information", func() { - m := make(map[int]bool, 5) - m[3] = true - m[4] = false - Ω(Object(m, 1)).Should(matchRegexp(`map\[int\]bool \| len:2`, hashMatchingRegexp("3: true", "4: false"))) - }) - - Context("when the slice contains long entries", func() { - It("should format the entries with newlines", func() { - m := map[string][]byte{} - m["Josiah Edward Bartlet"] = []byte("Martin Sheen") - m["Toby Ziegler"] = []byte("Richard Schiff") - m["CJ Cregg"] = []byte("Allison Janney") - expected := `{ - ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), - ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), - ("Josiah Edward Bartlet": "Martin Sheen"|"Toby Ziegler": "Richard Schiff"|"CJ Cregg": "Allison Janney"), - }` - Ω(Object(m, 1)).Should(matchRegexp(`map\[string\]\[\]uint8 \| len:3`, expected)) - }) - }) - }) - - Describe("formatting structs", func() { - It("should include the struct name and the field names", func() { - s := SimpleStruct{ - Name: "Oswald", - Enumeration: 17, - 
Veritas: true, - Data: []byte("datum"), - secret: 1983, - } - - Ω(Object(s, 1)).Should(match("format_test.SimpleStruct", `{Name: "Oswald", Enumeration: 17, Veritas: true, Data: "datum", secret: 1983}`)) - }) - - Context("when the struct contains long entries", func() { - It("should format the entries with new lines", func() { - s := &SimpleStruct{ - Name: "Mithrandir Gandalf Greyhame", - Enumeration: 2021, - Veritas: true, - Data: []byte("wizard"), - secret: 3, - } - - Ω(Object(s, 1)).Should(match(fmt.Sprintf("*format_test.SimpleStruct | %p", s), `{ - Name: "Mithrandir Gandalf Greyhame", - Enumeration: 2021, - Veritas: true, - Data: "wizard", - secret: 3, - }`)) - }) - }) - }) - - Describe("formatting nil values", func() { - It("should print out nil", func() { - Ω(Object(nil, 1)).Should(match("nil", "nil")) - var typedNil *AStruct - Ω(Object(typedNil, 1)).Should(match("*format_test.AStruct | 0x0", "nil")) - var c chan<- bool - Ω(Object(c, 1)).Should(match("chan<- bool | len:0, cap:0", "nil")) - var s []string - Ω(Object(s, 1)).Should(match("[]string | len:0, cap:0", "nil")) - var m map[string]bool - Ω(Object(m, 1)).Should(match("map[string]bool | len:0", "nil")) - }) - }) - - Describe("formatting aliased types", func() { - It("should print out the correct alias type", func() { - Ω(Object(StringAlias("alias"), 1)).Should(match("format_test.StringAlias", `alias`)) - Ω(Object(ByteAlias("alias"), 1)).Should(matchRegexp(`format_test\.ByteAlias \| len:5, cap:\d+`, `alias`)) - Ω(Object(IntAlias(3), 1)).Should(match("format_test.IntAlias", "3")) - }) - }) - - Describe("handling nested things", func() { - It("should produce a correctly nested representation", func() { - s := ComplexStruct{ - Strings: []string{"lots", "of", "short", "strings"}, - SimpleThings: []*SimpleStruct{ - {"short", 7, true, []byte("succinct"), 17}, - {"something longer", 427, true, []byte("designed to wrap around nicely"), 30}, - }, - DataMaps: map[int]ByteAlias{ - 17: ByteAlias("some substantially 
longer chunks of data"), - 1138: ByteAlias("that should make things wrap"), - }, - } - expected := `{ - Strings: \["lots", "of", "short", "strings"\], - SimpleThings: \[ - {Name: "short", Enumeration: 7, Veritas: true, Data: "succinct", secret: 17}, - { - Name: "something longer", - Enumeration: 427, - Veritas: true, - Data: "designed to wrap around nicely", - secret: 30, - }, - \], - DataMaps: { - (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), - (17: "some substantially longer chunks of data"|1138: "that should make things wrap"), - }, - }` - Ω(Object(s, 1)).Should(matchRegexp(`format_test\.ComplexStruct`, expected)) - }) - }) - }) - - Describe("Handling unexported fields in structs", func() { - It("should handle all the various types correctly", func() { - a := int(5) - s := SecretiveStruct{ - boolValue: true, - intValue: 3, - uintValue: 4, - uintptrValue: 5, - floatValue: 6.0, - complexValue: complex(5.0, 3.0), - chanValue: make(chan bool, 2), - funcValue: func() {}, - pointerValue: &a, - sliceValue: []string{"string", "slice"}, - byteSliceValue: []byte("bytes"), - stringValue: "a string", - arrValue: [3]int{11, 12, 13}, - byteArrValue: [3]byte{17, 20, 32}, - mapValue: map[string]int{"a key": 20, "b key": 30}, - structValue: AStruct{"exported"}, - interfaceValue: map[string]int{"a key": 17}, - } - - expected := fmt.Sprintf(`{ - boolValue: true, - intValue: 3, - uintValue: 4, - uintptrValue: 0x5, - floatValue: 6, - complexValue: \(5\+3i\), - chanValue: %p, - funcValue: %p, - pointerValue: 5, - sliceValue: \["string", "slice"\], - byteSliceValue: "bytes", - stringValue: "a string", - arrValue: \[11, 12, 13\], - byteArrValue: \[17, 20, 32\], - mapValue: %s, - structValue: {Exported: "exported"}, - interfaceValue: {"a key": 17}, - }`, s.chanValue, s.funcValue, hashMatchingRegexp(`"a key": 20`, `"b key": 30`)) - - Ω(Object(s, 1)).Should(matchRegexp(`format_test\.SecretiveStruct`, expected)) - }) - }) - - Describe("Handling 
interfaces", func() { - It("should unpack the interface", func() { - outerHash := map[string]interface{}{} - innerHash := map[string]int{} - - innerHash["inner"] = 3 - outerHash["integer"] = 2 - outerHash["map"] = innerHash - - expected := hashMatchingRegexp(`"integer": 2`, `"map": {"inner": 3}`) - Ω(Object(outerHash, 1)).Should(matchRegexp(`map\[string\]interface {} \| len:2`, expected)) - }) - }) - - Describe("Handling recursive things", func() { - It("should not go crazy...", func() { - m := map[string]interface{}{} - m["integer"] = 2 - m["map"] = m - Ω(Object(m, 1)).Should(ContainSubstring("...")) - }) - }) - - Describe("When instructed to use the Stringer representation", func() { - BeforeEach(func() { - UseStringerRepresentation = true - }) - - AfterEach(func() { - UseStringerRepresentation = false - }) - - Context("when passed a GoStringer", func() { - It("should use what GoString() returns", func() { - Ω(Object(GoStringer{}, 1)).Should(ContainSubstring(": go-string")) - }) - }) - - Context("when passed a stringer", func() { - It("should use what String() returns", func() { - Ω(Object(Stringer{}, 1)).Should(ContainSubstring(": string")) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/gbytes/buffer.go b/kit/github.com/onsi/gomega/gbytes/buffer.go deleted file mode 100644 index 7e42334..0000000 --- a/kit/github.com/onsi/gomega/gbytes/buffer.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Package gbytes provides a buffer that supports incrementally detecting input. - -You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fastforwards the buffer's read cursor to the end of that match. - -Subsequent matches against the buffer will only operate against data that appears *after* the read cursor. - -The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always -access the entire buffer's contents with Contents(). 
- -*/ -package gbytes - -import ( - "errors" - "fmt" - "regexp" - "sync" - "time" -) - -/* -gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher. - -You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code! -*/ -type Buffer struct { - contents []byte - readCursor uint64 - lock *sync.Mutex - detectCloser chan interface{} - closed bool -} - -/* -NewBuffer returns a new gbytes.Buffer -*/ -func NewBuffer() *Buffer { - return &Buffer{ - lock: &sync.Mutex{}, - } -} - -/* -BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes -*/ -func BufferWithBytes(bytes []byte) *Buffer { - return &Buffer{ - lock: &sync.Mutex{}, - contents: bytes, - } -} - -/* -Write implements the io.Writer interface -*/ -func (b *Buffer) Write(p []byte) (n int, err error) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.closed { - return 0, errors.New("attempt to write to closed buffer") - } - - b.contents = append(b.contents, p...) - return len(p), nil -} - -/* -Close signifies that the buffer will no longer be written to -*/ -func (b *Buffer) Close() error { - b.lock.Lock() - defer b.lock.Unlock() - - b.closed = true - - return nil -} - -/* -Closed returns true if the buffer has been closed -*/ -func (b *Buffer) Closed() bool { - b.lock.Lock() - defer b.lock.Unlock() - - return b.closed -} - -/* -Contents returns all data ever written to the buffer. -*/ -func (b *Buffer) Contents() []byte { - b.lock.Lock() - defer b.lock.Unlock() - - contents := make([]byte, len(b.contents)) - copy(contents, b.contents) - return contents -} - -/* -Detect takes a regular expression and returns a channel. - -The channel will receive true the first time data matching the regular expression is written to the buffer. -The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region. 
- -You typically don't need to use Detect and should use the ghttp.Say matcher instead. Detect is useful, however, in cases where your code must -be branch and handle different outputs written to the buffer. - -For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library. - -You could do something like: - -select { -case <-buffer.Detect("You are not logged in"): - //log in -case <-buffer.Detect("Success"): - //carry on -case <-time.After(time.Second): - //welp -} -buffer.CancelDetects() - -You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them. - -Finally, you can pass detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf. -*/ -func (b *Buffer) Detect(desired string, args ...interface{}) chan bool { - formattedRegexp := desired - if len(args) > 0 { - formattedRegexp = fmt.Sprintf(desired, args...) - } - re := regexp.MustCompile(formattedRegexp) - - b.lock.Lock() - defer b.lock.Unlock() - - if b.detectCloser == nil { - b.detectCloser = make(chan interface{}) - } - - closer := b.detectCloser - response := make(chan bool) - go func() { - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - defer close(response) - for { - select { - case <-ticker.C: - b.lock.Lock() - data, cursor := b.contents[b.readCursor:], b.readCursor - loc := re.FindIndex(data) - b.lock.Unlock() - - if loc != nil { - response <- true - b.lock.Lock() - newCursorPosition := cursor + uint64(loc[1]) - if newCursorPosition >= b.readCursor { - b.readCursor = newCursorPosition - } - b.lock.Unlock() - return - } - case <-closer: - return - } - } - }() - - return response -} - -/* -CancelDetects cancels any pending detects and cleans up their goroutines. 
You should always call this when you're done with a set of Detect channels. -*/ -func (b *Buffer) CancelDetects() { - b.lock.Lock() - defer b.lock.Unlock() - - close(b.detectCloser) - b.detectCloser = nil -} - -func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) { - b.lock.Lock() - defer b.lock.Unlock() - - unreadBytes := b.contents[b.readCursor:] - copyOfUnreadBytes := make([]byte, len(unreadBytes)) - copy(copyOfUnreadBytes, unreadBytes) - - loc := re.FindIndex(unreadBytes) - - if loc != nil { - b.readCursor += uint64(loc[1]) - return true, copyOfUnreadBytes - } else { - return false, copyOfUnreadBytes - } -} diff --git a/kit/github.com/onsi/gomega/gbytes/buffer_test.go b/kit/github.com/onsi/gomega/gbytes/buffer_test.go deleted file mode 100644 index 09b4910..0000000 --- a/kit/github.com/onsi/gomega/gbytes/buffer_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package gbytes_test - -import ( - "time" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gbytes" - - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("Buffer", func() { - var buffer *Buffer - - BeforeEach(func() { - buffer = NewBuffer() - }) - - Describe("dumping the entire contents of the buffer", func() { - It("should return everything that's been written", func() { - buffer.Write([]byte("abc")) - buffer.Write([]byte("def")) - Ω(buffer.Contents()).Should(Equal([]byte("abcdef"))) - - Ω(buffer).Should(Say("bcd")) - Ω(buffer.Contents()).Should(Equal([]byte("abcdef"))) - }) - }) - - Describe("creating a buffer with bytes", func() { - It("should create the buffer with the cursor set to the beginning", func() { - buffer := BufferWithBytes([]byte("abcdef")) - Ω(buffer.Contents()).Should(Equal([]byte("abcdef"))) - Ω(buffer).Should(Say("abc")) - Ω(buffer).ShouldNot(Say("abc")) - Ω(buffer).Should(Say("def")) - }) - }) - - Describe("detecting regular expressions", func() { - It("should fire the appropriate channel when the passed in pattern matches, then close it", func(done Done) { - go func() { - time.Sleep(10 * time.Millisecond) - buffer.Write([]byte("abcde")) - }() - - A := buffer.Detect("%s", "a.c") - B := buffer.Detect("def") - - var gotIt bool - select { - case gotIt = <-A: - case <-B: - Fail("should not have gotten here") - } - - Ω(gotIt).Should(BeTrue()) - Eventually(A).Should(BeClosed()) - - buffer.Write([]byte("f")) - Eventually(B).Should(Receive()) - Eventually(B).Should(BeClosed()) - - close(done) - }) - - It("should fast-forward the buffer upon detection", func(done Done) { - buffer.Write([]byte("abcde")) - <-buffer.Detect("abc") - Ω(buffer).ShouldNot(Say("abc")) - Ω(buffer).Should(Say("de")) - close(done) - }) - - It("should only fast-forward the buffer when the channel is read, and only if doing so would not rewind it", func(done Done) { - buffer.Write([]byte("abcde")) - A := buffer.Detect("abc") - time.Sleep(20 * time.Millisecond) //give the goroutine a chance to detect and write to the channel - Ω(buffer).Should(Say("abcd")) 
- <-A - Ω(buffer).ShouldNot(Say("d")) - Ω(buffer).Should(Say("e")) - Eventually(A).Should(BeClosed()) - close(done) - }) - - It("should be possible to cancel a detection", func(done Done) { - A := buffer.Detect("abc") - B := buffer.Detect("def") - buffer.CancelDetects() - buffer.Write([]byte("abcdef")) - Eventually(A).Should(BeClosed()) - Eventually(B).Should(BeClosed()) - - Ω(buffer).Should(Say("bcde")) - <-buffer.Detect("f") - close(done) - }) - }) - - Describe("closing the buffer", func() { - It("should error when further write attempts are made", func() { - _, err := buffer.Write([]byte("abc")) - Ω(err).ShouldNot(HaveOccurred()) - - buffer.Close() - - _, err = buffer.Write([]byte("def")) - Ω(err).Should(HaveOccurred()) - - Ω(buffer.Contents()).Should(Equal([]byte("abc"))) - }) - - It("should be closed", func() { - Ω(buffer.Closed()).Should(BeFalse()) - - buffer.Close() - - Ω(buffer.Closed()).Should(BeTrue()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go b/kit/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go deleted file mode 100644 index adc4278..0000000 --- a/kit/github.com/onsi/gomega/gbytes/gbuffer_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package gbytes_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestGbytes(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Gbytes Suite") -} diff --git a/kit/github.com/onsi/gomega/gbytes/say_matcher.go b/kit/github.com/onsi/gomega/gbytes/say_matcher.go deleted file mode 100644 index fe1efea..0000000 --- a/kit/github.com/onsi/gomega/gbytes/say_matcher.go +++ /dev/null @@ -1,105 +0,0 @@ -package gbytes - -import ( - "fmt" - "regexp" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -//Objects satisfying the BufferProvider can be used with the Say matcher. 
-type BufferProvider interface { - Buffer() *Buffer -} - -/* -Say is a Gomega matcher that operates on gbytes.Buffers: - - Ω(buffer).Should(Say("something")) - -will succeed if the unread portion of the buffer matches the regular expression "something". - -When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the succesful match. -Thus, subsequent calls to Say will only match against the unread portion of the buffer - -Say pairs very well with Eventually. To asser that a buffer eventually receives data matching "[123]-star" within 3 seconds you can: - - Eventually(buffer, 3).Should(Say("[123]-star")) - -Ditto with consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can: - - Consistently(buffer, 1).ShouldNot(Say("never-see-this")) - -In addition to bytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface. -In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer() - -If the buffer is closed, the Say matcher will tell Eventually to abort. -*/ -func Say(expected string, args ...interface{}) *sayMatcher { - formattedRegexp := expected - if len(args) > 0 { - formattedRegexp = fmt.Sprintf(expected, args...) - } - return &sayMatcher{ - re: regexp.MustCompile(formattedRegexp), - } -} - -type sayMatcher struct { - re *regexp.Regexp - receivedSayings []byte -} - -func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) { - var buffer *Buffer - - switch x := actual.(type) { - case *Buffer: - buffer = x - case BufferProvider: - buffer = x.Buffer() - default: - return nil, false - } - - return buffer, true -} - -func (m *sayMatcher) Match(actual interface{}) (success bool, err error) { - buffer, ok := m.buffer(actual) - if !ok { - return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. 
Got:\n%s", format.Object(actual, 1)) - } - - didSay, sayings := buffer.didSay(m.re) - m.receivedSayings = sayings - - return didSay, nil -} - -func (m *sayMatcher) FailureMessage(actual interface{}) (message string) { - return fmt.Sprintf( - "Got stuck at:\n%s\nWaiting for:\n%s", - format.IndentString(string(m.receivedSayings), 1), - format.IndentString(m.re.String(), 1), - ) -} - -func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf( - "Saw:\n%s\nWhich matches the unexpected:\n%s", - format.IndentString(string(m.receivedSayings), 1), - format.IndentString(m.re.String(), 1), - ) -} - -func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { - switch x := actual.(type) { - case *Buffer: - return !x.Closed() - case BufferProvider: - return !x.Buffer().Closed() - default: - return true - } -} diff --git a/kit/github.com/onsi/gomega/gbytes/say_matcher_test.go b/kit/github.com/onsi/gomega/gbytes/say_matcher_test.go deleted file mode 100644 index ae0b4ae..0000000 --- a/kit/github.com/onsi/gomega/gbytes/say_matcher_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package gbytes_test - -import ( - "time" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gbytes" - - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -type speaker struct { - buffer *Buffer -} - -func (s *speaker) Buffer() *Buffer { - return s.buffer -} - -var _ = Describe("SayMatcher", func() { - var buffer *Buffer - - BeforeEach(func() { - buffer = NewBuffer() - buffer.Write([]byte("abc")) - }) - - Context("when actual is not a gexec Buffer, or a BufferProvider", func() { - It("should error", func() { - failures := InterceptGomegaFailures(func() { - Ω("foo").Should(Say("foo")) - }) - Ω(failures[0]).Should(ContainSubstring("*gbytes.Buffer")) - }) - }) - - Context("when a match is found", func() { - It("should succeed", func() { - Ω(buffer).Should(Say("abc")) - }) - - It("should support printf-like formatting", func() { - Ω(buffer).Should(Say("a%sc", "b")) - }) - - It("should use a regular expression", func() { - Ω(buffer).Should(Say("a.c")) - }) - - It("should fastforward the buffer", func() { - buffer.Write([]byte("def")) - Ω(buffer).Should(Say("abcd")) - Ω(buffer).Should(Say("ef")) - Ω(buffer).ShouldNot(Say("[a-z]")) - }) - }) - - Context("when no match is found", func() { - It("should not error", func() { - Ω(buffer).ShouldNot(Say("def")) - }) - - Context("when the buffer is closed", func() { - BeforeEach(func() { - buffer.Close() - }) - - It("should abort an eventually", func() { - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(buffer).Should(Say("def")) - }) - Eventually(buffer).ShouldNot(Say("def")) - Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) - Ω(failures).Should(HaveLen(1)) - - t = time.Now() - Eventually(buffer).Should(Say("abc")) - Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) - }) - - It("should abort a consistently", func() { - t := time.Now() - Consistently(buffer, 2.0).ShouldNot(Say("def")) - Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) - }) - - It("should not error with a synchronous matcher", func() { - Ω(buffer).ShouldNot(Say("def")) - 
Ω(buffer).Should(Say("abc")) - }) - }) - }) - - Context("when a positive match fails", func() { - It("should report where it got stuck", func() { - Ω(buffer).Should(Say("abc")) - buffer.Write([]byte("def")) - failures := InterceptGomegaFailures(func() { - Ω(buffer).Should(Say("abc")) - }) - Ω(failures[0]).Should(ContainSubstring("Got stuck at:")) - Ω(failures[0]).Should(ContainSubstring("def")) - }) - }) - - Context("when a negative match fails", func() { - It("should report where it got stuck", func() { - failures := InterceptGomegaFailures(func() { - Ω(buffer).ShouldNot(Say("abc")) - }) - Ω(failures[0]).Should(ContainSubstring("Saw:")) - Ω(failures[0]).Should(ContainSubstring("Which matches the unexpected:")) - Ω(failures[0]).Should(ContainSubstring("abc")) - }) - }) - - Context("when a match is not found", func() { - It("should not fastforward the buffer", func() { - Ω(buffer).ShouldNot(Say("def")) - Ω(buffer).Should(Say("abc")) - }) - }) - - Context("a nice real-life example", func() { - It("should behave well", func() { - Ω(buffer).Should(Say("abc")) - go func() { - time.Sleep(10 * time.Millisecond) - buffer.Write([]byte("def")) - }() - Ω(buffer).ShouldNot(Say("def")) - Eventually(buffer).Should(Say("def")) - }) - }) - - Context("when actual is a BufferProvider", func() { - It("should use actual's buffer", func() { - s := &speaker{ - buffer: NewBuffer(), - } - - Ω(s).ShouldNot(Say("abc")) - - s.Buffer().Write([]byte("abc")) - Ω(s).Should(Say("abc")) - }) - - It("should abort an eventually", func() { - s := &speaker{ - buffer: NewBuffer(), - } - - s.buffer.Close() - - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(s).Should(Say("def")) - }) - Ω(failures).Should(HaveLen(1)) - Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/gexec/_fixture/firefly/main.go b/kit/github.com/onsi/gomega/gexec/_fixture/firefly/main.go deleted file mode 100644 index 16091c2..0000000 --- 
a/kit/github.com/onsi/gomega/gexec/_fixture/firefly/main.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "fmt" - "math/rand" - "os" - "strconv" - "time" -) - -var outQuote = "We've done the impossible, and that makes us mighty." -var errQuote = "Ah, curse your sudden but inevitable betrayal!" - -var randomQuotes = []string{ - "Can we maybe vote on the whole murdering people issue?", - "I swear by my pretty floral bonnet, I will end you.", - "My work's illegal, but at least it's honest.", -} - -func main() { - fmt.Fprintln(os.Stdout, outQuote) - fmt.Fprintln(os.Stderr, errQuote) - - randomIndex := rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(randomQuotes)) - - time.Sleep(100 * time.Millisecond) - - fmt.Fprintln(os.Stdout, randomQuotes[randomIndex]) - - if len(os.Args) == 2 { - exitCode, _ := strconv.Atoi(os.Args[1]) - os.Exit(exitCode) - } else { - os.Exit(randomIndex) - } -} diff --git a/kit/github.com/onsi/gomega/gexec/build.go b/kit/github.com/onsi/gomega/gexec/build.go deleted file mode 100644 index 3e9bf9f..0000000 --- a/kit/github.com/onsi/gomega/gexec/build.go +++ /dev/null @@ -1,78 +0,0 @@ -package gexec - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" -) - -var tmpDir string - -/* -Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory. -A path pointing to this binary is returned. - -Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`. -*/ -func Build(packagePath string, args ...string) (compiledPath string, err error) { - return BuildIn(os.Getenv("GOPATH"), packagePath, args...) -} - -/* -BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument). 
-*/ -func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) { - tmpDir, err := temporaryDirectory() - if err != nil { - return "", err - } - - if len(gopath) == 0 { - return "", errors.New("$GOPATH not provided when building " + packagePath) - } - - executable := filepath.Join(tmpDir, path.Base(packagePath)) - if runtime.GOOS == "windows" { - executable = executable + ".exe" - } - - cmdArgs := append([]string{"build"}, args...) - cmdArgs = append(cmdArgs, "-o", executable, packagePath) - - build := exec.Command("go", cmdArgs...) - build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...) - - output, err := build.CombinedOutput() - if err != nil { - return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output)) - } - - return executable, nil -} - -/* -You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by -gexec. In Ginkgo this is typically done in an AfterSuite callback. -*/ -func CleanupBuildArtifacts() { - if tmpDir != "" { - os.RemoveAll(tmpDir) - } -} - -func temporaryDirectory() (string, error) { - var err error - if tmpDir == "" { - tmpDir, err = ioutil.TempDir("", "gexec_artifacts") - if err != nil { - return "", err - } - } - - return ioutil.TempDir(tmpDir, "g") -} diff --git a/kit/github.com/onsi/gomega/gexec/exit_matcher.go b/kit/github.com/onsi/gomega/gexec/exit_matcher.go deleted file mode 100644 index 6851c5e..0000000 --- a/kit/github.com/onsi/gomega/gexec/exit_matcher.go +++ /dev/null @@ -1,88 +0,0 @@ -package gexec - -import ( - "fmt" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -/* -The Exit matcher operates on a session: - - Ω(session).Should(Exit()) - -Exit passes if the session has already exited. - -If no status code is provided, then Exit will succeed if the session has exited regardless of exit code. 
-Otherwise, Exit will only succeed if the process has exited with the provided status code. - -Note that the process must have already exited. To wait for a process to exit, use Eventually: - - Eventually(session, 3).Should(Exit(0)) -*/ -func Exit(optionalExitCode ...int) *exitMatcher { - exitCode := -1 - if len(optionalExitCode) > 0 { - exitCode = optionalExitCode[0] - } - - return &exitMatcher{ - exitCode: exitCode, - } -} - -type exitMatcher struct { - exitCode int - didExit bool - actualExitCode int -} - -type Exiter interface { - ExitCode() int -} - -func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { - exiter, ok := actual.(Exiter) - if !ok { - return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) - } - - m.actualExitCode = exiter.ExitCode() - - if m.actualExitCode == -1 { - return false, nil - } - - if m.exitCode == -1 { - return true, nil - } - return m.exitCode == m.actualExitCode, nil -} - -func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { - if m.actualExitCode == -1 { - return "Expected process to exit. It did not." - } else { - return format.Message(m.actualExitCode, "to match exit code:", m.exitCode) - } -} - -func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { - if m.actualExitCode == -1 { - return "you really shouldn't be able to see this!" - } else { - if m.exitCode == -1 { - return "Expected process not to exit. It did." 
- } else { - return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode) - } - } -} - -func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { - session, ok := actual.(*Session) - if ok { - return session.ExitCode() == -1 - } - return true -} diff --git a/kit/github.com/onsi/gomega/gexec/exit_matcher_test.go b/kit/github.com/onsi/gomega/gexec/exit_matcher_test.go deleted file mode 100644 index af308e1..0000000 --- a/kit/github.com/onsi/gomega/gexec/exit_matcher_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package gexec_test - -import ( - "os/exec" - "time" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -type NeverExits struct{} - -func (e NeverExits) ExitCode() int { - return -1 -} - -var _ = Describe("ExitMatcher", func() { - var command *exec.Cmd - var session *Session - - BeforeEach(func() { - var err error - command = exec.Command(fireflyPath, "0") - session, err = Start(command, nil, nil) - Ω(err).ShouldNot(HaveOccurred()) - }) - - Describe("when passed something that is an Exiter", func() { - It("should act normally", func() { - failures := InterceptGomegaFailures(func() { - Ω(NeverExits{}).Should(Exit()) - }) - - Ω(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) - }) - }) - - Describe("when passed something that is not an Exiter", func() { - It("should error", func() { - failures := InterceptGomegaFailures(func() { - Ω("aardvark").Should(Exit()) - }) - - Ω(failures[0]).Should(ContainSubstring("Exit must be passed a gexec.Exiter")) - }) - }) - - Context("with no exit code", func() { - It("should say the right things when it fails", func() { - Ω(session).ShouldNot(Exit()) - - failures := InterceptGomegaFailures(func() { - Ω(session).Should(Exit()) - }) - - Ω(failures[0]).Should(ContainSubstring("Expected process to exit. 
It did not.")) - - Eventually(session).Should(Exit()) - - Ω(session).Should(Exit()) - - failures = InterceptGomegaFailures(func() { - Ω(session).ShouldNot(Exit()) - }) - - Ω(failures[0]).Should(ContainSubstring("Expected process not to exit. It did.")) - }) - }) - - Context("with an exit code", func() { - It("should say the right things when it fails", func() { - Ω(session).ShouldNot(Exit(0)) - Ω(session).ShouldNot(Exit(1)) - - failures := InterceptGomegaFailures(func() { - Ω(session).Should(Exit(0)) - }) - - Ω(failures[0]).Should(ContainSubstring("Expected process to exit. It did not.")) - - Eventually(session).Should(Exit(0)) - - Ω(session).Should(Exit(0)) - - failures = InterceptGomegaFailures(func() { - Ω(session).Should(Exit(1)) - }) - - Ω(failures[0]).Should(ContainSubstring("to match exit code:")) - - Ω(session).ShouldNot(Exit(1)) - - failures = InterceptGomegaFailures(func() { - Ω(session).ShouldNot(Exit(0)) - }) - - Ω(failures[0]).Should(ContainSubstring("not to match exit code:")) - }) - }) - - Describe("bailing out early", func() { - It("should bail out early once the process exits", func() { - t := time.Now() - - failures := InterceptGomegaFailures(func() { - Eventually(session).Should(Exit(1)) - }) - Ω(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond)) - Ω(failures).Should(HaveLen(1)) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/gexec/gexec_suite_test.go b/kit/github.com/onsi/gomega/gexec/gexec_suite_test.go deleted file mode 100644 index a1b4de5..0000000 --- a/kit/github.com/onsi/gomega/gexec/gexec_suite_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package gexec_test - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - - "testing" -) - -var fireflyPath string - -func TestGexec(t *testing.T) { - BeforeSuite(func() { - var err error - fireflyPath, err = gexec.Build("./_fixture/firefly") - Ω(err).ShouldNot(HaveOccurred()) - }) - - AfterSuite(func() { - gexec.CleanupBuildArtifacts() - }) - - RegisterFailHandler(Fail) - RunSpecs(t, "Gexec Suite") -} diff --git a/kit/github.com/onsi/gomega/gexec/prefixed_writer.go b/kit/github.com/onsi/gomega/gexec/prefixed_writer.go deleted file mode 100644 index 556182b..0000000 --- a/kit/github.com/onsi/gomega/gexec/prefixed_writer.go +++ /dev/null @@ -1,80 +0,0 @@ -package gexec - -import ( - "bytes" - "io" - "sync" -) - -/* -PrefixedWriter wraps an io.Writer, emiting the passed in prefix at the beginning of each new line. -This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each -session by passing in a PrefixedWriter: - -gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter)) -*/ -type PrefixedWriter struct { - prefix []byte - writer io.Writer - lock *sync.Mutex - isNewLine bool - isFirstWrite bool -} - -func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter { - return &PrefixedWriter{ - prefix: []byte(prefix), - writer: writer, - lock: &sync.Mutex{}, - isFirstWrite: true, - } -} - -func (w *PrefixedWriter) Write(b []byte) (int, error) { - w.lock.Lock() - defer w.lock.Unlock() - - newLine := []byte("\n") - segments := bytes.Split(b, newLine) - - if len(segments) != 0 { - toWrite := []byte{} - if w.isFirstWrite { - toWrite = append(toWrite, w.prefix...) - toWrite = append(toWrite, segments[0]...) - w.isFirstWrite = false - } else if w.isNewLine { - toWrite = append(toWrite, newLine...) - toWrite = append(toWrite, w.prefix...) - toWrite = append(toWrite, segments[0]...) 
- } else { - toWrite = append(toWrite, segments[0]...) - } - - for i := 1; i < len(segments)-1; i++ { - toWrite = append(toWrite, newLine...) - toWrite = append(toWrite, w.prefix...) - toWrite = append(toWrite, segments[i]...) - } - - if len(segments) > 1 { - lastSegment := segments[len(segments)-1] - - if len(lastSegment) == 0 { - w.isNewLine = true - } else { - toWrite = append(toWrite, newLine...) - toWrite = append(toWrite, w.prefix...) - toWrite = append(toWrite, lastSegment...) - w.isNewLine = false - } - } - - _, err := w.writer.Write(toWrite) - if err != nil { - return 0, err - } - } - - return len(b), nil -} diff --git a/kit/github.com/onsi/gomega/gexec/prefixed_writer_test.go b/kit/github.com/onsi/gomega/gexec/prefixed_writer_test.go deleted file mode 100644 index 81f51d1..0000000 --- a/kit/github.com/onsi/gomega/gexec/prefixed_writer_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package gexec_test - -import ( - "bytes" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("PrefixedWriter", func() { - var buffer *bytes.Buffer - var writer *PrefixedWriter - BeforeEach(func() { - buffer = &bytes.Buffer{} - writer = NewPrefixedWriter("[p]", buffer) - }) - - It("should emit the prefix on newlines", func() { - writer.Write([]byte("abc")) - writer.Write([]byte("def\n")) - writer.Write([]byte("hij\n")) - writer.Write([]byte("\n\n")) - writer.Write([]byte("klm\n\nnop")) - writer.Write([]byte("")) - writer.Write([]byte("qrs")) - writer.Write([]byte("\ntuv\nwx")) - writer.Write([]byte("yz\n\n")) - - Ω(buffer.String()).Should(Equal(`[p]abcdef -[p]hij -[p] -[p] -[p]klm -[p] -[p]nopqrs -[p]tuv -[p]wxyz -[p]`)) - }) -}) diff --git a/kit/github.com/onsi/gomega/gexec/session.go b/kit/github.com/onsi/gomega/gexec/session.go deleted file mode 100644 index d8a5e56..0000000 --- a/kit/github.com/onsi/gomega/gexec/session.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Package gexec provides support for testing external processes. -*/ -package gexec - -import ( - "io" - "os" - "os/exec" - "reflect" - "sync" - "syscall" - - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gbytes" -) - -const INVALID_EXIT_CODE = 254 - -type Session struct { - //The wrapped command - Command *exec.Cmd - - //A *gbytes.Buffer connected to the command's stdout - Out *gbytes.Buffer - - //A *gbytes.Buffer connected to the command's stderr - Err *gbytes.Buffer - - //A channel that will close when the command exits - Exited <-chan struct{} - - lock *sync.Mutex - exitCode int -} - -/* -Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session. - -The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err. 
-These buffers can be used with the gbytes.Say matcher to match against unread output: - - Ω(session.Out).Should(gbytes.Say("foo-out")) - Ω(session.Err).Should(gbytes.Say("foo-err")) - -In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with: - - Ω(session).Should(gbytes.Say("foo-out")) - -When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gybtes.Buffers and to the passed-in outWriter/errWriter. -This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter: - - session, err := Start(command, GinkgoWriter, GinkgoWriter) - -This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails. - -The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself. -Instead, to assert that the command has exited you can use the gexec.Exit matcher: - - Ω(session).Should(gexec.Exit()) - -When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any -Eventuallys waiting fo the buffers to Say something. 
-*/ -func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) { - exited := make(chan struct{}) - - session := &Session{ - Command: command, - Out: gbytes.NewBuffer(), - Err: gbytes.NewBuffer(), - Exited: exited, - lock: &sync.Mutex{}, - exitCode: -1, - } - - var commandOut, commandErr io.Writer - - commandOut, commandErr = session.Out, session.Err - - if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() { - commandOut = io.MultiWriter(commandOut, outWriter) - } - - if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() { - commandErr = io.MultiWriter(commandErr, errWriter) - } - - command.Stdout = commandOut - command.Stderr = commandErr - - err := command.Start() - if err == nil { - go session.monitorForExit(exited) - } - - return session, err -} - -/* -Buffer implements the gbytes.BufferProvider interface and returns s.Out -This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out: - - Eventually(session).Should(gbytes.Say("foo")) -*/ -func (s *Session) Buffer() *gbytes.Buffer { - return s.Out -} - -/* -ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1. - -To assert that the command has exited it is more convenient to use the Exit matcher: - - Eventually(s).Should(gexec.Exit()) - -When the process exits because it has received a particular signal, the exit code will be 128+signal-value -(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html) - -*/ -func (s *Session) ExitCode() int { - s.lock.Lock() - defer s.lock.Unlock() - return s.exitCode -} - -/* -Wait waits until the wrapped command exits. It can be passed an optional timeout. -If the command does not exit within the timeout, Wait will trigger a test failure. 
- -Wait returns the session, making it possible to chain: - - session.Wait().Out.Contents() - -will wait for the command to exit then return the entirety of Out's contents. - -Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does. -*/ -func (s *Session) Wait(timeout ...interface{}) *Session { - Eventually(s, timeout...).Should(Exit()) - return s -} - -/* -Kill sends the running command a SIGKILL signal. It does not wait for the process to exit. - -If the command has already exited, Kill returns silently. - -The session is returned to enable chaining. -*/ -func (s *Session) Kill() *Session { - if s.ExitCode() != -1 { - return s - } - s.Command.Process.Kill() - return s -} - -/* -Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit. - -If the command has already exited, Interrupt returns silently. - -The session is returned to enable chaining. -*/ -func (s *Session) Interrupt() *Session { - return s.Signal(syscall.SIGINT) -} - -/* -Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit. - -If the command has already exited, Terminate returns silently. - -The session is returned to enable chaining. -*/ -func (s *Session) Terminate() *Session { - return s.Signal(syscall.SIGTERM) -} - -/* -Terminate sends the running command the passed in signal. It does not wait for the process to exit. - -If the command has already exited, Signal returns silently. - -The session is returned to enable chaining. 
-*/ -func (s *Session) Signal(signal os.Signal) *Session { - if s.ExitCode() != -1 { - return s - } - s.Command.Process.Signal(signal) - return s -} - -func (s *Session) monitorForExit(exited chan<- struct{}) { - err := s.Command.Wait() - s.lock.Lock() - s.Out.Close() - s.Err.Close() - status := s.Command.ProcessState.Sys().(syscall.WaitStatus) - if status.Signaled() { - s.exitCode = 128 + int(status.Signal()) - } else { - exitStatus := status.ExitStatus() - if exitStatus == -1 && err != nil { - s.exitCode = INVALID_EXIT_CODE - } - s.exitCode = exitStatus - } - s.lock.Unlock() - - close(exited) -} diff --git a/kit/github.com/onsi/gomega/gexec/session_test.go b/kit/github.com/onsi/gomega/gexec/session_test.go deleted file mode 100644 index 28ea612..0000000 --- a/kit/github.com/onsi/gomega/gexec/session_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package gexec_test - -import ( - "os/exec" - "syscall" - "time" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gbytes" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/gexec" - - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("Session", func() { - var command *exec.Cmd - var session *Session - - var outWriter, errWriter *Buffer - - BeforeEach(func() { - outWriter = nil - errWriter = nil - }) - - JustBeforeEach(func() { - command = exec.Command(fireflyPath) - var err error - session, err = Start(command, outWriter, errWriter) - Ω(err).ShouldNot(HaveOccurred()) - }) - - Context("running a command", func() { - It("should start the process", func() { - Ω(command.Process).ShouldNot(BeNil()) - }) - - It("should wrap the process's stdout and stderr with gbytes buffers", func(done Done) { - Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) - Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!")) - defer session.Out.CancelDetects() - - select { - case <-session.Out.Detect("Can we maybe vote on the whole murdering people issue"): - Eventually(session).Should(Exit(0)) - case <-session.Out.Detect("I swear by my pretty floral bonnet, I will end you."): - Eventually(session).Should(Exit(1)) - case <-session.Out.Detect("My work's illegal, but at least it's honest."): - Eventually(session).Should(Exit(2)) - } - - close(done) - }) - - It("should satisfy the gbytes.BufferProvider interface, passing Stdout", func() { - Eventually(session).Should(Say("We've done the impossible, and that makes us mighty")) - Eventually(session).Should(Exit()) - }) - }) - - Describe("providing the exit code", func() { - It("should provide the app's exit code", func() { - Ω(session.ExitCode()).Should(Equal(-1)) - - Eventually(session).Should(Exit()) - Ω(session.ExitCode()).Should(BeNumerically(">=", 0)) - Ω(session.ExitCode()).Should(BeNumerically("<", 3)) - }) - }) - - Describe("wait", func() { - It("should wait till the command exits", func() { - Ω(session.ExitCode()).Should(Equal(-1)) - Ω(session.Wait().ExitCode()).Should(BeNumerically(">=", 0)) - 
Ω(session.Wait().ExitCode()).Should(BeNumerically("<", 3)) - }) - }) - - Describe("exited", func() { - It("should close when the command exits", func() { - Eventually(session.Exited).Should(BeClosed()) - Ω(session.ExitCode()).ShouldNot(Equal(-1)) - }) - }) - - Describe("kill", func() { - It("should kill the command and wait for it to exit", func() { - session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) - Ω(err).ShouldNot(HaveOccurred()) - - session.Kill() - Ω(session).ShouldNot(Exit(), "Should not exit immediately...") - Eventually(session).Should(Exit(128 + 9)) - }) - }) - - Describe("interrupt", func() { - It("should interrupt the command", func() { - session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) - Ω(err).ShouldNot(HaveOccurred()) - - session.Interrupt() - Ω(session).ShouldNot(Exit(), "Should not exit immediately...") - Eventually(session).Should(Exit(128 + 2)) - }) - }) - - Describe("terminate", func() { - It("should terminate the command", func() { - session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) - Ω(err).ShouldNot(HaveOccurred()) - - session.Terminate() - Ω(session).ShouldNot(Exit(), "Should not exit immediately...") - Eventually(session).Should(Exit(128 + 15)) - }) - }) - - Describe("signal", func() { - It("should send the signal to the command", func() { - session, err := Start(exec.Command("sleep", "10000000"), GinkgoWriter, GinkgoWriter) - Ω(err).ShouldNot(HaveOccurred()) - - session.Signal(syscall.SIGABRT) - Ω(session).ShouldNot(Exit(), "Should not exit immediately...") - Eventually(session).Should(Exit(128 + 6)) - }) - }) - - Context("when the command exits", func() { - It("should close the buffers", func() { - Eventually(session).Should(Exit()) - - Ω(session.Out.Closed()).Should(BeTrue()) - Ω(session.Err.Closed()).Should(BeTrue()) - - Ω(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) - }) - - var So = It - - 
So("this means that eventually should short circuit", func() { - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(session).Should(Say("blah blah blah blah blah")) - }) - Ω(time.Since(t)).Should(BeNumerically("<=", 500*time.Millisecond)) - Ω(failures).Should(HaveLen(1)) - }) - }) - - Context("when wrapping out and err", func() { - BeforeEach(func() { - outWriter = NewBuffer() - errWriter = NewBuffer() - }) - - It("should route to both the provided writers and the gbytes buffers", func() { - Eventually(session.Out).Should(Say("We've done the impossible, and that makes us mighty")) - Eventually(session.Err).Should(Say("Ah, curse your sudden but inevitable betrayal!")) - - Ω(outWriter.Contents()).Should(ContainSubstring("We've done the impossible, and that makes us mighty")) - Ω(errWriter.Contents()).Should(ContainSubstring("Ah, curse your sudden but inevitable betrayal!")) - - Eventually(session).Should(Exit()) - - Ω(outWriter.Contents()).Should(Equal(session.Out.Contents())) - Ω(errWriter.Contents()).Should(Equal(session.Err.Contents())) - }) - }) - - Describe("when the command fails to start", func() { - It("should return an error", func() { - _, err := Start(exec.Command("agklsjdfas"), nil, nil) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/ghttp/handlers.go b/kit/github.com/onsi/gomega/ghttp/handlers.go deleted file mode 100644 index 4611013..0000000 --- a/kit/github.com/onsi/gomega/ghttp/handlers.go +++ /dev/null @@ -1,202 +0,0 @@ -package ghttp - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -//CombineHandler takes variadic list of handlers and produces one handler -//that calls each handler in order. 
-func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - for _, handler := range handlers { - handler(w, req) - } - } -} - -//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path -//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery` -// -//For path, you may pass in a string, in which case strict equality will be applied -//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example) -func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - Ω(req.Method).Should(Equal(method), "Method mismatch") - switch p := path.(type) { - case types.GomegaMatcher: - Ω(req.URL.Path).Should(p, "Path mismatch") - default: - Ω(req.URL.Path).Should(Equal(path), "Path mismatch") - } - if len(rawQuery) > 0 { - Ω(req.URL.RawQuery).Should(Equal(rawQuery[0]), "RawQuery mismatch") - } - } -} - -//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the -//specified value -func VerifyContentType(contentType string) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - Ω(req.Header.Get("Content-Type")).Should(Equal(contentType)) - } -} - -//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header -//matching the passed in username and password -func VerifyBasicAuth(username string, password string) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - auth := req.Header.Get("Authorization") - decoded, err := base64.StdEncoding.DecodeString(auth[6:]) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch") - } -} - -//VerifyHeader returns a 
handler that verifies the request contains the passed in headers. -//The passed in header keys are first canonicalized via http.CanonicalHeaderKey. -// -//The request must contain *all* the passed in headers, but it is allowed to have additional headers -//beyond the passed in set. -func VerifyHeader(header http.Header) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - for key, values := range header { - key = http.CanonicalHeaderKey(key) - Ω(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key) - } - } -} - -//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values -//(recall that a `http.Header` is a mapping from string (key) to []string (values)) -//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object. -func VerifyHeaderKV(key string, values ...string) http.HandlerFunc { - return VerifyHeader(http.Header{key: values}) -} - -//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation -//matching the passed in JSON string. It does this using Gomega's MatchJSON method -// -//VerifyJSON also verifies that the request's content type is application/json -func VerifyJSON(expectedJSON string) http.HandlerFunc { - return CombineHandlers( - VerifyContentType("application/json"), - func(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) - req.Body.Close() - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(MatchJSON(expectedJSON), "JSON Mismatch") - }, - ) -} - -//VerifyJSONRepresenting is similar to VerifyJSON. 
Instead of taking a JSON string, however, it -//takes an arbitrary JSON-encodable object and verifies that the requests's body is a JSON representation -//that matches the object -func VerifyJSONRepresenting(object interface{}) http.HandlerFunc { - data, err := json.Marshal(object) - Ω(err).ShouldNot(HaveOccurred()) - return CombineHandlers( - VerifyContentType("application/json"), - VerifyJSON(string(data)), - ) -} - -func copyHeader(src http.Header, dst http.Header) { - for key, value := range src { - dst[key] = value - } -} - -/* -RespondWith returns a handler that responds to a request with the specified status code and body - -Body may be a string or []byte - -Also, RespondWith can be given an optional http.Header. The headers defined therein will be added to the response headers. -*/ -func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - if len(optionalHeader) == 1 { - copyHeader(optionalHeader[0], w.Header()) - } - w.WriteHeader(statusCode) - switch x := body.(type) { - case string: - w.Write([]byte(x)) - case []byte: - w.Write(x) - default: - Ω(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.") - } - } -} - -/* -RespondWithPtr returns a handler that responds to a request with the specified status code and body - -Unlike RespondWith, you pass RepondWithPtr a pointer to the status code and body allowing different tests -to share the same setup but specify different status codes and bodies. - -Also, RespondWithPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. -Since the http.Header can be mutated after the fact you don't need to pass in a pointer. 
-*/ -func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - if len(optionalHeader) == 1 { - copyHeader(optionalHeader[0], w.Header()) - } - w.WriteHeader(*statusCode) - if body != nil { - switch x := (body).(type) { - case *string: - w.Write([]byte(*x)) - case *[]byte: - w.Write(*x) - default: - Ω(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.") - } - } - } -} - -/* -RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body -containing the JSON-encoding of the passed in object - -Also, RespondWithJSONEncoded can be given an optional http.Header. The headers defined therein will be added to the response headers. -*/ -func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc { - data, err := json.Marshal(object) - Ω(err).ShouldNot(HaveOccurred()) - return RespondWith(statusCode, string(data), optionalHeader...) -} - -/* -RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer -to a status code and object. - -This allows different tests to share the same setup but specify different status codes and JSON-encoded -objects. - -Also, RespondWithJSONEncodedPtr can be given an optional http.Header. The headers defined therein will be added to the response headers. -Since the http.Header can be mutated after the fact you don't need to pass in a pointer. 
-*/ -func RespondWithJSONEncodedPtr(statusCode *int, object *interface{}, optionalHeader ...http.Header) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - data, err := json.Marshal(*object) - Ω(err).ShouldNot(HaveOccurred()) - if len(optionalHeader) == 1 { - copyHeader(optionalHeader[0], w.Header()) - } - w.WriteHeader(*statusCode) - w.Write(data) - } -} diff --git a/kit/github.com/onsi/gomega/ghttp/test_server.go b/kit/github.com/onsi/gomega/ghttp/test_server.go deleted file mode 100644 index b59ba74..0000000 --- a/kit/github.com/onsi/gomega/ghttp/test_server.go +++ /dev/null @@ -1,303 +0,0 @@ -/* -Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports -registering multiple handlers. Incoming requests are not routed between the different handlers -- rather it is merely the order of the handlers that matters. The first request is handled by the first -registered handler, the second request by the second handler, etc. - -The intent here is to have each handler *verify* that the incoming request is valid. To accomplish, ghttp -also provides a collection of bite-size handlers that each perform one aspect of request verification. These can -be composed together and registered with a ghttp server. The result is an expressive language for describing -the requests generated by the client under test. - -Here's a simple example, note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches. 
-A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients - - var _ = Describe("A Sprockets Client", func() { - var server *ghttp.Server - var client *SprocketClient - BeforeEach(func() { - server = ghttp.NewServer() - client = NewSprocketClient(server.URL(), "skywalker", "tk427") - }) - - AfterEach(func() { - server.Close() - }) - - Describe("fetching sprockets", func() { - var statusCode int - var sprockets []Sprocket - BeforeEach(func() { - statusCode = http.StatusOK - sprockets = []Sprocket{} - server.AppendHandlers(ghttp.CombineHandlers( - ghttp.VerifyRequest("GET", "/sprockets"), - ghttp.VerifyBasicAuth("skywalker", "tk427"), - ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets), - )) - }) - - Context("when requesting all sprockets", func() { - Context("when the response is succesful", func() { - BeforeEach(func() { - sprockets = []Sprocket{ - NewSprocket("Alfalfa"), - NewSprocket("Banana"), - } - }) - - It("should return the returned sprockets", func() { - Ω(client.Sprockets()).Should(Equal(sprockets)) - }) - }) - - Context("when the response is missing", func() { - BeforeEach(func() { - statusCode = http.StatusNotFound - }) - - It("should return an empty list of sprockets", func() { - Ω(client.Sprockets()).Should(BeEmpty()) - }) - }) - - Context("when the response fails to authenticate", func() { - BeforeEach(func() { - statusCode = http.StatusUnauthorized - }) - - It("should return an AuthenticationError error", func() { - sprockets, err := client.Sprockets() - Ω(sprockets).Should(BeEmpty()) - Ω(err).Should(MatchError(AuthenticationError)) - }) - }) - - Context("when the response is a server failure", func() { - BeforeEach(func() { - statusCode = http.StatusInternalServerError - }) - - It("should return an InternalError error", func() { - sprockets, err := client.Sprockets() - Ω(sprockets).Should(BeEmpty()) - Ω(err).Should(MatchError(InternalError)) - }) - }) - }) - - Context("when requesting some sprockets", 
func() { - BeforeEach(func() { - sprockets = []Sprocket{ - NewSprocket("Alfalfa"), - NewSprocket("Banana"), - } - - server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD")) - }) - - It("should make the request with a filter", func() { - Ω(client.Sprockets("food")).Should(Equal(sprockets)) - }) - }) - }) - }) -*/ -package ghttp - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - "regexp" - "sync" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -func new() *Server { - return &Server{ - AllowUnhandledRequests: false, - UnhandledRequestStatusCode: http.StatusInternalServerError, - writeLock: &sync.Mutex{}, - } -} - -type routedHandler struct { - method string - pathRegexp *regexp.Regexp - path string - handler http.HandlerFunc -} - -// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server. The server is started automatically. -func NewServer() *Server { - s := new() - s.HTTPTestServer = httptest.NewServer(s) - return s -} - -// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server. The server is started automatically. -func NewTLSServer() *Server { - s := new() - s.HTTPTestServer = httptest.NewTLSServer(s) - return s -} - -type Server struct { - //The underlying httptest server - HTTPTestServer *httptest.Server - - //Defaults to false. If set to true, the Server will allow more requests than there are registered handlers. - AllowUnhandledRequests bool - - //The status code returned when receiving an unhandled request. - //Defaults to http.StatusInternalServerError. - //Only applies if AllowUnhandledRequests is true - UnhandledRequestStatusCode int - - receivedRequests []*http.Request - requestHandlers []http.HandlerFunc - routedHandlers []routedHandler - - writeLock *sync.Mutex - calls int -} - -//URL() returns a url that will hit the server -func (s *Server) URL() string { - return s.HTTPTestServer.URL -} - -//Close() should be called at the end of each test. 
It spins down and cleans up the test server. -func (s *Server) Close() { - server := s.HTTPTestServer - s.HTTPTestServer = nil - server.Close() -} - -//ServeHTTP() makes Server an http.Handler -//When the server receives a request it handles the request in the following order: -// -//1. If the request matches a handler registered with RouteToHandler, that handler is called. -//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order. -//3. If all registered handlers have been called then: -// a) If AllowUnhandledRequests is true, the request will be handled with response code of UnhandledRequestStatusCode -// b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed. -func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - defer func() { - recover() - }() - - if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok { - routedHandler(w, req) - } else if s.calls < len(s.requestHandlers) { - s.requestHandlers[s.calls](w, req) - s.calls++ - } else { - if s.AllowUnhandledRequests { - ioutil.ReadAll(req.Body) - req.Body.Close() - w.WriteHeader(s.UnhandledRequestStatusCode) - } else { - Ω(req).Should(BeNil(), "Received Unhandled Request") - } - } - s.receivedRequests = append(s.receivedRequests, req) -} - -//ReceivedRequests is an array containing all requests received by the server (both handled and unhandled requests) -func (s *Server) ReceivedRequests() []*http.Request { - s.writeLock.Lock() - defer s.writeLock.Unlock() - - return s.receivedRequests -} - -//RouteToHandler can be used to register handlers that will always handle requests that match -//the passed in method and path. -// -//The path may be either a string object or a *regexp.Regexp. 
-func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - - rh := routedHandler{ - method: method, - handler: handler, - } - - switch p := path.(type) { - case *regexp.Regexp: - rh.pathRegexp = p - case string: - rh.path = p - default: - panic("path must be a string or a regular expression") - } - - for i, existingRH := range s.routedHandlers { - if existingRH.method == method && - reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) && - existingRH.path == rh.path { - s.routedHandlers[i] = rh - return - } - } - s.routedHandlers = append(s.routedHandlers, rh) -} - -func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) { - for _, rh := range s.routedHandlers { - if rh.method == method { - if rh.pathRegexp != nil { - if rh.pathRegexp.Match([]byte(path)) { - return rh.handler, true - } - } else if rh.path == path { - return rh.handler, true - } - } - } - - return nil, false -} - -//AppendHandlers will appends http.HandlerFuncs to the server's list of registered handlers. The first incoming request is handled by the first handler, the second by the second, etc... -func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - - s.requestHandlers = append(s.requestHandlers, handlers...) -} - -//SetHandler overrides the registered handler at the passed in index with the passed in handler -//This is useful, for example, when a server has been set up in a shared context, but must be tweaked -//for a particular test. -func (s *Server) SetHandler(index int, handler http.HandlerFunc) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - - s.requestHandlers[index] = handler -} - -//GetHandler returns the handler registered at the passed in index. 
-func (s *Server) GetHandler(index int) http.HandlerFunc { - s.writeLock.Lock() - defer s.writeLock.Unlock() - - return s.requestHandlers[index] -} - -//WrapHandler combines the passed in handler with the handler registered at the passed in index. -//This is useful, for example, when a server has been set up in a shared context but must be tweaked -//for a particular test. -// -//If the currently registered handler is A, and the new passed in handler is B then -//WrapHandler will generate a new handler that first calls A, then calls B, and assign it to index -func (s *Server) WrapHandler(index int, handler http.HandlerFunc) { - existingHandler := s.GetHandler(index) - s.SetHandler(index, CombineHandlers(existingHandler, handler)) -} diff --git a/kit/github.com/onsi/gomega/ghttp/test_server_suite_test.go b/kit/github.com/onsi/gomega/ghttp/test_server_suite_test.go deleted file mode 100644 index bf7e198..0000000 --- a/kit/github.com/onsi/gomega/ghttp/test_server_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package ghttp_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestGHTTP(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "GHTTP Suite") -} diff --git a/kit/github.com/onsi/gomega/ghttp/test_server_test.go b/kit/github.com/onsi/gomega/ghttp/test_server_test.go deleted file mode 100644 index 9d6f8fd..0000000 --- a/kit/github.com/onsi/gomega/ghttp/test_server_test.go +++ /dev/null @@ -1,555 +0,0 @@ -package ghttp_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "regexp" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/ghttp" -) - -var _ = Describe("TestServer", func() { - var ( - resp *http.Response - err error - s *Server - ) - - BeforeEach(func() { - s = NewServer() - }) - - AfterEach(func() { - s.Close() - }) - - Describe("allowing unhandled requests", func() { - Context("when true", func() { - BeforeEach(func() { - s.AllowUnhandledRequests = true - s.UnhandledRequestStatusCode = http.StatusForbidden - resp, err = http.Get(s.URL() + "/foo") - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should allow unhandled requests and respond with the passed in status code", func() { - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusForbidden)) - - data, err := ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(data).Should(BeEmpty()) - }) - - It("should record the requests", func() { - Ω(s.ReceivedRequests()).Should(HaveLen(1)) - Ω(s.ReceivedRequests()[0].URL.Path).Should(Equal("/foo")) - }) - }) - - Context("when false", func() { - It("should fail when attempting a request", func() { - failures := InterceptGomegaFailures(func() { - http.Get(s.URL() + "/foo") - }) - - Ω(failures[0]).Should(ContainSubstring("Received Unhandled Request")) - }) - }) - }) - - Describe("Managing Handlers", func() { - var called []string - BeforeEach(func() { - called = []string{} - s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) { - called = append(called, "r1") - }) - s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) { - called = append(called, "r2") - }) - s.AppendHandlers(func(w http.ResponseWriter, req *http.Request) { - called = append(called, "A") - }, func(w http.ResponseWriter, req *http.Request) { - called = append(called, "B") - }) - }) - - It("should prefer routed handlers if there is a match", func() { - http.Get(s.URL() + "/routed") - http.Post(s.URL()+"/routed7", "application/json", nil) - http.Get(s.URL() + 
"/foo") - http.Get(s.URL() + "/routed") - http.Post(s.URL()+"/routed9", "application/json", nil) - http.Get(s.URL() + "/bar") - - failures := InterceptGomegaFailures(func() { - http.Get(s.URL() + "/foo") - http.Get(s.URL() + "/routed/not/a/match") - http.Get(s.URL() + "/routed7") - http.Post(s.URL()+"/routed", "application/json", nil) - }) - - Ω(failures[0]).Should(ContainSubstring("Received Unhandled Request")) - Ω(failures).Should(HaveLen(4)) - - http.Post(s.URL()+"/routed3", "application/json", nil) - - Ω(called).Should(Equal([]string{"r1", "r2", "A", "r1", "r2", "B", "r2"})) - }) - - It("should override routed handlers when reregistered", func() { - s.RouteToHandler("GET", "/routed", func(w http.ResponseWriter, req *http.Request) { - called = append(called, "r3") - }) - s.RouteToHandler("POST", regexp.MustCompile(`/routed\d`), func(w http.ResponseWriter, req *http.Request) { - called = append(called, "r4") - }) - - http.Get(s.URL() + "/routed") - http.Post(s.URL()+"/routed7", "application/json", nil) - - Ω(called).Should(Equal([]string{"r3", "r4"})) - }) - - It("should call the appended handlers, in order, as requests come in", func() { - http.Get(s.URL() + "/foo") - Ω(called).Should(Equal([]string{"A"})) - - http.Get(s.URL() + "/foo") - Ω(called).Should(Equal([]string{"A", "B"})) - - failures := InterceptGomegaFailures(func() { - http.Get(s.URL() + "/foo") - }) - - Ω(failures[0]).Should(ContainSubstring("Received Unhandled Request")) - }) - - Describe("Overwriting an existing handler", func() { - BeforeEach(func() { - s.SetHandler(0, func(w http.ResponseWriter, req *http.Request) { - called = append(called, "C") - }) - }) - - It("should override the specified handler", func() { - http.Get(s.URL() + "/foo") - http.Get(s.URL() + "/foo") - Ω(called).Should(Equal([]string{"C", "B"})) - }) - }) - - Describe("Getting an existing handler", func() { - It("should return the handler func", func() { - s.GetHandler(1)(nil, nil) - Ω(called).Should(Equal([]string{"B"})) - 
}) - }) - - Describe("Wrapping an existing handler", func() { - BeforeEach(func() { - s.WrapHandler(0, func(w http.ResponseWriter, req *http.Request) { - called = append(called, "C") - }) - }) - - It("should wrap the existing handler in a new handler", func() { - http.Get(s.URL() + "/foo") - http.Get(s.URL() + "/foo") - Ω(called).Should(Equal([]string{"A", "C", "B"})) - }) - }) - }) - - Describe("Request Handlers", func() { - Describe("VerifyRequest", func() { - BeforeEach(func() { - s.AppendHandlers(VerifyRequest("GET", "/foo")) - }) - - It("should verify the method, path", func() { - resp, err = http.Get(s.URL() + "/foo?baz=bar") - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify the method, path", func() { - failures := InterceptGomegaFailures(func() { - http.Get(s.URL() + "/foo2") - }) - Ω(failures).Should(HaveLen(1)) - }) - - It("should verify the method, path", func() { - failures := InterceptGomegaFailures(func() { - http.Post(s.URL()+"/foo", "application/json", nil) - }) - Ω(failures).Should(HaveLen(1)) - }) - - Context("when passed a rawQuery", func() { - It("should also be possible to verify the rawQuery", func() { - s.SetHandler(0, VerifyRequest("GET", "/foo", "baz=bar")) - resp, err = http.Get(s.URL() + "/foo?baz=bar") - Ω(err).ShouldNot(HaveOccurred()) - }) - }) - - Context("when passed a matcher for path", func() { - It("should apply the matcher", func() { - s.SetHandler(0, VerifyRequest("GET", MatchRegexp(`/foo/[a-f]*/3`))) - resp, err = http.Get(s.URL() + "/foo/abcdefa/3") - Ω(err).ShouldNot(HaveOccurred()) - }) - }) - }) - - Describe("VerifyContentType", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("GET", "/foo"), - VerifyContentType("application/octet-stream"), - )) - }) - - It("should verify the content type", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err = 
http.DefaultClient.Do(req) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify the content type", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.Header.Set("Content-Type", "application/json") - - failures := InterceptGomegaFailures(func() { - http.DefaultClient.Do(req) - }) - Ω(failures).Should(HaveLen(1)) - }) - }) - - Describe("Verify BasicAuth", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("GET", "/foo"), - VerifyBasicAuth("bob", "password"), - )) - }) - - It("should verify basic auth", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.SetBasicAuth("bob", "password") - - resp, err = http.DefaultClient.Do(req) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify basic auth", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.SetBasicAuth("bob", "bassword") - - failures := InterceptGomegaFailures(func() { - http.DefaultClient.Do(req) - }) - Ω(failures).Should(HaveLen(1)) - }) - - }) - - Describe("VerifyHeader", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("GET", "/foo"), - VerifyHeader(http.Header{ - "accept": []string{"jpeg", "png"}, - "cache-control": []string{"omicron"}, - "Return-Path": []string{"hobbiton"}, - }), - )) - }) - - It("should verify the headers", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.Header.Add("Accept", "jpeg") - req.Header.Add("Accept", "png") - req.Header.Add("Cache-Control", "omicron") - req.Header.Add("return-path", "hobbiton") - - resp, err = http.DefaultClient.Do(req) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify the headers", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.Header.Add("Schmaccept", "jpeg") - 
req.Header.Add("Schmaccept", "png") - req.Header.Add("Cache-Control", "omicron") - req.Header.Add("return-path", "hobbiton") - - failures := InterceptGomegaFailures(func() { - http.DefaultClient.Do(req) - }) - Ω(failures).Should(HaveLen(1)) - }) - }) - - Describe("VerifyHeaderKV", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("GET", "/foo"), - VerifyHeaderKV("accept", "jpeg", "png"), - VerifyHeaderKV("cache-control", "omicron"), - VerifyHeaderKV("Return-Path", "hobbiton"), - )) - }) - - It("should verify the headers", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.Header.Add("Accept", "jpeg") - req.Header.Add("Accept", "png") - req.Header.Add("Cache-Control", "omicron") - req.Header.Add("return-path", "hobbiton") - - resp, err = http.DefaultClient.Do(req) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify the headers", func() { - req, err := http.NewRequest("GET", s.URL()+"/foo", nil) - Ω(err).ShouldNot(HaveOccurred()) - req.Header.Add("Accept", "jpeg") - req.Header.Add("Cache-Control", "omicron") - req.Header.Add("return-path", "hobbiton") - - failures := InterceptGomegaFailures(func() { - http.DefaultClient.Do(req) - }) - Ω(failures).Should(HaveLen(1)) - }) - }) - - Describe("VerifyJSON", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - VerifyJSON(`{"a":3, "b":2}`), - )) - }) - - It("should verify the json body and the content type", func() { - resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":3}`))) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify the json body and the content type", func() { - failures := InterceptGomegaFailures(func() { - http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`{"b":2, "a":4}`))) - }) - Ω(failures).Should(HaveLen(1)) - }) - - It("should verify the json body and the content type", func() { - failures 
:= InterceptGomegaFailures(func() { - http.Post(s.URL()+"/foo", "application/not-json", bytes.NewReader([]byte(`{"b":2, "a":3}`))) - }) - Ω(failures).Should(HaveLen(1)) - }) - }) - - Describe("VerifyJSONRepresenting", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - VerifyJSONRepresenting([]int{1, 3, 5}), - )) - }) - - It("should verify the json body and the content type", func() { - resp, err = http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3,5]`))) - Ω(err).ShouldNot(HaveOccurred()) - }) - - It("should verify the json body and the content type", func() { - failures := InterceptGomegaFailures(func() { - http.Post(s.URL()+"/foo", "application/json", bytes.NewReader([]byte(`[1,3]`))) - }) - Ω(failures).Should(HaveLen(1)) - }) - }) - - Describe("RespondWith", func() { - Context("without headers", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWith(http.StatusCreated, "sweet"), - ), CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWith(http.StatusOK, []byte("sour")), - )) - }) - - It("should return the response", func() { - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) - - body, err := ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(Equal([]byte("sweet"))) - - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - - body, err = ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(Equal([]byte("sour"))) - }) - }) - - Context("with headers", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWith(http.StatusCreated, "sweet", http.Header{"X-Custom-Header": []string{"my header"}}), - )) - }) - - It("should 
return the headers too", func() { - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) - Ω(ioutil.ReadAll(resp.Body)).Should(Equal([]byte("sweet"))) - Ω(resp.Header.Get("X-Custom-Header")).Should(Equal("my header")) - }) - }) - }) - - Describe("RespondWithPtr", func() { - var code int - var byteBody []byte - var stringBody string - BeforeEach(func() { - code = http.StatusOK - byteBody = []byte("sweet") - stringBody = "sour" - - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWithPtr(&code, &byteBody), - ), CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWithPtr(&code, &stringBody), - )) - }) - - It("should return the response", func() { - code = http.StatusCreated - byteBody = []byte("tasty") - stringBody = "treat" - - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) - - body, err := ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(Equal([]byte("tasty"))) - - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) - - body, err = ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(Equal([]byte("treat"))) - }) - - Context("when passed a nil body", func() { - BeforeEach(func() { - s.SetHandler(0, CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWithPtr(&code, nil), - )) - }) - - It("should return an empty body and not explode", func() { - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - - Ω(err).ShouldNot(HaveOccurred()) - Ω(resp.StatusCode).Should(Equal(http.StatusOK)) - body, err := ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(BeEmpty()) - - Ω(s.ReceivedRequests()).Should(HaveLen(1)) - }) - }) - }) - - 
Describe("RespondWithJSON", func() { - BeforeEach(func() { - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWithJSONEncoded(http.StatusCreated, []int{1, 2, 3}), - )) - }) - - It("should return the response", func() { - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) - - body, err := ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(MatchJSON("[1,2,3]")) - }) - }) - - Describe("RespondWithJSONPtr", func() { - var code int - var object interface{} - BeforeEach(func() { - code = http.StatusOK - object = []int{1, 2, 3} - - s.AppendHandlers(CombineHandlers( - VerifyRequest("POST", "/foo"), - RespondWithJSONEncodedPtr(&code, &object), - )) - }) - - It("should return the response", func() { - code = http.StatusCreated - object = []int{4, 5, 6} - resp, err = http.Post(s.URL()+"/foo", "application/json", nil) - Ω(err).ShouldNot(HaveOccurred()) - - Ω(resp.StatusCode).Should(Equal(http.StatusCreated)) - - body, err := ioutil.ReadAll(resp.Body) - Ω(err).ShouldNot(HaveOccurred()) - Ω(body).Should(MatchJSON("[4,5,6]")) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/gomega_dsl.go b/kit/github.com/onsi/gomega/gomega_dsl.go deleted file mode 100644 index 36fafa5..0000000 --- a/kit/github.com/onsi/gomega/gomega_dsl.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Gomega is the Ginkgo BDD-style testing framework's preferred matcher library. - -The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) 
is available at http://onsi.github.io/gomega/ - -Gomega on Github: http://github.com/gocircuit/escher/kit/github.com/onsi/gomega - -Learn more about Ginkgo online: http://onsi.github.io/ginkgo - -Ginkgo on Github: http://github.com/onsi/ginkgo - -Gomega is MIT-Licensed -*/ -package gomega - -import ( - "fmt" - "reflect" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/internal/assertion" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/internal/asyncassertion" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/internal/testingtsupport" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -const GOMEGA_VERSION = "1.0" - -var globalFailHandler types.GomegaFailHandler - -var defaultEventuallyTimeout = time.Second -var defaultEventuallyPollingInterval = 10 * time.Millisecond -var defaultConsistentlyDuration = 100 * time.Millisecond -var defaultConsistentlyPollingInterval = 10 * time.Millisecond - -//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails -//the fail handler passed into RegisterFailHandler is called. -func RegisterFailHandler(handler types.GomegaFailHandler) { - globalFailHandler = handler -} - -//RegisterTestingT connects Gomega to Golang's XUnit style -//Testing.T tests. You'll need to call this at the top of each XUnit style test: -// -// func TestFarmHasCow(t *testing.T) { -// RegisterTestingT(t) -// -// f := farm.New([]string{"Cow", "Horse"}) -// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } -// -// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to -// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests -// in parallel as the global fail handler cannot point to more than one testing.T at a time. -// -// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). 
-func RegisterTestingT(t types.GomegaTestingT) { - RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t)) -} - -//InterceptGomegaHandlers runs a given callback and returns an array of -//failure messages generated by any Gomega assertions within the callback. -// -//This is accomplished by temporarily replacing the *global* fail handler -//with a fail handler that simply annotates failures. The original fail handler -//is reset when InterceptGomegaFailures returns. -// -//This is most useful when testing custom matchers, but can also be used to check -//on a value using a Gomega assertion without causing a test failure. -func InterceptGomegaFailures(f func()) []string { - originalHandler := globalFailHandler - failures := []string{} - RegisterFailHandler(func(message string, callerSkip ...int) { - failures = append(failures, message) - }) - f() - RegisterFailHandler(originalHandler) - return failures -} - -//Ω wraps an actual value allowing assertions to be made on it: -// Ω("foo").Should(Equal("foo")) -// -//If Ω is passed more than one argument it will pass the *first* argument to the matcher. -//All subsequent arguments will be required to be nil/zero. -// -//This is convenient if you want to make an assertion on a method/function that returns -//a value and an error - a common patter in Go. -// -//For example, given a function with signature: -// func MyAmazingThing() (int, error) -// -//Then: -// Ω(MyAmazingThing()).Should(Equal(3)) -//Will succeed only if `MyAmazingThing()` returns `(3, nil)` -// -//Ω and Expect are identical -func Ω(actual interface{}, extra ...interface{}) GomegaAssertion { - return ExpectWithOffset(0, actual, extra...) -} - -//Expect wraps an actual value allowing assertions to be made on it: -// Expect("foo").To(Equal("foo")) -// -//If Expect is passed more than one argument it will pass the *first* argument to the matcher. -//All subsequent arguments will be required to be nil/zero. 
-// -//This is convenient if you want to make an assertion on a method/function that returns -//a value and an error - a common patter in Go. -// -//For example, given a function with signature: -// func MyAmazingThing() (int, error) -// -//Then: -// Expect(MyAmazingThing()).Should(Equal(3)) -//Will succeed only if `MyAmazingThing()` returns `(3, nil)` -// -//Expect and Ω are identical -func Expect(actual interface{}, extra ...interface{}) GomegaAssertion { - return ExpectWithOffset(0, actual, extra...) -} - -//ExpectWithOffset wraps an actual value allowing assertions to be made on it: -// ExpectWithOffset(1, "foo").To(Equal("foo")) -// -//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument -//this is used to modify the call-stack offset when computing line numbers. -// -//This is most useful in helper functions that make assertions. If you want Gomega's -//error message to refer to the calling line in the test (as opposed to the line in the helper function) -//set the first argument of `ExpectWithOffset` appropriately. -func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion { - return assertion.New(actual, globalFailHandler, offset, extra...) -} - -//Eventually wraps an actual value allowing assertions to be made on it. -//The assertion is tried periodically until it passes or a timeout occurs. -// -//Both the timeout and polling interval are configurable as optional arguments: -//The first optional argument is the timeout -//The second optional argument is the polling interval -// -//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the -//last case they are interpreted as seconds. -// -//If Eventually is passed an actual that is a function taking no arguments and returning at least one value, -//then Eventually will call the function periodically and try the matcher against the function's first return value. 
-// -//Example: -// -// Eventually(func() int { -// return thingImPolling.Count() -// }).Should(BeNumerically(">=", 17)) -// -//Note that this example could be rewritten: -// -// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) -// -//If the function returns more than one value, then Eventually will pass the first value to the matcher and -//assert that all other values are nil/zero. -//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. -// -//For example, consider a method that returns a value and an error: -// func FetchFromDB() (string, error) -// -//Then -// Eventually(FetchFromDB).Should(Equal("hasselhoff")) -// -//Will pass only if the the returned error is nil and the returned string passes the matcher. -// -//Eventually's default timeout is 1 second, and its default polling interval is 10ms -func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { - return EventuallyWithOffset(0, actual, intervals...) -} - -//EventuallyWithOffset operates like Eventually but takes an additional -//initial argument to indicate an offset in the call stack. This is useful when building helper -//functions that contain matchers. To learn more, read about `ExpectWithOffset`. -func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { - timeoutInterval := defaultEventuallyTimeout - pollingInterval := defaultEventuallyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) -} - -//Consistently wraps an actual value allowing assertions to be made on it. -//The assertion is tried periodically and is required to pass for a period of time. 
-// -//Both the total time and polling interval are configurable as optional arguments: -//The first optional argument is the duration that Consistently will run for -//The second optional argument is the polling interval -// -//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the -//last case they are interpreted as seconds. -// -//If Consistently is passed an actual that is a function taking no arguments and returning at least one value, -//then Consistently will call the function periodically and try the matcher against the function's first return value. -// -//If the function returns more than one value, then Consistently will pass the first value to the matcher and -//assert that all other values are nil/zero. -//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. -// -//Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. -//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: -// -// Consistently(channel).ShouldNot(Receive()) -// -//Consistently's default duration is 100ms, and its default polling interval is 10ms -func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { - return ConsistentlyWithOffset(0, actual, intervals...) -} - -//ConsistentlyWithOffset operates like Consistnetly but takes an additional -//initial argument to indicate an offset in the call stack. This is useful when building helper -//functions that contain matchers. To learn more, read about `ExpectWithOffset`. 
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { - timeoutInterval := defaultConsistentlyDuration - pollingInterval := defaultConsistentlyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) -} - -//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. -func SetDefaultEventuallyTimeout(t time.Duration) { - defaultEventuallyTimeout = t -} - -//Set the default polling interval for Eventually. -func SetDefaultEventuallyPollingInterval(t time.Duration) { - defaultEventuallyPollingInterval = t -} - -//Set the default duration for Consistently. Consistently will verify that your condition is satsified for this long. -func SetDefaultConsistentlyDuration(t time.Duration) { - defaultConsistentlyDuration = t -} - -//Set the default polling interval for Consistently. -func SetDefaultConsistentlyPollingInterval(t time.Duration) { - defaultConsistentlyPollingInterval = t -} - -//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against -//the matcher passed to the Should and ShouldNot methods. -// -//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to -//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more -//descriptive -// -//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. 
-// -//Example: -// -// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") -// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") -type GomegaAsyncAssertion interface { - Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool -} - -//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher -//passed to the Should/ShouldNot and To/ToNot/NotTo methods. -// -//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect -//though this is not enforced. -// -//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() -//and is used to annotate failure messages. -// -//All methods return a bool that is true if hte assertion passed and false if it failed. -// -//Example: -// -// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) -type GomegaAssertion interface { - Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - - To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool -} - -//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it -type OmegaMatcher types.GomegaMatcher - -func toDuration(input interface{}) time.Duration { - duration, ok := input.(time.Duration) - if ok { - return duration - } - - value := reflect.ValueOf(input) - kind := reflect.TypeOf(input).Kind() - - if reflect.Int <= kind && kind <= reflect.Int64 { - return time.Duration(value.Int()) * time.Second - } else if reflect.Uint <= kind && kind <= 
reflect.Uint64 { - return time.Duration(value.Uint()) * time.Second - } else if reflect.Float32 <= kind && kind <= reflect.Float64 { - return time.Duration(value.Float() * float64(time.Second)) - } else if reflect.String == kind { - duration, err := time.ParseDuration(value.String()) - if err != nil { - panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) - } - return duration - } - - panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) -} diff --git a/kit/github.com/onsi/gomega/internal/assertion/assertion.go b/kit/github.com/onsi/gomega/internal/assertion/assertion.go deleted file mode 100644 index 626ec6b..0000000 --- a/kit/github.com/onsi/gomega/internal/assertion/assertion.go +++ /dev/null @@ -1,98 +0,0 @@ -package assertion - -import ( - "fmt" - "reflect" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -type Assertion struct { - actualInput interface{} - fail types.GomegaFailHandler - offset int - extra []interface{} -} - -func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion { - return &Assertion{ - actualInput: actualInput, - fail: fail, - offset: offset, - extra: extra, - } -} - -func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) -} - -func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) 
-} - -func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { - switch len(optionalDescription) { - case 0: - return "" - default: - return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" - } -} - -func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { - matches, err := matcher.Match(assertion.actualInput) - description := assertion.buildDescription(optionalDescription...) - if err != nil { - assertion.fail(description+err.Error(), 2+assertion.offset) - return false - } - if matches != desiredMatch { - var message string - if desiredMatch { - message = matcher.FailureMessage(assertion.actualInput) - } else { - message = matcher.NegatedFailureMessage(assertion.actualInput) - } - assertion.fail(description+message, 2+assertion.offset) - return false - } - - return true -} - -func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { - success, message := vetExtras(assertion.extra) - if success { - return true - } - - description := assertion.buildDescription(optionalDescription...) 
- assertion.fail(description+message, 2+assertion.offset) - return false -} - -func vetExtras(extras []interface{}) (bool, string) { - for i, extra := range extras { - if extra != nil { - zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() - if !reflect.DeepEqual(zeroValue, extra) { - message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) - return false, message - } - } - } - return true, "" -} diff --git a/kit/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go b/kit/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go deleted file mode 100644 index 958e414..0000000 --- a/kit/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package assertion_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestAssertion(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Assertion Suite") -} diff --git a/kit/github.com/onsi/gomega/internal/assertion/assertion_test.go b/kit/github.com/onsi/gomega/internal/assertion/assertion_test.go deleted file mode 100644 index 73c18d4..0000000 --- a/kit/github.com/onsi/gomega/internal/assertion/assertion_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package assertion_test - -import ( - "errors" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/internal/assertion" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/internal/fakematcher" -) - -var _ = Describe("Assertion", func() { - var ( - a *Assertion - failureMessage string - failureCallerSkip int - matcher *fakematcher.FakeMatcher - ) - - input := "The thing I'm testing" - - var fakeFailHandler = func(message string, callerSkip ...int) { - failureMessage = message - if len(callerSkip) == 1 { - failureCallerSkip = callerSkip[0] - } - } - - BeforeEach(func() { - matcher = &fakematcher.FakeMatcher{} - failureMessage = "" - failureCallerSkip = 0 - a = New(input, fakeFailHandler, 1) - }) - - Context("when called", func() { - It("should pass the provided input value to the matcher", func() { - a.Should(matcher) - - Ω(matcher.ReceivedActual).Should(Equal(input)) - matcher.ReceivedActual = "" - - a.ShouldNot(matcher) - - Ω(matcher.ReceivedActual).Should(Equal(input)) - matcher.ReceivedActual = "" - - a.To(matcher) - - Ω(matcher.ReceivedActual).Should(Equal(input)) - matcher.ReceivedActual = "" - - a.ToNot(matcher) - - Ω(matcher.ReceivedActual).Should(Equal(input)) - matcher.ReceivedActual = "" - - a.NotTo(matcher) - - Ω(matcher.ReceivedActual).Should(Equal(input)) - }) - }) - - Context("when the matcher succeeds", func() { - BeforeEach(func() { - matcher.MatchesToReturn = true - matcher.ErrToReturn = nil - }) - - Context("and a positive assertion is being made", func() { - It("should not call the failure callback", func() { - a.Should(matcher) - Ω(failureMessage).Should(Equal("")) - }) - - It("should be true", func() { - Ω(a.Should(matcher)).Should(BeTrue()) - }) - }) - - Context("and a negative assertion is being made", func() { - It("should call the failure callback", func() { - a.ShouldNot(matcher) - Ω(failureMessage).Should(Equal("negative: The thing I'm testing")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - - It("should be false", func() { - Ω(a.ShouldNot(matcher)).Should(BeFalse()) - }) - 
}) - }) - - Context("when the matcher fails", func() { - BeforeEach(func() { - matcher.MatchesToReturn = false - matcher.ErrToReturn = nil - }) - - Context("and a positive assertion is being made", func() { - It("should call the failure callback", func() { - a.Should(matcher) - Ω(failureMessage).Should(Equal("positive: The thing I'm testing")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - - It("should be false", func() { - Ω(a.Should(matcher)).Should(BeFalse()) - }) - }) - - Context("and a negative assertion is being made", func() { - It("should not call the failure callback", func() { - a.ShouldNot(matcher) - Ω(failureMessage).Should(Equal("")) - }) - - It("should be true", func() { - Ω(a.ShouldNot(matcher)).Should(BeTrue()) - }) - }) - }) - - Context("When reporting a failure", func() { - BeforeEach(func() { - matcher.MatchesToReturn = false - matcher.ErrToReturn = nil - }) - - Context("and there is an optional description", func() { - It("should append the description to the failure message", func() { - a.Should(matcher, "A description") - Ω(failureMessage).Should(Equal("A description\npositive: The thing I'm testing")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - }) - - Context("and there are multiple arguments to the optional description", func() { - It("should append the formatted description to the failure message", func() { - a.Should(matcher, "A description of [%d]", 3) - Ω(failureMessage).Should(Equal("A description of [3]\npositive: The thing I'm testing")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - }) - }) - - Context("When the matcher returns an error", func() { - BeforeEach(func() { - matcher.ErrToReturn = errors.New("Kaboom!") - }) - - Context("and a positive assertion is being made", func() { - It("should call the failure callback", func() { - matcher.MatchesToReturn = true - a.Should(matcher) - Ω(failureMessage).Should(Equal("Kaboom!")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - }) - - Context("and a negative assertion is being made", 
func() { - It("should call the failure callback", func() { - matcher.MatchesToReturn = false - a.ShouldNot(matcher) - Ω(failureMessage).Should(Equal("Kaboom!")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - }) - - It("should always be false", func() { - Ω(a.Should(matcher)).Should(BeFalse()) - Ω(a.ShouldNot(matcher)).Should(BeFalse()) - }) - }) - - Context("when there are extra parameters", func() { - It("(a simple example)", func() { - Ω(func() (string, int, error) { - return "foo", 0, nil - }()).Should(Equal("foo")) - }) - - Context("when the parameters are all nil or zero", func() { - It("should invoke the matcher", func() { - matcher.MatchesToReturn = true - matcher.ErrToReturn = nil - - var typedNil []string - a = New(input, fakeFailHandler, 1, 0, nil, typedNil) - - result := a.Should(matcher) - Ω(result).Should(BeTrue()) - Ω(matcher.ReceivedActual).Should(Equal(input)) - - Ω(failureMessage).Should(BeZero()) - }) - }) - - Context("when any of the parameters are not nil or zero", func() { - It("should call the failure callback", func() { - matcher.MatchesToReturn = false - matcher.ErrToReturn = nil - - a = New(input, fakeFailHandler, 1, errors.New("foo")) - result := a.Should(matcher) - Ω(result).Should(BeFalse()) - Ω(matcher.ReceivedActual).Should(BeZero(), "The matcher doesn't even get called") - Ω(failureMessage).Should(ContainSubstring("foo")) - failureMessage = "" - - a = New(input, fakeFailHandler, 1, nil, 1) - result = a.ShouldNot(matcher) - Ω(result).Should(BeFalse()) - Ω(failureMessage).Should(ContainSubstring("1")) - failureMessage = "" - - a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) - result = a.To(matcher) - Ω(result).Should(BeFalse()) - Ω(failureMessage).Should(ContainSubstring("foo")) - failureMessage = "" - - a = New(input, fakeFailHandler, 1, nil, 0, []string{"foo"}) - result = a.ToNot(matcher) - Ω(result).Should(BeFalse()) - Ω(failureMessage).Should(ContainSubstring("foo")) - failureMessage = "" - - a = New(input, 
fakeFailHandler, 1, nil, 0, []string{"foo"}) - result = a.NotTo(matcher) - Ω(result).Should(BeFalse()) - Ω(failureMessage).Should(ContainSubstring("foo")) - Ω(failureCallerSkip).Should(Equal(3)) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go deleted file mode 100644 index 7c89b96..0000000 --- a/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go +++ /dev/null @@ -1,197 +0,0 @@ -package asyncassertion - -import ( - "errors" - "fmt" - "reflect" - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -type AsyncAssertionType uint - -const ( - AsyncAssertionTypeEventually AsyncAssertionType = iota - AsyncAssertionTypeConsistently -) - -type AsyncAssertion struct { - asyncType AsyncAssertionType - actualInput interface{} - timeoutInterval time.Duration - pollingInterval time.Duration - fail types.GomegaFailHandler - offset int -} - -func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { - actualType := reflect.TypeOf(actualInput) - if actualType.Kind() == reflect.Func { - if actualType.NumIn() != 0 || actualType.NumOut() == 0 { - panic("Expected a function with no arguments and one or more return values.") - } - } - - return &AsyncAssertion{ - asyncType: asyncType, - actualInput: actualInput, - fail: fail, - timeoutInterval: timeoutInterval, - pollingInterval: pollingInterval, - offset: offset, - } -} - -func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.match(matcher, true, optionalDescription...) -} - -func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - return assertion.match(matcher, false, optionalDescription...) 
-} - -func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { - switch len(optionalDescription) { - case 0: - return "" - default: - return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" - } -} - -func (assertion *AsyncAssertion) actualInputIsAFunction() bool { - actualType := reflect.TypeOf(assertion.actualInput) - return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 -} - -func (assertion *AsyncAssertion) pollActual() (interface{}, error) { - if assertion.actualInputIsAFunction() { - values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) - - extras := []interface{}{} - for _, value := range values[1:] { - extras = append(extras, value.Interface()) - } - - success, message := vetExtras(extras) - - if !success { - return nil, errors.New(message) - } - - return values[0].Interface(), nil - } - - return assertion.actualInput, nil -} - -type oracleMatcher interface { - MatchMayChangeInTheFuture(actual interface{}) bool -} - -func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { - if assertion.actualInputIsAFunction() { - return true - } - - oracleMatcher, ok := matcher.(oracleMatcher) - if !ok { - return true - } - - return oracleMatcher.MatchMayChangeInTheFuture(value) -} - -func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { - timer := time.Now() - timeout := time.After(assertion.timeoutInterval) - - description := assertion.buildDescription(optionalDescription...) 
- - var matches bool - var err error - mayChange := true - value, err := assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - - fail := func(preamble string) { - errMsg := "" - message := "" - if err != nil { - errMsg = "Error: " + err.Error() - } else { - if desiredMatch { - message = matcher.FailureMessage(value) - } else { - message = matcher.NegatedFailureMessage(value) - } - } - assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) - } - - if assertion.asyncType == AsyncAssertionTypeEventually { - for { - if err == nil && matches == desiredMatch { - return true - } - - if !mayChange { - fail("No future change is possible. Bailing out early") - return false - } - - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - case <-timeout: - fail("Timed out") - return false - } - } - } else if assertion.asyncType == AsyncAssertionTypeConsistently { - for { - if !(err == nil && matches == desiredMatch) { - fail("Failed") - return false - } - - if !mayChange { - return true - } - - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - case <-timeout: - return true - } - } - } - - return false -} - -func vetExtras(extras []interface{}) (bool, string) { - for i, extra := range extras { - if extra != nil { - zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() - if !reflect.DeepEqual(zeroValue, extra) { - message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) - return false, message - } - } - } - return true, "" -} 
diff --git a/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go b/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go deleted file mode 100644 index f662c98..0000000 --- a/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package asyncassertion_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestAsyncAssertion(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "AsyncAssertion Suite") -} diff --git a/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go b/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go deleted file mode 100644 index 3fd6fea..0000000 --- a/kit/github.com/onsi/gomega/internal/asyncassertion/async_assertion_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package asyncassertion_test - -import ( - "errors" - "time" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/internal/asyncassertion" -) - -var _ = Describe("Async Assertion", func() { - var ( - failureMessage string - callerSkip int - ) - - var fakeFailHandler = func(message string, skip ...int) { - failureMessage = message - callerSkip = skip[0] - } - - BeforeEach(func() { - failureMessage = "" - callerSkip = 0 - }) - - Describe("Eventually", func() { - Context("the positive case", func() { - It("should poll the function and matcher", func() { - arr := []int{} - a := New(AsyncAssertionTypeEventually, func() []int { - arr = append(arr, 1) - return arr - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.Should(HaveLen(10)) - - Ω(arr).Should(HaveLen(10)) - Ω(failureMessage).Should(BeZero()) - }) - - It("should continue when the matcher errors", func() { - var arr = []int{} - a := New(AsyncAssertionTypeEventually, func() interface{} { - arr = append(arr, 1) - if len(arr) == 4 { - return 0 //this should cause the matcher to error - } - return arr - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.Should(HaveLen(4), "My description %d", 2) - - Ω(failureMessage).Should(ContainSubstring("Timed out after")) - Ω(failureMessage).Should(ContainSubstring("My description 2")) - Ω(callerSkip).Should(Equal(4)) - }) - - It("should be able to timeout", func() { - arr := []int{} - a := New(AsyncAssertionTypeEventually, func() []int { - arr = append(arr, 1) - return arr - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.Should(HaveLen(11), "My description %d", 2) - - Ω(arr).Should(HaveLen(10)) - Ω(failureMessage).Should(ContainSubstring("Timed out after")) - Ω(failureMessage).Should(ContainSubstring("<[]int | len:10"), "Should pass the correct value to the matcher message formatter.") - Ω(failureMessage).Should(ContainSubstring("My description 2")) - 
Ω(callerSkip).Should(Equal(4)) - }) - }) - - Context("the negative case", func() { - It("should poll the function and matcher", func() { - counter := 0 - arr := []int{} - a := New(AsyncAssertionTypeEventually, func() []int { - counter += 1 - if counter >= 10 { - arr = append(arr, 1) - } - return arr - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.ShouldNot(HaveLen(0)) - - Ω(arr).Should(HaveLen(1)) - Ω(failureMessage).Should(BeZero()) - }) - - It("should timeout when the matcher errors", func() { - a := New(AsyncAssertionTypeEventually, func() interface{} { - return 0 //this should cause the matcher to error - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.ShouldNot(HaveLen(0), "My description %d", 2) - - Ω(failureMessage).Should(ContainSubstring("Timed out after")) - Ω(failureMessage).Should(ContainSubstring("Error:")) - Ω(failureMessage).Should(ContainSubstring("My description 2")) - Ω(callerSkip).Should(Equal(4)) - }) - - It("should be able to timeout", func() { - a := New(AsyncAssertionTypeEventually, func() []int { - return []int{} - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.ShouldNot(HaveLen(0), "My description %d", 2) - - Ω(failureMessage).Should(ContainSubstring("Timed out after")) - Ω(failureMessage).Should(ContainSubstring("<[]int | len:0"), "Should pass the correct value to the matcher message formatter.") - Ω(failureMessage).Should(ContainSubstring("My description 2")) - Ω(callerSkip).Should(Equal(4)) - }) - }) - - Context("with a function that returns multiple values", func() { - It("should eventually succeed if the additional arguments are nil", func() { - i := 0 - Eventually(func() (int, error) { - i++ - return i, nil - }).Should(Equal(10)) - }) - - It("should eventually timeout if the additional arguments are not nil", func() { - i := 0 - a := 
New(AsyncAssertionTypeEventually, func() (int, error) { - i++ - return i, errors.New("bam") - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - a.Should(Equal(2)) - - Ω(failureMessage).Should(ContainSubstring("Timed out after")) - Ω(failureMessage).Should(ContainSubstring("Error:")) - Ω(failureMessage).Should(ContainSubstring("bam")) - Ω(callerSkip).Should(Equal(4)) - }) - }) - }) - - Describe("Consistently", func() { - Describe("The positive case", func() { - Context("when the matcher consistently passes for the duration", func() { - It("should pass", func() { - calls := 0 - a := New(AsyncAssertionTypeConsistently, func() string { - calls++ - return "foo" - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.Should(Equal("foo")) - Ω(calls).Should(Equal(10)) - Ω(failureMessage).Should(BeZero()) - }) - }) - - Context("when the matcher fails at some point", func() { - It("should fail", func() { - calls := 0 - a := New(AsyncAssertionTypeConsistently, func() interface{} { - calls++ - if calls > 9 { - return "bar" - } - return "foo" - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.Should(Equal("foo")) - Ω(failureMessage).Should(ContainSubstring("to equal")) - Ω(callerSkip).Should(Equal(4)) - }) - }) - - Context("when the matcher errors at some point", func() { - It("should fail", func() { - calls := 0 - a := New(AsyncAssertionTypeConsistently, func() interface{} { - calls++ - if calls > 5 { - return 3 - } - return []int{1, 2, 3} - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.Should(HaveLen(3)) - Ω(failureMessage).Should(ContainSubstring("HaveLen matcher expects")) - Ω(callerSkip).Should(Equal(4)) - }) - }) - }) - - Describe("The negative case", func() { - Context("when the matcher consistently passes for the duration", 
func() { - It("should pass", func() { - c := make(chan bool) - a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.ShouldNot(Receive()) - Ω(failureMessage).Should(BeZero()) - }) - }) - - Context("when the matcher fails at some point", func() { - It("should fail", func() { - c := make(chan bool) - go func() { - time.Sleep(time.Duration(100 * time.Millisecond)) - c <- true - }() - - a := New(AsyncAssertionTypeConsistently, c, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.ShouldNot(Receive()) - Ω(failureMessage).Should(ContainSubstring("not to receive anything")) - }) - }) - - Context("when the matcher errors at some point", func() { - It("should fail", func() { - calls := 0 - a := New(AsyncAssertionTypeConsistently, func() interface{} { - calls++ - return calls - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - - a.ShouldNot(BeNumerically(">", 5)) - Ω(failureMessage).Should(ContainSubstring("not to be >")) - Ω(callerSkip).Should(Equal(4)) - }) - }) - }) - - Context("with a function that returns multiple values", func() { - It("should consistently succeed if the additional arguments are nil", func() { - i := 2 - Consistently(func() (int, error) { - i++ - return i, nil - }).Should(BeNumerically(">=", 2)) - }) - - It("should eventually timeout if the additional arguments are not nil", func() { - i := 2 - a := New(AsyncAssertionTypeEventually, func() (int, error) { - i++ - return i, errors.New("bam") - }, fakeFailHandler, time.Duration(0.2*float64(time.Second)), time.Duration(0.02*float64(time.Second)), 1) - a.Should(BeNumerically(">=", 2)) - - Ω(failureMessage).Should(ContainSubstring("Error:")) - Ω(failureMessage).Should(ContainSubstring("bam")) - Ω(callerSkip).Should(Equal(4)) - }) - }) - }) - - Context("when passed a function with the wrong # or 
arguments & returns", func() { - It("should panic", func() { - Ω(func() { - New(AsyncAssertionTypeEventually, func() {}, fakeFailHandler, 0, 0, 1) - }).Should(Panic()) - - Ω(func() { - New(AsyncAssertionTypeEventually, func(a string) int { return 0 }, fakeFailHandler, 0, 0, 1) - }).Should(Panic()) - - Ω(func() { - New(AsyncAssertionTypeEventually, func() int { return 0 }, fakeFailHandler, 0, 0, 1) - }).ShouldNot(Panic()) - - Ω(func() { - New(AsyncAssertionTypeEventually, func() (int, error) { return 0, nil }, fakeFailHandler, 0, 0, 1) - }).ShouldNot(Panic()) - }) - }) - - Describe("bailing early", func() { - Context("when actual is a value", func() { - It("Eventually should bail out and fail early if the matcher says to", func() { - c := make(chan bool) - close(c) - - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(c, 0.1).Should(Receive()) - }) - Ω(time.Since(t)).Should(BeNumerically("<", 90*time.Millisecond)) - - Ω(failures).Should(HaveLen(1)) - }) - }) - - Context("when actual is a function", func() { - It("should never bail early", func() { - c := make(chan bool) - close(c) - - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(func() chan bool { - return c - }, 0.1).Should(Receive()) - }) - Ω(time.Since(t)).Should(BeNumerically(">=", 90*time.Millisecond)) - - Ω(failures).Should(HaveLen(1)) - }) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go b/kit/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go deleted file mode 100644 index 6e351a7..0000000 --- a/kit/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go +++ /dev/null @@ -1,23 +0,0 @@ -package fakematcher - -import "fmt" - -type FakeMatcher struct { - ReceivedActual interface{} - MatchesToReturn bool - ErrToReturn error -} - -func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) { - matcher.ReceivedActual = actual - - return matcher.MatchesToReturn, matcher.ErrToReturn -} - -func 
(matcher *FakeMatcher) FailureMessage(actual interface{}) string { - return fmt.Sprintf("positive: %v", actual) -} - -func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string { - return fmt.Sprintf("negative: %v", actual) -} diff --git a/kit/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/kit/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go deleted file mode 100644 index e892a5f..0000000 --- a/kit/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go +++ /dev/null @@ -1,40 +0,0 @@ -package testingtsupport - -import ( - "regexp" - "runtime/debug" - "strings" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -type gomegaTestingT interface { - Errorf(format string, args ...interface{}) -} - -func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { - return func(message string, callerSkip ...int) { - skip := 1 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - stackTrace := pruneStack(string(debug.Stack()), skip) - t.Errorf("\n%s\n%s", stackTrace, message) - } -} - -func pruneStack(fullStackTrace string, skip int) string { - stack := strings.Split(fullStackTrace, "\n") - if len(stack) > 2*(skip+1) { - stack = stack[2*(skip+1):] - } - prunedStack := []string{} - re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) - for i := 0; i < len(stack)/2; i++ { - if !re.Match([]byte(stack[i*2])) { - prunedStack = append(prunedStack, stack[i*2]) - prunedStack = append(prunedStack, stack[i*2+1]) - } - } - return strings.Join(prunedStack, "\n") -} diff --git a/kit/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go b/kit/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go deleted file mode 100644 index 8999eac..0000000 --- a/kit/github.com/onsi/gomega/internal/testingtsupport/testing_t_support_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package testingtsupport_test - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "testing" -) - -func TestTestingT(t *testing.T) { - RegisterTestingT(t) - Ω(true).Should(BeTrue()) -} diff --git a/kit/github.com/onsi/gomega/matchers.go b/kit/github.com/onsi/gomega/matchers.go deleted file mode 100644 index 23c00c5..0000000 --- a/kit/github.com/onsi/gomega/matchers.go +++ /dev/null @@ -1,293 +0,0 @@ -package gomega - -import ( - "time" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/types" -) - -//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about -//types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. -func Equal(expected interface{}) types.GomegaMatcher { - return &matchers.EqualMatcher{ - Expected: expected, - } -} - -//BeEquivalentTo is more lax than Equal, allowing equality between different types. -//This is done by converting actual to have the type of expected before -//attempting equality with reflect.DeepEqual. -//It is an error for actual and expected to be nil. Use BeNil() instead. 
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher { - return &matchers.BeEquivalentToMatcher{ - Expected: expected, - } -} - -//BeNil succeeds if actual is nil -func BeNil() types.GomegaMatcher { - return &matchers.BeNilMatcher{} -} - -//BeTrue succeeds if actual is true -func BeTrue() types.GomegaMatcher { - return &matchers.BeTrueMatcher{} -} - -//BeFalse succeeds if actual is false -func BeFalse() types.GomegaMatcher { - return &matchers.BeFalseMatcher{} -} - -//HaveOccurred succeeds if actual is a non-nil error -//The typical Go error checking pattern looks like: -// err := SomethingThatMightFail() -// Ω(err).ShouldNot(HaveOccurred()) -func HaveOccurred() types.GomegaMatcher { - return &matchers.HaveOccurredMatcher{} -} - -//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. -// -//These are valid use-cases: -// Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) -// -//It is an error for err to be nil or an object that does not implement the Error interface -func MatchError(expected interface{}) types.GomegaMatcher { - return &matchers.MatchErrorMatcher{ - Expected: expected, - } -} - -//BeClosed succeeds if actual is a closed channel. -//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil -// -//In order to check whether or not the channel is closed, Gomega must try to read from the channel -//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about -//values coming down the channel. -// -//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before -//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). 
-// -//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. -func BeClosed() types.GomegaMatcher { - return &matchers.BeClosedMatcher{} -} - -//Receive succeeds if there is a value to be received on actual. -//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. -// -//Receive returns immediately and never blocks: -// -//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. -// -//- If the channel `c` is closed then *both* Ω(c).Should(Receive()) and Ω(c).ShouldNot(Receive()) will error. -// -//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. -// -//If you have a go-routine running in the background that will write to channel `c` you can: -// Eventually(c).Should(Receive()) -// -//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) -// -//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: -// Consistently(c).ShouldNot(Receive()) -// -//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: -// Ω(c).Should(Receive(Equal("foo"))) -// -//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. -// -//Passing Receive a matcher is especially useful when paired with Eventually: -// -// Eventually(c).Should(Receive(ContainSubstring("bar"))) -// -//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
-// -//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: -// var myThing thing -// Eventually(thingChan).Should(Receive(&myThing)) -// Ω(myThing.Sprocket).Should(Equal("foo")) -// Ω(myThing.IsValid()).Should(BeTrue()) -func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - - return &matchers.ReceiveMatcher{ - Arg: arg, - } -} - -//BeSent succeeds if a value can be sent to actual. -//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. -//In addition, actual must not be closed. -// -//BeSent never blocks: -// -//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately -//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout -//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately -// -//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). -//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. -func BeSent(arg interface{}) types.GomegaMatcher { - return &matchers.BeSentMatcher{ - Arg: arg, - } -} - -//MatchRegexp succeeds if actual is a string or stringer that matches the -//passed-in regexp. Optional arguments can be provided to construct a regexp -//via fmt.Sprintf(). 
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { - return &matchers.MatchRegexpMatcher{ - Regexp: regexp, - Args: args, - } -} - -//ContainSubstring succeeds if actual is a string or stringer that contains the -//passed-in regexp. Optional arguments can be provided to construct the substring -//via fmt.Sprintf(). -func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { - return &matchers.ContainSubstringMatcher{ - Substr: substr, - Args: args, - } -} - -//MatchJSON succeeds if actual is a string or stringer of JSON that matches -//the expected JSON. The JSONs are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. -func MatchJSON(json interface{}) types.GomegaMatcher { - return &matchers.MatchJSONMatcher{ - JSONToMatch: json, - } -} - -//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. -func BeEmpty() types.GomegaMatcher { - return &matchers.BeEmptyMatcher{} -} - -//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. -func HaveLen(count int) types.GomegaMatcher { - return &matchers.HaveLenMatcher{ - Count: count, - } -} - -//BeZero succeeds if actual is the zero value for its type or if actual is nil. -func BeZero() types.GomegaMatcher { - return &matchers.BeZeroMatcher{} -} - -//ContainElement succeeds if actual contains the passed in element. -//By default ContainElement() uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"))) -// -//Actual must be an array, slice or map. -//For maps, ContainElement searches through the map's values. 
-func ContainElement(element interface{}) types.GomegaMatcher { - return &matchers.ContainElementMatcher{ - Element: element, - } -} - -//ConsistOf succeeds if actual contains preciely the elements passed into the matcher. The ordering of the elements does not matter. -//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: -// -// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo")) -// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo")) -// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo"))) -// -//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values. -// -//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it -//is the only element passed in to ConsistOf: -// -// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) -// -//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. - -func ConsistOf(elements ...interface{}) types.GomegaMatcher { - return &matchers.ConsistOfMatcher{ - Elements: elements, - } -} - -//HaveKey succeeds if actual is a map with the passed in key. -//By default HaveKey uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) -func HaveKey(key interface{}) types.GomegaMatcher { - return &matchers.HaveKeyMatcher{ - Key: key, - } -} - -//HaveKeyWithValue succeeds if actual is a map with the passed in key and value. 
-//By default HaveKeyWithValue uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) -// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) -func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { - return &matchers.HaveKeyWithValueMatcher{ - Key: key, - Value: value, - } -} - -//BeNumerically performs numerical assertions in a type-agnostic way. -//Actual and expected should be numbers, though the specific type of -//number is irrelevant (floa32, float64, uint8, etc...). -// -//There are six, self-explanatory, supported comparators: -// Ω(1.0).Should(BeNumerically("==", 1)) -// Ω(1.0).Should(BeNumerically("~", 0.999, 0.01)) -// Ω(1.0).Should(BeNumerically(">", 0.9)) -// Ω(1.0).Should(BeNumerically(">=", 1.0)) -// Ω(1.0).Should(BeNumerically("<", 3)) -// Ω(1.0).Should(BeNumerically("<=", 1.0)) -func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { - return &matchers.BeNumericallyMatcher{ - Comparator: comparator, - CompareTo: compareTo, - } -} - -//BeTemporally compares time.Time's like BeNumerically -//Actual and expected must be time.Time. The comparators are the same as for BeNumerically -// Ω(time.Now()).Should(BeTemporally(">", time.Time{})) -// Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) -func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { - return &matchers.BeTemporallyMatcher{ - Comparator: comparator, - CompareTo: compareTo, - Threshold: threshold, - } -} - -//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. -//It will return an error when one of the values is nil. 
-// Ω(0).Should(BeAssignableToTypeOf(0)) // Same values -// Ω(5).Should(BeAssignableToTypeOf(-1)) // different values same type -// Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type -// Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) -func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { - return &matchers.AssignableToTypeOfMatcher{ - Expected: expected, - } -} - -//Panic succeeds if actual is a function that, when invoked, panics. -//Actual must be a function that takes no arguments and returns no results. -func Panic() types.GomegaMatcher { - return &matchers.PanicMatcher{} -} diff --git a/kit/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/kit/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go deleted file mode 100644 index 347ed92..0000000 --- a/kit/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go +++ /dev/null @@ -1,30 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type AssignableToTypeOfMatcher struct { - Expected interface{} -} - -func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil || matcher.Expected == nil { - return false, fmt.Errorf("Refusing to compare to .") - } - - actualType := reflect.TypeOf(actual) - expectedType := reflect.TypeOf(matcher.Expected) - - return actualType.AssignableTo(expectedType), nil -} - -func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { - return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) -} - -func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { - return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) -} diff --git a/kit/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go 
b/kit/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go deleted file mode 100644 index a2a8b68..0000000 --- a/kit/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("AssignableToTypeOf", func() { - Context("When asserting assignability between types", func() { - It("should do the right thing", func() { - Ω(0).Should(BeAssignableToTypeOf(0)) - Ω(5).Should(BeAssignableToTypeOf(-1)) - Ω("foo").Should(BeAssignableToTypeOf("bar")) - Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) - - Ω(0).ShouldNot(BeAssignableToTypeOf("bar")) - Ω(5).ShouldNot(BeAssignableToTypeOf(struct{ Foo string }{})) - Ω("foo").ShouldNot(BeAssignableToTypeOf(42)) - }) - }) - - Context("When asserting nil values", func() { - It("should error", func() { - success, err := (&AssignableToTypeOfMatcher{Expected: nil}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_closed_matcher.go b/kit/github.com/onsi/gomega/matchers/be_closed_matcher.go deleted file mode 100644 index dae53f3..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_closed_matcher.go +++ /dev/null @@ -1,45 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type BeClosedMatcher struct { -} - -func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { - if !isChan(actual) { - return false, fmt.Errorf("BeClosed matcher expects a channel. 
Got:\n%s", format.Object(actual, 1)) - } - - channelType := reflect.TypeOf(actual) - channelValue := reflect.ValueOf(actual) - - if channelType.ChanDir() == reflect.SendDir { - return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1)) - } - - winnerIndex, _, open := reflect.Select([]reflect.SelectCase{ - reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, - reflect.SelectCase{Dir: reflect.SelectDefault}, - }) - - var closed bool - if winnerIndex == 0 { - closed = !open - } else if winnerIndex == 1 { - closed = false - } - - return closed, nil -} - -func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be closed") -} - -func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be open") -} diff --git a/kit/github.com/onsi/gomega/matchers/be_closed_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_closed_matcher_test.go deleted file mode 100644 index e4d20a7..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_closed_matcher_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("BeClosedMatcher", func() { - Context("when passed a channel", func() { - It("should do the right thing", func() { - openChannel := make(chan bool) - Ω(openChannel).ShouldNot(BeClosed()) - - var openReaderChannel <-chan bool - openReaderChannel = openChannel - Ω(openReaderChannel).ShouldNot(BeClosed()) - - closedChannel := make(chan bool) - close(closedChannel) - - Ω(closedChannel).Should(BeClosed()) - - var closedReaderChannel <-chan bool - closedReaderChannel = closedChannel - Ω(closedReaderChannel).Should(BeClosed()) - }) - }) - - Context("when passed a send-only channel", func() { - It("should error", func() { - openChannel := make(chan bool) - var openWriterChannel chan<- bool - openWriterChannel = openChannel - - success, err := (&BeClosedMatcher{}).Match(openWriterChannel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - closedChannel := make(chan bool) - close(closedChannel) - - var closedWriterChannel chan<- bool - closedWriterChannel = closedChannel - - success, err = (&BeClosedMatcher{}).Match(closedWriterChannel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - }) - }) - - Context("when passed something else", func() { - It("should error", func() { - var nilChannel chan bool - - success, err := (&BeClosedMatcher{}).Match(nilChannel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeClosedMatcher{}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeClosedMatcher{}).Match(7) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_empty_matcher.go b/kit/github.com/onsi/gomega/matchers/be_empty_matcher.go deleted file mode 100644 index fcbadce..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ /dev/null @@ -1,26 +0,0 @@ -package matchers - -import ( - "fmt" 
- "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type BeEmptyMatcher struct { -} - -func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { - length, ok := lengthOf(actual) - if !ok { - return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) - } - - return length == 0, nil -} - -func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be empty") -} - -func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be empty") -} diff --git a/kit/github.com/onsi/gomega/matchers/be_empty_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_empty_matcher_test.go deleted file mode 100644 index 3474e5c..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_empty_matcher_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("BeEmpty", func() { - Context("when passed a supported type", func() { - It("should do the right thing", func() { - Ω("").Should(BeEmpty()) - Ω(" ").ShouldNot(BeEmpty()) - - Ω([0]int{}).Should(BeEmpty()) - Ω([1]int{1}).ShouldNot(BeEmpty()) - - Ω([]int{}).Should(BeEmpty()) - Ω([]int{1}).ShouldNot(BeEmpty()) - - Ω(map[string]int{}).Should(BeEmpty()) - Ω(map[string]int{"a": 1}).ShouldNot(BeEmpty()) - - c := make(chan bool, 1) - Ω(c).Should(BeEmpty()) - c <- true - Ω(c).ShouldNot(BeEmpty()) - }) - }) - - Context("when passed a correctly typed nil", func() { - It("should be true", func() { - var nilSlice []int - Ω(nilSlice).Should(BeEmpty()) - - var nilMap map[int]string - Ω(nilMap).Should(BeEmpty()) - }) - }) - - Context("when passed an unsupported type", func() { - It("should error", func() { - success, err := (&BeEmptyMatcher{}).Match(0) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeEmptyMatcher{}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/kit/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go deleted file mode 100644 index a2ade23..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go +++ /dev/null @@ -1,33 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type BeEquivalentToMatcher struct { - Expected interface{} -} - -func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil && matcher.Expected == nil { - return false, fmt.Errorf("Both actual and expected must not be nil.") - } - - convertedActual := actual - - if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) { - convertedActual = 
reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface() - } - - return reflect.DeepEqual(convertedActual, matcher.Expected), nil -} - -func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be equivalent to", matcher.Expected) -} - -func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be equivalent to", matcher.Expected) -} diff --git a/kit/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go deleted file mode 100644 index 5045f67..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_equivalent_to_matcher_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("BeEquivalentTo", func() { - Context("when asserting that nil is equivalent to nil", func() { - It("should error", func() { - success, err := (&BeEquivalentToMatcher{Expected: nil}).Match(nil) - - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("When asserting on nil", func() { - It("should do the right thing", func() { - Ω("foo").ShouldNot(BeEquivalentTo(nil)) - Ω(nil).ShouldNot(BeEquivalentTo(3)) - Ω([]int{1, 2}).ShouldNot(BeEquivalentTo(nil)) - }) - }) - - Context("When asserting on type aliases", func() { - It("should the right thing", func() { - Ω(StringAlias("foo")).Should(BeEquivalentTo("foo")) - Ω("foo").Should(BeEquivalentTo(StringAlias("foo"))) - Ω(StringAlias("foo")).ShouldNot(BeEquivalentTo("bar")) - Ω("foo").ShouldNot(BeEquivalentTo(StringAlias("bar"))) - }) - }) - - Context("When asserting on numbers", func() { - It("should convert actual to expected and do the right thing", func() { - 
Ω(5).Should(BeEquivalentTo(5)) - Ω(5.0).Should(BeEquivalentTo(5.0)) - Ω(5).Should(BeEquivalentTo(5.0)) - - Ω(5).ShouldNot(BeEquivalentTo("5")) - Ω(5).ShouldNot(BeEquivalentTo(3)) - - //Here be dragons! - Ω(5.1).Should(BeEquivalentTo(5)) - Ω(5).ShouldNot(BeEquivalentTo(5.1)) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_false_matcher.go b/kit/github.com/onsi/gomega/matchers/be_false_matcher.go deleted file mode 100644 index ee9b5a1..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_false_matcher.go +++ /dev/null @@ -1,25 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type BeFalseMatcher struct { -} - -func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { - if !isBool(actual) { - return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) - } - - return actual == false, nil -} - -func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be false") -} - -func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be false") -} diff --git a/kit/github.com/onsi/gomega/matchers/be_false_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_false_matcher_test.go deleted file mode 100644 index d76e06a..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_false_matcher_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("BeFalse", func() { - It("should handle true and false correctly", func() { - Ω(true).ShouldNot(BeFalse()) - Ω(false).Should(BeFalse()) - }) - - It("should only support booleans", func() { - success, err := (&BeFalseMatcher{}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_nil_matcher.go b/kit/github.com/onsi/gomega/matchers/be_nil_matcher.go deleted file mode 100644 index 65b469a..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_nil_matcher.go +++ /dev/null @@ -1,18 +0,0 @@ -package matchers - -import "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - -type BeNilMatcher struct { -} - -func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { - return isNil(actual), nil -} - -func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be nil") -} - -func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be nil") -} diff --git a/kit/github.com/onsi/gomega/matchers/be_nil_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_nil_matcher_test.go deleted file mode 100644 index 573c183..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_nil_matcher_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("BeNil", func() { - It("should succeed when passed nil", func() { - Ω(nil).Should(BeNil()) - }) - - It("should succeed when passed a typed nil", func() { - var a []int - Ω(a).Should(BeNil()) - }) - - It("should succeed when passing nil pointer", func() { - var f *struct{} - Ω(f).Should(BeNil()) - }) - - It("should not succeed when not passed nil", func() { - Ω(0).ShouldNot(BeNil()) - Ω(false).ShouldNot(BeNil()) - Ω("").ShouldNot(BeNil()) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/kit/github.com/onsi/gomega/matchers/be_numerically_matcher.go deleted file mode 100644 index 5a53c36..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ /dev/null @@ -1,119 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "math" -) - -type BeNumericallyMatcher struct { - Comparator string - CompareTo []interface{} -} - -func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0]) -} - -func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0]) -} - -func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { - if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { - return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) - } - if !isNumber(actual) { - return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1)) - } - if !isNumber(matcher.CompareTo[0]) { - return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[0], 1)) - } - if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { - return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) - } - - switch matcher.Comparator { - case "==", "~", ">", ">=", "<", "<=": - default: - return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) - } - - if isFloat(actual) || isFloat(matcher.CompareTo[0]) { - var secondOperand float64 = 1e-8 - if len(matcher.CompareTo) == 2 { - secondOperand = toFloat(matcher.CompareTo[1]) - } - success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand) - } else if isInteger(actual) { - var secondOperand int64 = 0 - if len(matcher.CompareTo) == 2 { - secondOperand = toInteger(matcher.CompareTo[1]) - } - success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand) - } else if isUnsignedInteger(actual) { - var secondOperand uint64 = 0 - if len(matcher.CompareTo) == 2 { - secondOperand = toUnsignedInteger(matcher.CompareTo[1]) - } - success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand) - } else { - return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1)) - } - - return success, nil -} - -func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) { - switch matcher.Comparator { - case "==", "~": - diff := actual - compareTo - return -threshold <= diff && diff <= threshold - case ">": - return (actual > compareTo) - case ">=": - return (actual >= compareTo) - case "<": - return (actual < compareTo) - case "<=": - return (actual <= compareTo) - } - return false -} - -func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) { - switch matcher.Comparator { - case "==", "~": - if actual 
< compareTo { - actual, compareTo = compareTo, actual - } - return actual-compareTo <= threshold - case ">": - return (actual > compareTo) - case ">=": - return (actual >= compareTo) - case "<": - return (actual < compareTo) - case "<=": - return (actual <= compareTo) - } - return false -} - -func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) { - switch matcher.Comparator { - case "~": - return math.Abs(actual-compareTo) <= threshold - case "==": - return (actual == compareTo) - case ">": - return (actual > compareTo) - case ">=": - return (actual >= compareTo) - case "<": - return (actual < compareTo) - case "<=": - return (actual <= compareTo) - } - return false -} diff --git a/kit/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go deleted file mode 100644 index 6f6cc94..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_numerically_matcher_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("BeNumerically", func() { - Context("when passed a number", func() { - It("should support ==", func() { - Ω(uint32(5)).Should(BeNumerically("==", 5)) - Ω(float64(5.0)).Should(BeNumerically("==", 5)) - Ω(int8(5)).Should(BeNumerically("==", 5)) - }) - - It("should not have false positives", func() { - Ω(5.1).ShouldNot(BeNumerically("==", 5)) - Ω(5).ShouldNot(BeNumerically("==", 5.1)) - }) - - It("should support >", func() { - Ω(uint32(5)).Should(BeNumerically(">", 4)) - Ω(float64(5.0)).Should(BeNumerically(">", 4.9)) - Ω(int8(5)).Should(BeNumerically(">", 4)) - - Ω(uint32(5)).ShouldNot(BeNumerically(">", 5)) - Ω(float64(5.0)).ShouldNot(BeNumerically(">", 5.0)) - Ω(int8(5)).ShouldNot(BeNumerically(">", 5)) - }) - - It("should support <", func() { - Ω(uint32(5)).Should(BeNumerically("<", 6)) - Ω(float64(5.0)).Should(BeNumerically("<", 5.1)) - Ω(int8(5)).Should(BeNumerically("<", 6)) - - Ω(uint32(5)).ShouldNot(BeNumerically("<", 5)) - Ω(float64(5.0)).ShouldNot(BeNumerically("<", 5.0)) - Ω(int8(5)).ShouldNot(BeNumerically("<", 5)) - }) - - It("should support >=", func() { - Ω(uint32(5)).Should(BeNumerically(">=", 4)) - Ω(float64(5.0)).Should(BeNumerically(">=", 4.9)) - Ω(int8(5)).Should(BeNumerically(">=", 4)) - - Ω(uint32(5)).Should(BeNumerically(">=", 5)) - Ω(float64(5.0)).Should(BeNumerically(">=", 5.0)) - Ω(int8(5)).Should(BeNumerically(">=", 5)) - - Ω(uint32(5)).ShouldNot(BeNumerically(">=", 6)) - Ω(float64(5.0)).ShouldNot(BeNumerically(">=", 5.1)) - Ω(int8(5)).ShouldNot(BeNumerically(">=", 6)) - }) - - It("should support <=", func() { - Ω(uint32(5)).Should(BeNumerically("<=", 6)) - Ω(float64(5.0)).Should(BeNumerically("<=", 5.1)) - Ω(int8(5)).Should(BeNumerically("<=", 6)) - - Ω(uint32(5)).Should(BeNumerically("<=", 5)) - Ω(float64(5.0)).Should(BeNumerically("<=", 5.0)) - Ω(int8(5)).Should(BeNumerically("<=", 5)) - - Ω(uint32(5)).ShouldNot(BeNumerically("<=", 4)) - 
Ω(float64(5.0)).ShouldNot(BeNumerically("<=", 4.9)) - Ω(int8(5)).Should(BeNumerically("<=", 5)) - }) - - Context("when passed ~", func() { - Context("when passed a float", func() { - Context("and there is no precision parameter", func() { - It("should default to 1e-8", func() { - Ω(5.00000001).Should(BeNumerically("~", 5.00000002)) - Ω(5.00000001).ShouldNot(BeNumerically("~", 5.0000001)) - }) - }) - - Context("and there is a precision parameter", func() { - It("should use the precision parameter", func() { - Ω(5.1).Should(BeNumerically("~", 5.19, 0.1)) - Ω(5.1).Should(BeNumerically("~", 5.01, 0.1)) - Ω(5.1).ShouldNot(BeNumerically("~", 5.22, 0.1)) - Ω(5.1).ShouldNot(BeNumerically("~", 4.98, 0.1)) - }) - }) - }) - - Context("when passed an int/uint", func() { - Context("and there is no precision parameter", func() { - It("should just do strict equality", func() { - Ω(5).Should(BeNumerically("~", 5)) - Ω(5).ShouldNot(BeNumerically("~", 6)) - Ω(uint(5)).ShouldNot(BeNumerically("~", 6)) - }) - }) - - Context("and there is a precision parameter", func() { - It("should use precision paramter", func() { - Ω(5).Should(BeNumerically("~", 6, 2)) - Ω(5).ShouldNot(BeNumerically("~", 8, 2)) - Ω(uint(5)).Should(BeNumerically("~", 6, 1)) - }) - }) - }) - }) - }) - - Context("when passed a non-number", func() { - It("should error", func() { - success, err := (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{5}}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeNumericallyMatcher{Comparator: "=="}).Match(5) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeNumericallyMatcher{Comparator: "~", CompareTo: []interface{}{3.0, "foo"}}).Match(5.0) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match(5) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err 
= (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{"bar"}}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{nil}}).Match(0) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeNumericallyMatcher{Comparator: "==", CompareTo: []interface{}{0}}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed an unsupported comparator", func() { - It("should error", func() { - success, err := (&BeNumericallyMatcher{Comparator: "!=", CompareTo: []interface{}{5}}).Match(4) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_sent_matcher.go b/kit/github.com/onsi/gomega/matchers/be_sent_matcher.go deleted file mode 100644 index 89efe4e..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_sent_matcher.go +++ /dev/null @@ -1,71 +0,0 @@ -package matchers - -import ( - "fmt" - "reflect" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type BeSentMatcher struct { - Arg interface{} - channelClosed bool -} - -func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { - if !isChan(actual) { - return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1)) - } - - channelType := reflect.TypeOf(actual) - channelValue := reflect.ValueOf(actual) - - if channelType.ChanDir() == reflect.RecvDir { - return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. 
Got:\n%s", format.Object(actual, 1)) - } - - argType := reflect.TypeOf(matcher.Arg) - assignable := argType.AssignableTo(channelType.Elem()) - - if !assignable { - return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1)) - } - - argValue := reflect.ValueOf(matcher.Arg) - - defer func() { - if e := recover(); e != nil { - success = false - err = fmt.Errorf("Cannot send to a closed channel") - matcher.channelClosed = true - } - }() - - winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{ - reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue}, - reflect.SelectCase{Dir: reflect.SelectDefault}, - }) - - var didSend bool - if winnerIndex == 0 { - didSend = true - } - - return didSend, nil -} - -func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to send:", matcher.Arg) -} - -func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to send:", matcher.Arg) -} - -func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { - if !isChan(actual) { - return false - } - - return !matcher.channelClosed -} diff --git a/kit/github.com/onsi/gomega/matchers/be_sent_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_sent_matcher_test.go deleted file mode 100644 index f3dc250..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_sent_matcher_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package matchers_test - -import ( - "time" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" - - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("BeSent", func() { - Context("when passed a channel and a matching type", func() { - Context("when the channel is ready to receive", func() { - It("should succeed and send the value down the channel", func() { - c := make(chan string) - d := make(chan string) - go func() { - val := <-c - d <- val - }() - - time.Sleep(10 * time.Millisecond) - - Ω(c).Should(BeSent("foo")) - Eventually(d).Should(Receive(Equal("foo"))) - }) - - It("should succeed (with a buffered channel)", func() { - c := make(chan string, 1) - Ω(c).Should(BeSent("foo")) - Ω(<-c).Should(Equal("foo")) - }) - }) - - Context("when the channel is not ready to receive", func() { - It("should fail and not send down the channel", func() { - c := make(chan string) - Ω(c).ShouldNot(BeSent("foo")) - Consistently(c).ShouldNot(Receive()) - }) - }) - - Context("when the channel is eventually ready to receive", func() { - It("should succeed", func() { - c := make(chan string) - d := make(chan string) - go func() { - time.Sleep(30 * time.Millisecond) - val := <-c - d <- val - }() - - Eventually(c).Should(BeSent("foo")) - Eventually(d).Should(Receive(Equal("foo"))) - }) - }) - - Context("when the channel is closed", func() { - It("should error", func() { - c := make(chan string) - close(c) - success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - - It("should short-circuit Eventually", func() { - c := make(chan string) - close(c) - - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(c, 10.0).Should(BeSent("foo")) - }) - Ω(failures).Should(HaveLen(1)) - Ω(time.Since(t)).Should(BeNumerically("<", time.Second)) - }) - }) - }) - - Context("when passed a channel and a non-matching type", func() { - It("should error", func() { - success, err := (&BeSentMatcher{Arg: "foo"}).Match(make(chan int, 1)) - Ω(success).Should(BeFalse()) - 
Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed a receive-only channel", func() { - It("should error", func() { - var c <-chan string - c = make(chan string, 1) - success, err := (&BeSentMatcher{Arg: "foo"}).Match(c) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed a nonchannel", func() { - It("should error", func() { - success, err := (&BeSentMatcher{Arg: "foo"}).Match("bar") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/kit/github.com/onsi/gomega/matchers/be_temporally_matcher.go deleted file mode 100644 index 5a112f9..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_temporally_matcher.go +++ /dev/null @@ -1,65 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "time" -) - -type BeTemporallyMatcher struct { - Comparator string - CompareTo time.Time - Threshold []time.Duration -} - -func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) -} - -func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) -} - -func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { - // predicate to test for time.Time type - isTime := func(t interface{}) bool { - _, ok := t.(time.Time) - return ok - } - - if !isTime(actual) { - return false, fmt.Errorf("Expected a time.Time. 
Got:\n%s", format.Object(actual, 1)) - } - - switch matcher.Comparator { - case "==", "~", ">", ">=", "<", "<=": - default: - return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator) - } - - var threshold = time.Millisecond - if len(matcher.Threshold) == 1 { - threshold = matcher.Threshold[0] - } - - return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil -} - -func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) { - switch matcher.Comparator { - case "==": - return actual.Equal(compareTo) - case "~": - diff := actual.Sub(compareTo) - return -threshold <= diff && diff <= threshold - case ">": - return actual.After(compareTo) - case ">=": - return !actual.Before(compareTo) - case "<": - return actual.Before(compareTo) - case "<=": - return !actual.After(compareTo) - } - return false -} diff --git a/kit/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go deleted file mode 100644 index e147325..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_temporally_matcher_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" - "time" -) - -var _ = Describe("BeTemporally", func() { - - var t0, t1, t2 time.Time - BeforeEach(func() { - t0 = time.Now() - t1 = t0.Add(time.Second) - t2 = t0.Add(-time.Second) - }) - - Context("When comparing times", func() { - - It("should support ==", func() { - Ω(t0).Should(BeTemporally("==", t0)) - Ω(t1).ShouldNot(BeTemporally("==", t0)) - Ω(t0).ShouldNot(BeTemporally("==", t1)) - Ω(t0).ShouldNot(BeTemporally("==", time.Time{})) - }) - - It("should support >", func() { - Ω(t0).Should(BeTemporally(">", t2)) - Ω(t0).ShouldNot(BeTemporally(">", t0)) - Ω(t2).ShouldNot(BeTemporally(">", t0)) - }) - - It("should support <", func() { - Ω(t0).Should(BeTemporally("<", t1)) - Ω(t0).ShouldNot(BeTemporally("<", t0)) - Ω(t1).ShouldNot(BeTemporally("<", t0)) - }) - - It("should support >=", func() { - Ω(t0).Should(BeTemporally(">=", t2)) - Ω(t0).Should(BeTemporally(">=", t0)) - Ω(t0).ShouldNot(BeTemporally(">=", t1)) - }) - - It("should support <=", func() { - Ω(t0).Should(BeTemporally("<=", t1)) - Ω(t0).Should(BeTemporally("<=", t0)) - Ω(t0).ShouldNot(BeTemporally("<=", t2)) - }) - - Context("when passed ~", func() { - Context("and there is no precision parameter", func() { - BeforeEach(func() { - t1 = t0.Add(time.Millisecond / 2) - t2 = t0.Add(-2 * time.Millisecond) - }) - It("should approximate", func() { - Ω(t0).Should(BeTemporally("~", t0)) - Ω(t0).Should(BeTemporally("~", t1)) - Ω(t0).ShouldNot(BeTemporally("~", t2)) - }) - }) - - Context("and there is a precision parameter", func() { - BeforeEach(func() { - t2 = t0.Add(3 * time.Second) - }) - It("should use precision paramter", func() { - d := 2 * time.Second - Ω(t0).Should(BeTemporally("~", t0, d)) - Ω(t0).Should(BeTemporally("~", t1, d)) - Ω(t0).ShouldNot(BeTemporally("~", t2, d)) - }) - }) - }) - }) - - Context("when passed a non-time", func() { - It("should error", func() { - success, err := (&BeTemporallyMatcher{Comparator: "==", CompareTo: 
t0}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&BeTemporallyMatcher{Comparator: "=="}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed an unsupported comparator", func() { - It("should error", func() { - success, err := (&BeTemporallyMatcher{Comparator: "!=", CompareTo: t0}).Match(t2) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_true_matcher.go b/kit/github.com/onsi/gomega/matchers/be_true_matcher.go deleted file mode 100644 index 1d8f54e..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_true_matcher.go +++ /dev/null @@ -1,25 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type BeTrueMatcher struct { -} - -func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { - if !isBool(actual) { - return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1)) - } - - return actual.(bool), nil -} - -func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be true") -} - -func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be true") -} diff --git a/kit/github.com/onsi/gomega/matchers/be_true_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_true_matcher_test.go deleted file mode 100644 index 8fd44ee..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_true_matcher_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("BeTrue", func() { - It("should handle true and false correctly", func() { - Ω(true).Should(BeTrue()) - Ω(false).ShouldNot(BeTrue()) - }) - - It("should only support booleans", func() { - success, err := (&BeTrueMatcher{}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/be_zero_matcher.go b/kit/github.com/onsi/gomega/matchers/be_zero_matcher.go deleted file mode 100644 index f767168..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_zero_matcher.go +++ /dev/null @@ -1,27 +0,0 @@ -package matchers - -import ( - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type BeZeroMatcher struct { -} - -func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil { - return true, nil - } - zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() - - return reflect.DeepEqual(zeroValue, actual), nil - -} - -func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be zero-valued") -} - -func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be zero-valued") -} diff --git a/kit/github.com/onsi/gomega/matchers/be_zero_matcher_test.go b/kit/github.com/onsi/gomega/matchers/be_zero_matcher_test.go deleted file mode 100644 index 10f4223..0000000 --- a/kit/github.com/onsi/gomega/matchers/be_zero_matcher_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("BeZero", func() { - It("should succeed if the passed in object is the zero value for its type", func() { - Ω(nil).Should(BeZero()) - - Ω("").Should(BeZero()) - Ω(" ").ShouldNot(BeZero()) - - Ω(0).Should(BeZero()) - Ω(1).ShouldNot(BeZero()) - - Ω(0.0).Should(BeZero()) - Ω(0.1).ShouldNot(BeZero()) - - // Ω([]int{}).Should(BeZero()) - Ω([]int{1}).ShouldNot(BeZero()) - - // Ω(map[string]int{}).Should(BeZero()) - Ω(map[string]int{"a": 1}).ShouldNot(BeZero()) - - Ω(myCustomType{}).Should(BeZero()) - Ω(myCustomType{s: "a"}).ShouldNot(BeZero()) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/consist_of.go b/kit/github.com/onsi/gomega/matchers/consist_of.go deleted file mode 100644 index b09a263..0000000 --- a/kit/github.com/onsi/gomega/matchers/consist_of.go +++ /dev/null @@ -1,80 +0,0 @@ -package matchers - -import ( - "fmt" - "reflect" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" -) - -type ConsistOfMatcher struct { - Elements []interface{} -} - -func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) - } - - elements := matcher.Elements - if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) { - elements = []interface{}{} - value := reflect.ValueOf(matcher.Elements[0]) - for i := 0; i < value.Len(); i++ { - elements = append(elements, value.Index(i).Interface()) - } - } - - matchers := []interface{}{} - for _, element := range elements { - matcher, isMatcher := element.(omegaMatcher) - if !isMatcher { - matcher = &EqualMatcher{Expected: element} - } - matchers = append(matchers, matcher) - } - - values := matcher.valuesOf(actual) - - if len(values) != len(matchers) { - return false, nil - } - - neighbours := func(v, m interface{}) (bool, error) { - match, err := m.(omegaMatcher).Match(v) - return match && err == nil, nil - } - - bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours) - if err != nil { - return false, err - } - - return len(bipartiteGraph.LargestMatching()) == len(values), nil -} - -func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} { - value := reflect.ValueOf(actual) - values := []interface{}{} - if isMap(actual) { - keys := value.MapKeys() - for i := 0; i < value.Len(); i++ { - values = append(values, value.MapIndex(keys[i]).Interface()) - } - } else { - for i := 0; i < value.Len(); i++ { - values = append(values, value.Index(i).Interface()) - } - } - - return values -} - -func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to consist of", matcher.Elements) -} - -func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to consist of", matcher.Elements) -} diff --git a/kit/github.com/onsi/gomega/matchers/consist_of_test.go b/kit/github.com/onsi/gomega/matchers/consist_of_test.go deleted file mode 100644 index 749b698..0000000 --- a/kit/github.com/onsi/gomega/matchers/consist_of_test.go +++ /dev/null 
@@ -1,75 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -var _ = Describe("ConsistOf", func() { - Context("with a slice", func() { - It("should do the right thing", func() { - Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) - Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) - Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) - Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) - Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) - }) - }) - - Context("with an array", func() { - It("should do the right thing", func() { - Ω([3]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", "bar", "baz")) - Ω([3]string{"foo", "bar", "baz"}).Should(ConsistOf("baz", "bar", "foo")) - Ω([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) - Ω([3]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("baz", "foo")) - }) - }) - - Context("with a map", func() { - It("should apply to the values", func() { - Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("foo", "bar", "baz")) - Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).Should(ConsistOf("baz", "bar", "foo")) - Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "bar", "foo", "foo")) - Ω(map[int]string{1: "foo", 2: "bar", 3: "baz"}).ShouldNot(ConsistOf("baz", "foo")) - }) - - }) - - Context("with anything else", func() { - It("should error", func() { - failures := InterceptGomegaFailures(func() { - Ω("foo").Should(ConsistOf("f", "o", "o")) - }) - - Ω(failures).Should(HaveLen(1)) - }) - }) - - Context("when passed matchers", func() { - It("should pass if the matchers pass", func() { - Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), "baz")) - Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"))) - Ω([]string{"foo", 
"bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("foo"))) - Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("^ba"))) - Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf("foo", MatchRegexp("^ba"), MatchRegexp("turducken"))) - }) - - It("should not depend on the order of the matchers", func() { - Ω([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(1), ContainElement(2))) - Ω([][]int{[]int{1, 2}, []int{2}}).Should(ConsistOf(ContainElement(2), ContainElement(1))) - }) - - Context("when a matcher errors", func() { - It("should soldier on", func() { - Ω([]string{"foo", "bar", "baz"}).ShouldNot(ConsistOf(BeFalse(), "foo", "bar")) - Ω([]interface{}{"foo", "bar", false}).Should(ConsistOf(BeFalse(), ContainSubstring("foo"), "bar")) - }) - }) - }) - - Context("when passed exactly one argument, and that argument is a slice", func() { - It("should match against the elements of that argument", func() { - Ω([]string{"foo", "bar", "baz"}).Should(ConsistOf([]string{"foo", "bar", "baz"})) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/contain_element_matcher.go b/kit/github.com/onsi/gomega/matchers/contain_element_matcher.go deleted file mode 100644 index 02c4a5a..0000000 --- a/kit/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ /dev/null @@ -1,53 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type ContainElementMatcher struct { - Element interface{} -} - -func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. 
Got:\n%s", format.Object(actual, 1)) - } - - elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) - if !elementIsMatcher { - elemMatcher = &EqualMatcher{Expected: matcher.Element} - } - - value := reflect.ValueOf(actual) - var keys []reflect.Value - if isMap(actual) { - keys = value.MapKeys() - } - for i := 0; i < value.Len(); i++ { - var success bool - var err error - if isMap(actual) { - success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface()) - } else { - success, err = elemMatcher.Match(value.Index(i).Interface()) - } - if err != nil { - return false, fmt.Errorf("ContainElement's element matcher failed with:\n\t%s", err.Error()) - } - if success { - return true, nil - } - } - - return false, nil -} - -func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to contain element matching", matcher.Element) -} - -func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to contain element matching", matcher.Element) -} diff --git a/kit/github.com/onsi/gomega/matchers/contain_element_matcher_test.go b/kit/github.com/onsi/gomega/matchers/contain_element_matcher_test.go deleted file mode 100644 index 4a2e631..0000000 --- a/kit/github.com/onsi/gomega/matchers/contain_element_matcher_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("ContainElement", func() { - Context("when passed a supported type", func() { - Context("and expecting a non-matcher", func() { - It("should do the right thing", func() { - Ω([2]int{1, 2}).Should(ContainElement(2)) - Ω([2]int{1, 2}).ShouldNot(ContainElement(3)) - - Ω([]int{1, 2}).Should(ContainElement(2)) - Ω([]int{1, 2}).ShouldNot(ContainElement(3)) - - Ω(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(2)) - Ω(map[int]int{3: 1, 4: 2}).ShouldNot(ContainElement(3)) - - arr := make([]myCustomType, 2) - arr[0] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}} - arr[1] = myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "c"}} - Ω(arr).Should(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) - Ω(arr).ShouldNot(ContainElement(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"b", "c"}})) - }) - }) - - Context("and expecting a matcher", func() { - It("should pass each element through the matcher", func() { - Ω([]int{1, 2, 3}).Should(ContainElement(BeNumerically(">=", 3))) - Ω([]int{1, 2, 3}).ShouldNot(ContainElement(BeNumerically(">", 3))) - Ω(map[string]int{"foo": 1, "bar": 2}).Should(ContainElement(BeNumerically(">=", 2))) - Ω(map[string]int{"foo": 1, "bar": 2}).ShouldNot(ContainElement(BeNumerically(">", 2))) - }) - - It("should fail if the matcher ever fails", func() { - actual := []interface{}{1, 2, "3", 4} - success, err := (&ContainElementMatcher{Element: BeNumerically(">=", 3)}).Match(actual) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - }) - - Context("when passed a correctly typed nil", func() { - It("should operate succesfully on the passed in value", func() { - var nilSlice []int - Ω(nilSlice).ShouldNot(ContainElement(1)) - - var nilMap map[int]string - Ω(nilMap).ShouldNot(ContainElement("foo")) - }) - }) - - Context("when passed an unsupported type", func() { - It("should error", 
func() { - success, err := (&ContainElementMatcher{Element: 0}).Match(0) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&ContainElementMatcher{Element: 0}).Match("abc") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&ContainElementMatcher{Element: 0}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/kit/github.com/onsi/gomega/matchers/contain_substring_matcher.go deleted file mode 100644 index 1b83970..0000000 --- a/kit/github.com/onsi/gomega/matchers/contain_substring_matcher.go +++ /dev/null @@ -1,37 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "strings" -) - -type ContainSubstringMatcher struct { - Substr string - Args []interface{} -} - -func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { - actualString, ok := toString(actual) - if !ok { - return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) - } - - return strings.Contains(actualString, matcher.stringToMatch()), nil -} - -func (matcher *ContainSubstringMatcher) stringToMatch() string { - stringToMatch := matcher.Substr - if len(matcher.Args) > 0 { - stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...) 
- } - return stringToMatch -} - -func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to contain substring", matcher.stringToMatch()) -} - -func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to contain substring", matcher.stringToMatch()) -} diff --git a/kit/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go b/kit/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go deleted file mode 100644 index f880271..0000000 --- a/kit/github.com/onsi/gomega/matchers/contain_substring_matcher_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("ContainSubstringMatcher", func() { - Context("when actual is a string", func() { - It("should match against the string", func() { - Ω("Marvelous").Should(ContainSubstring("rve")) - Ω("Marvelous").ShouldNot(ContainSubstring("boo")) - }) - }) - - Context("when the matcher is called with multiple arguments", func() { - It("should pass the string and arguments to sprintf", func() { - Ω("Marvelous3").Should(ContainSubstring("velous%d", 3)) - }) - }) - - Context("when actual is a stringer", func() { - It("should call the stringer and match agains the returned string", func() { - Ω(&myStringer{a: "Abc3"}).Should(ContainSubstring("bc3")) - }) - }) - - Context("when actual is neither a string nor a stringer", func() { - It("should error", func() { - success, err := (&ContainSubstringMatcher{Substr: "2"}).Match(2) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/equal_matcher.go b/kit/github.com/onsi/gomega/matchers/equal_matcher.go deleted file mode 100644 index aa5dc2d..0000000 --- 
a/kit/github.com/onsi/gomega/matchers/equal_matcher.go +++ /dev/null @@ -1,26 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type EqualMatcher struct { - Expected interface{} -} - -func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil && matcher.Expected == nil { - return false, fmt.Errorf("Refusing to compare to .") - } - return reflect.DeepEqual(actual, matcher.Expected), nil -} - -func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to equal", matcher.Expected) -} - -func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) -} diff --git a/kit/github.com/onsi/gomega/matchers/equal_matcher_test.go b/kit/github.com/onsi/gomega/matchers/equal_matcher_test.go deleted file mode 100644 index 65bbf7e..0000000 --- a/kit/github.com/onsi/gomega/matchers/equal_matcher_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package matchers_test - -import ( - "errors" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("Equal", func() { - Context("when asserting that nil equals nil", func() { - It("should error", func() { - success, err := (&EqualMatcher{Expected: nil}).Match(nil) - - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("When asserting equality between objects", func() { - It("should do the right thing", func() { - Ω(5).Should(Equal(5)) - Ω(5.0).Should(Equal(5.0)) - - Ω(5).ShouldNot(Equal("5")) - Ω(5).ShouldNot(Equal(5.0)) - Ω(5).ShouldNot(Equal(3)) - - Ω("5").Should(Equal("5")) - Ω([]int{1, 2}).Should(Equal([]int{1, 2})) - Ω([]int{1, 2}).ShouldNot(Equal([]int{2, 1})) - Ω(map[string]string{"a": "b", "c": "d"}).Should(Equal(map[string]string{"a": "b", "c": "d"})) - Ω(map[string]string{"a": "b", "c": "d"}).ShouldNot(Equal(map[string]string{"a": "b", "c": "e"})) - Ω(errors.New("foo")).Should(Equal(errors.New("foo"))) - Ω(errors.New("foo")).ShouldNot(Equal(errors.New("bar"))) - - Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).Should(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}})) - Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "bar", n: 3, f: 2.0, arr: []string{"a", "b"}})) - Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 2, f: 2.0, arr: []string{"a", "b"}})) - Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 3.0, arr: []string{"a", "b"}})) - Ω(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b"}}).ShouldNot(Equal(myCustomType{s: "foo", n: 3, f: 2.0, arr: []string{"a", "b", "c"}})) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/have_key_matcher.go b/kit/github.com/onsi/gomega/matchers/have_key_matcher.go deleted file mode 100644 index 0b39c29..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_key_matcher.go +++ 
/dev/null @@ -1,53 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type HaveKeyMatcher struct { - Key interface{} -} - -func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1)) - } - - keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) - if !keyIsMatcher { - keyMatcher = &EqualMatcher{Expected: matcher.Key} - } - - keys := reflect.ValueOf(actual).MapKeys() - for i := 0; i < len(keys); i++ { - success, err := keyMatcher.Match(keys[i].Interface()) - if err != nil { - return false, fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) - } - if success { - return true, nil - } - } - - return false, nil -} - -func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { - switch matcher.Key.(type) { - case omegaMatcher: - return format.Message(actual, "to have key matching", matcher.Key) - default: - return format.Message(actual, "to have key", matcher.Key) - } -} - -func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { - switch matcher.Key.(type) { - case omegaMatcher: - return format.Message(actual, "not to have key matching", matcher.Key) - default: - return format.Message(actual, "not to have key", matcher.Key) - } -} diff --git a/kit/github.com/onsi/gomega/matchers/have_key_matcher_test.go b/kit/github.com/onsi/gomega/matchers/have_key_matcher_test.go deleted file mode 100644 index 95b8cf2..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_key_matcher_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("HaveKey", func() { - var ( - stringKeys map[string]int - intKeys map[int]string - objKeys map[*myCustomType]string - - customA *myCustomType - customB *myCustomType - ) - BeforeEach(func() { - stringKeys = map[string]int{"foo": 2, "bar": 3} - intKeys = map[int]string{2: "foo", 3: "bar"} - - customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} - customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} - objKeys = map[*myCustomType]string{customA: "aardvark", customB: "kangaroo"} - }) - - Context("when passed a map", func() { - It("should do the right thing", func() { - Ω(stringKeys).Should(HaveKey("foo")) - Ω(stringKeys).ShouldNot(HaveKey("baz")) - - Ω(intKeys).Should(HaveKey(2)) - Ω(intKeys).ShouldNot(HaveKey(4)) - - Ω(objKeys).Should(HaveKey(customA)) - Ω(objKeys).Should(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}})) - Ω(objKeys).ShouldNot(HaveKey(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}})) - }) - }) - - Context("when passed a correctly typed nil", func() { - It("should operate succesfully on the passed in value", func() { - var nilMap map[int]string - Ω(nilMap).ShouldNot(HaveKey("foo")) - }) - }) - - Context("when the passed in key is actually a matcher", func() { - It("should pass each element through the matcher", func() { - Ω(stringKeys).Should(HaveKey(ContainSubstring("oo"))) - Ω(stringKeys).ShouldNot(HaveKey(ContainSubstring("foobar"))) - }) - - It("should fail if the matcher ever fails", func() { - actual := map[int]string{1: "a", 3: "b", 2: "c"} - success, err := (&HaveKeyMatcher{Key: ContainSubstring("ar")}).Match(actual) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed something that is not a map", func() { - It("should error", func() { - success, err := (&HaveKeyMatcher{Key: "foo"}).Match([]string{"foo"}) - Ω(success).Should(BeFalse()) 
- Ω(err).Should(HaveOccurred()) - - success, err = (&HaveKeyMatcher{Key: "foo"}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/kit/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go deleted file mode 100644 index 5082ab3..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ /dev/null @@ -1,73 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type HaveKeyWithValueMatcher struct { - Key interface{} - Value interface{} -} - -func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) - } - - keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) - if !keyIsMatcher { - keyMatcher = &EqualMatcher{Expected: matcher.Key} - } - - valueMatcher, valueIsMatcher := matcher.Value.(omegaMatcher) - if !valueIsMatcher { - valueMatcher = &EqualMatcher{Expected: matcher.Value} - } - - keys := reflect.ValueOf(actual).MapKeys() - for i := 0; i < len(keys); i++ { - success, err := keyMatcher.Match(keys[i].Interface()) - if err != nil { - return false, fmt.Errorf("HaveKeyWithValue's key matcher failed with:\n%s%s", format.Indent, err.Error()) - } - if success { - actualValue := reflect.ValueOf(actual).MapIndex(keys[i]) - success, err := valueMatcher.Match(actualValue.Interface()) - if err != nil { - return false, fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) - } - return success, nil - } - } - - return false, nil -} - -func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { - str := "to have {key: value}" - if _, ok := matcher.Key.(omegaMatcher); ok { - str += " matching" - } else if _, ok := 
matcher.Value.(omegaMatcher); ok { - str += " matching" - } - - expect := make(map[interface{}]interface{}, 1) - expect[matcher.Key] = matcher.Value - return format.Message(actual, str, expect) -} - -func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { - kStr := "not to have key" - if _, ok := matcher.Key.(omegaMatcher); ok { - kStr = "not to have key matching" - } - - vStr := "or that key's value not be" - if _, ok := matcher.Value.(omegaMatcher); ok { - vStr = "or to have that key's value not matching" - } - - return format.Message(actual, kStr, matcher.Key, vStr, matcher.Value) -} diff --git a/kit/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go b/kit/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go deleted file mode 100644 index 247b785..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_key_with_value_matcher_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("HaveKeyWithValue", func() { - var ( - stringKeys map[string]int - intKeys map[int]string - objKeys map[*myCustomType]*myCustomType - - customA *myCustomType - customB *myCustomType - ) - BeforeEach(func() { - stringKeys = map[string]int{"foo": 2, "bar": 3} - intKeys = map[int]string{2: "foo", 3: "bar"} - - customA = &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}} - customB = &myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}} - objKeys = map[*myCustomType]*myCustomType{customA: customA, customB: customA} - }) - - Context("when passed a map", func() { - It("should do the right thing", func() { - Ω(stringKeys).Should(HaveKeyWithValue("foo", 2)) - Ω(stringKeys).ShouldNot(HaveKeyWithValue("foo", 1)) - Ω(stringKeys).ShouldNot(HaveKeyWithValue("baz", 2)) - Ω(stringKeys).ShouldNot(HaveKeyWithValue("baz", 1)) - - Ω(intKeys).Should(HaveKeyWithValue(2, "foo")) - Ω(intKeys).ShouldNot(HaveKeyWithValue(4, "foo")) - Ω(intKeys).ShouldNot(HaveKeyWithValue(2, "baz")) - - Ω(objKeys).Should(HaveKeyWithValue(customA, customA)) - Ω(objKeys).Should(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"cake"}}, &myCustomType{s: "a", n: 2, f: 2.3, arr: []string{"ice", "cream"}})) - Ω(objKeys).ShouldNot(HaveKeyWithValue(&myCustomType{s: "b", n: 4, f: 3.1, arr: []string{"apple", "pie"}}, customA)) - }) - }) - - Context("when passed a correctly typed nil", func() { - It("should operate succesfully on the passed in value", func() { - var nilMap map[int]string - Ω(nilMap).ShouldNot(HaveKeyWithValue("foo", "bar")) - }) - }) - - Context("when the passed in key or value is actually a matcher", func() { - It("should pass each element through the matcher", func() { - Ω(stringKeys).Should(HaveKeyWithValue(ContainSubstring("oo"), 2)) - Ω(intKeys).Should(HaveKeyWithValue(2, ContainSubstring("oo"))) - Ω(stringKeys).ShouldNot(HaveKeyWithValue(ContainSubstring("foobar"), 2)) - 
}) - - It("should fail if the matcher ever fails", func() { - actual := map[int]string{1: "a", 3: "b", 2: "c"} - success, err := (&HaveKeyWithValueMatcher{Key: ContainSubstring("ar"), Value: 2}).Match(actual) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - otherActual := map[string]int{"a": 1, "b": 2, "c": 3} - success, err = (&HaveKeyWithValueMatcher{Key: "a", Value: ContainSubstring("1")}).Match(otherActual) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed something that is not a map", func() { - It("should error", func() { - success, err := (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match([]string{"foo"}) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&HaveKeyWithValueMatcher{Key: "foo", Value: "bar"}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/have_len_matcher.go b/kit/github.com/onsi/gomega/matchers/have_len_matcher.go deleted file mode 100644 index 1ccbf06..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_len_matcher.go +++ /dev/null @@ -1,27 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type HaveLenMatcher struct { - Count int -} - -func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { - length, ok := lengthOf(actual) - if !ok { - return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. 
Got:\n%s", format.Object(actual, 1)) - } - - return length == matcher.Count, nil -} - -func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) -} - -func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) -} diff --git a/kit/github.com/onsi/gomega/matchers/have_len_matcher_test.go b/kit/github.com/onsi/gomega/matchers/have_len_matcher_test.go deleted file mode 100644 index 0cbf424..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_len_matcher_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("HaveLen", func() { - Context("when passed a supported type", func() { - It("should do the right thing", func() { - Ω("").Should(HaveLen(0)) - Ω("AA").Should(HaveLen(2)) - - Ω([0]int{}).Should(HaveLen(0)) - Ω([2]int{1, 2}).Should(HaveLen(2)) - - Ω([]int{}).Should(HaveLen(0)) - Ω([]int{1, 2, 3}).Should(HaveLen(3)) - - Ω(map[string]int{}).Should(HaveLen(0)) - Ω(map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}).Should(HaveLen(4)) - - c := make(chan bool, 3) - Ω(c).Should(HaveLen(0)) - c <- true - c <- true - Ω(c).Should(HaveLen(2)) - }) - }) - - Context("when passed a correctly typed nil", func() { - It("should operate succesfully on the passed in value", func() { - var nilSlice []int - Ω(nilSlice).Should(HaveLen(0)) - - var nilMap map[int]string - Ω(nilMap).Should(HaveLen(0)) - }) - }) - - Context("when passed an unsupported type", func() { - It("should error", func() { - success, err := (&HaveLenMatcher{Count: 0}).Match(0) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = 
(&HaveLenMatcher{Count: 0}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/kit/github.com/onsi/gomega/matchers/have_occurred_matcher.go deleted file mode 100644 index 7bcea7d..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ /dev/null @@ -1,29 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type HaveOccurredMatcher struct { -} - -func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil { - return false, nil - } - - if isError(actual) { - return true, nil - } - - return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) -} - -func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected an error to have occured. Got:\n%s", format.Object(actual, 1)) -} - -func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "not to have occurred") -} diff --git a/kit/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go b/kit/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go deleted file mode 100644 index 78b2285..0000000 --- a/kit/github.com/onsi/gomega/matchers/have_occurred_matcher_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package matchers_test - -import ( - "errors" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("HaveOccurred", func() { - It("should succeed if matching an error", func() { - Ω(errors.New("Foo")).Should(HaveOccurred()) - }) - - It("should not succeed with nil", func() { - Ω(nil).ShouldNot(HaveOccurred()) - }) - - It("should only support errors and nil", func() { - success, err := (&HaveOccurredMatcher{}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&HaveOccurredMatcher{}).Match("") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/match_error_matcher.go b/kit/github.com/onsi/gomega/matchers/match_error_matcher.go deleted file mode 100644 index 58d7cd1..0000000 --- a/kit/github.com/onsi/gomega/matchers/match_error_matcher.go +++ /dev/null @@ -1,41 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type MatchErrorMatcher struct { - Expected interface{} -} - -func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { - if isNil(actual) { - return false, fmt.Errorf("Expected an error, got nil") - } - - if !isError(actual) { - return false, fmt.Errorf("Expected an error. Got:\n%s", format.Object(actual, 1)) - } - - actualErr := actual.(error) - - if isString(matcher.Expected) { - return reflect.DeepEqual(actualErr.Error(), matcher.Expected), nil - } - - if isError(matcher.Expected) { - return reflect.DeepEqual(actualErr, matcher.Expected), nil - } - - return false, fmt.Errorf("MatchError must be passed an error or string. 
Got:\n%s", format.Object(matcher.Expected, 1)) -} - -func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to match error", matcher.Expected) -} - -func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to match error", matcher.Expected) -} diff --git a/kit/github.com/onsi/gomega/matchers/match_error_matcher_test.go b/kit/github.com/onsi/gomega/matchers/match_error_matcher_test.go deleted file mode 100644 index c04b0b8..0000000 --- a/kit/github.com/onsi/gomega/matchers/match_error_matcher_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package matchers_test - -import ( - "errors" - "fmt" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -type CustomError struct { -} - -func (c CustomError) Error() string { - return "an error" -} - -var _ = Describe("MatchErrorMatcher", func() { - Context("When asserting against an error", func() { - It("should succeed when matching with an error", func() { - err := errors.New("an error") - fmtErr := fmt.Errorf("an error") - customErr := CustomError{} - - Ω(err).Should(MatchError(errors.New("an error"))) - Ω(err).ShouldNot(MatchError(errors.New("another error"))) - - Ω(fmtErr).Should(MatchError(errors.New("an error"))) - Ω(customErr).Should(MatchError(CustomError{})) - }) - - It("should succeed when matching with a string", func() { - err := errors.New("an error") - fmtErr := fmt.Errorf("an error") - customErr := CustomError{} - - Ω(err).Should(MatchError("an error")) - Ω(err).ShouldNot(MatchError("another error")) - - Ω(fmtErr).Should(MatchError("an error")) - Ω(customErr).Should(MatchError("an error")) - }) - - It("should fail when passed anything else", func() { - actualErr := errors.New("an error") - _, err := (&MatchErrorMatcher{ - Expected: []byte("an error"), - 
}).Match(actualErr) - Ω(err).Should(HaveOccurred()) - - _, err = (&MatchErrorMatcher{ - Expected: 3, - }).Match(actualErr) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed nil", func() { - It("should fail", func() { - _, err := (&MatchErrorMatcher{ - Expected: "an error", - }).Match(nil) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed a non-error", func() { - It("should fail", func() { - _, err := (&MatchErrorMatcher{ - Expected: "an error", - }).Match("an error") - Ω(err).Should(HaveOccurred()) - - _, err = (&MatchErrorMatcher{ - Expected: "an error", - }).Match(3) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/match_json_matcher.go b/kit/github.com/onsi/gomega/matchers/match_json_matcher.go deleted file mode 100644 index c9f7072..0000000 --- a/kit/github.com/onsi/gomega/matchers/match_json_matcher.go +++ /dev/null @@ -1,61 +0,0 @@ -package matchers - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type MatchJSONMatcher struct { - JSONToMatch interface{} -} - -func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { - actualString, expectedString, err := matcher.prettyPrint(actual) - if err != nil { - return false, err - } - - var aval interface{} - var eval interface{} - - // this is guarded by prettyPrint - json.Unmarshal([]byte(actualString), &aval) - json.Unmarshal([]byte(expectedString), &eval) - - return reflect.DeepEqual(aval, eval), nil -} - -func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { - actualString, expectedString, _ := matcher.prettyPrint(actual) - return format.Message(actualString, "to match JSON of", expectedString) -} - -func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { - actualString, expectedString, _ := matcher.prettyPrint(actual) - return format.Message(actualString, 
"not to match JSON of", expectedString) -} - -func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { - actualString, aok := toString(actual) - expectedString, eok := toString(matcher.JSONToMatch) - - if !(aok && eok) { - return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) - } - - abuf := new(bytes.Buffer) - ebuf := new(bytes.Buffer) - - if err := json.Indent(abuf, []byte(actualString), "", " "); err != nil { - return "", "", err - } - - if err := json.Indent(ebuf, []byte(expectedString), "", " "); err != nil { - return "", "", err - } - - return actualString, expectedString, nil -} diff --git a/kit/github.com/onsi/gomega/matchers/match_json_matcher_test.go b/kit/github.com/onsi/gomega/matchers/match_json_matcher_test.go deleted file mode 100644 index cd09c91..0000000 --- a/kit/github.com/onsi/gomega/matchers/match_json_matcher_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("MatchJSONMatcher", func() { - Context("When passed stringifiables", func() { - It("should succeed if the JSON matches", func() { - Ω("{}").Should(MatchJSON("{}")) - Ω(`{"a":1}`).Should(MatchJSON(`{"a":1}`)) - Ω(`{ - "a":1 - }`).Should(MatchJSON(`{"a":1}`)) - Ω(`{"a":1, "b":2}`).Should(MatchJSON(`{"b":2, "a":1}`)) - Ω(`{"a":1}`).ShouldNot(MatchJSON(`{"b":2, "a":1}`)) - }) - - It("should work with byte arrays", func() { - Ω([]byte("{}")).Should(MatchJSON([]byte("{}"))) - Ω("{}").Should(MatchJSON([]byte("{}"))) - Ω([]byte("{}")).Should(MatchJSON("{}")) - }) - }) - - Context("when either side is not valid JSON", func() { - It("should error", func() { - success, err := (&MatchJSONMatcher{JSONToMatch: `oops`}).Match(`{}`) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&MatchJSONMatcher{JSONToMatch: `{}`}).Match(`oops`) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when either side is neither a string nor a stringer", func() { - It("should error", func() { - success, err := (&MatchJSONMatcher{JSONToMatch: "{}"}).Match(2) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&MatchJSONMatcher{JSONToMatch: 2}).Match("{}") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&MatchJSONMatcher{JSONToMatch: nil}).Match("{}") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&MatchJSONMatcher{JSONToMatch: 2}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/kit/github.com/onsi/gomega/matchers/match_regexp_matcher.go deleted file mode 100644 index 215d214..0000000 --- a/kit/github.com/onsi/gomega/matchers/match_regexp_matcher.go +++ /dev/null @@ -1,42 +0,0 @@ -package matchers - -import ( - "fmt" - 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "regexp" -) - -type MatchRegexpMatcher struct { - Regexp string - Args []interface{} -} - -func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { - actualString, ok := toString(actual) - if !ok { - return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) - } - - match, err := regexp.Match(matcher.regexp(), []byte(actualString)) - if err != nil { - return false, fmt.Errorf("RegExp match failed to compile with error:\n\t%s", err.Error()) - } - - return match, nil -} - -func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to match regular expression", matcher.regexp()) -} - -func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to match regular expression", matcher.regexp()) -} - -func (matcher *MatchRegexpMatcher) regexp() string { - re := matcher.Regexp - if len(matcher.Args) > 0 { - re = fmt.Sprintf(matcher.Regexp, matcher.Args...) - } - return re -} diff --git a/kit/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go b/kit/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go deleted file mode 100644 index 8912be6..0000000 --- a/kit/github.com/onsi/gomega/matchers/match_regexp_matcher_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("MatchRegexp", func() { - Context("when actual is a string", func() { - It("should match against the string", func() { - Ω(" a2!bla").Should(MatchRegexp(`\d!`)) - Ω(" a2!bla").ShouldNot(MatchRegexp(`[A-Z]`)) - }) - }) - - Context("when actual is a stringer", func() { - It("should call the stringer and match agains the returned string", func() { - Ω(&myStringer{a: "Abc3"}).Should(MatchRegexp(`[A-Z][a-z]+\d`)) - }) - }) - - Context("when the matcher is called with multiple arguments", func() { - It("should pass the string and arguments to sprintf", func() { - Ω(" a23!bla").Should(MatchRegexp(`\d%d!`, 3)) - }) - }) - - Context("when actual is neither a string nor a stringer", func() { - It("should error", func() { - success, err := (&MatchRegexpMatcher{Regexp: `\d`}).Match(2) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when the passed in regexp fails to compile", func() { - It("should error", func() { - success, err := (&MatchRegexpMatcher{Regexp: "("}).Match("Foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go b/kit/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go deleted file mode 100644 index a6d5e0f..0000000 --- a/kit/github.com/onsi/gomega/matchers/matcher_tests_suite_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package matchers_test - -import ( - "testing" - . "github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" -) - -type myStringer struct { - a string -} - -func (s *myStringer) String() string { - return s.a -} - -type StringAlias string - -type myCustomType struct { - s string - n int - f float32 - arr []string -} - -func Test(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Gomega") -} diff --git a/kit/github.com/onsi/gomega/matchers/panic_matcher.go b/kit/github.com/onsi/gomega/matchers/panic_matcher.go deleted file mode 100644 index 1d8a3b6..0000000 --- a/kit/github.com/onsi/gomega/matchers/panic_matcher.go +++ /dev/null @@ -1,42 +0,0 @@ -package matchers - -import ( - "fmt" - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" - "reflect" -) - -type PanicMatcher struct{} - -func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil { - return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") - } - - actualType := reflect.TypeOf(actual) - if actualType.Kind() != reflect.Func { - return false, fmt.Errorf("PanicMatcher expects a function. Got:\n%s", format.Object(actual, 1)) - } - if !(actualType.NumIn() == 0 && actualType.NumOut() == 0) { - return false, fmt.Errorf("PanicMatcher expects a function with no arguments and no return value. 
Got:\n%s", format.Object(actual, 1)) - } - - success = false - defer func() { - if e := recover(); e != nil { - success = true - } - }() - - reflect.ValueOf(actual).Call([]reflect.Value{}) - - return -} - -func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to panic") -} - -func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to panic") -} diff --git a/kit/github.com/onsi/gomega/matchers/panic_matcher_test.go b/kit/github.com/onsi/gomega/matchers/panic_matcher_test.go deleted file mode 100644 index e0ef3e8..0000000 --- a/kit/github.com/onsi/gomega/matchers/panic_matcher_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package matchers_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -var _ = Describe("Panic", func() { - Context("when passed something that's not a function that takes zero arguments and returns nothing", func() { - It("should error", func() { - success, err := (&PanicMatcher{}).Match("foo") - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&PanicMatcher{}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&PanicMatcher{}).Match(func(foo string) {}) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&PanicMatcher{}).Match(func() string { return "bar" }) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when passed a function of the correct type", func() { - It("should call the function and pass if the function panics", func() { - Ω(func() { panic("ack!") }).Should(Panic()) - Ω(func() {}).ShouldNot(Panic()) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/receive_matcher.go b/kit/github.com/onsi/gomega/matchers/receive_matcher.go deleted 
file mode 100644 index 47a6c47..0000000 --- a/kit/github.com/onsi/gomega/matchers/receive_matcher.go +++ /dev/null @@ -1,116 +0,0 @@ -package matchers - -import ( - "fmt" - "reflect" - - "github.com/gocircuit/escher/kit/github.com/onsi/gomega/format" -) - -type ReceiveMatcher struct { - Arg interface{} - receivedValue reflect.Value - channelClosed bool -} - -func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { - if !isChan(actual) { - return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1)) - } - - channelType := reflect.TypeOf(actual) - channelValue := reflect.ValueOf(actual) - - if channelType.ChanDir() == reflect.SendDir { - return false, fmt.Errorf("ReceiveMatcher matcher cannot be passed a send-only channel. Got:\n%s", format.Object(actual, 1)) - } - - var subMatcher omegaMatcher - var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) - if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } - - assignable := channelType.Elem().AssignableTo(argType.Elem()) - if !assignable { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } - } - } - - winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ - reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue}, - reflect.SelectCase{Dir: reflect.SelectDefault}, - }) - - var closed bool - var didReceive bool - if winnerIndex == 0 { - closed = !open - didReceive = open - } - matcher.channelClosed = closed - - if closed { - return false, fmt.Errorf("ReceiveMatcher was given a closed channel:\n%s", format.Object(actual, 1)) - } - - if hasSubMatcher { - if didReceive { - 
matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) - } else { - return false, nil - } - } - - if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) - reflect.Indirect(outValue).Set(value) - } - - return true, nil - } else { - return false, nil - } -} - -func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) - - if hasSubMatcher { - if matcher.receivedValue.IsValid() { - return subMatcher.FailureMessage(matcher.receivedValue.Interface()) - } - return "When passed a matcher, ReceiveMatcher's channel *must* receive something." - } else { - return format.Message(actual, "to receive something") - } -} - -func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) - - if hasSubMatcher { - if matcher.receivedValue.IsValid() { - return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) - } - return "When passed a matcher, ReceiveMatcher's channel *must* receive something." - } else { - return format.Message(actual, "not to receive anything") - } -} - -func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { - if !isChan(actual) { - return false - } - - return !matcher.channelClosed -} diff --git a/kit/github.com/onsi/gomega/matchers/receive_matcher_test.go b/kit/github.com/onsi/gomega/matchers/receive_matcher_test.go deleted file mode 100644 index 59c187f..0000000 --- a/kit/github.com/onsi/gomega/matchers/receive_matcher_test.go +++ /dev/null @@ -1,284 +0,0 @@ -package matchers_test - -import ( - "time" - . "github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers" -) - -type kungFuActor interface { - DrunkenMaster() bool -} - -type jackie struct { - name string -} - -func (j *jackie) DrunkenMaster() bool { - return true -} - -var _ = Describe("ReceiveMatcher", func() { - Context("with no argument", func() { - Context("for a buffered channel", func() { - It("should succeed", func() { - channel := make(chan bool, 1) - - Ω(channel).ShouldNot(Receive()) - - channel <- true - - Ω(channel).Should(Receive()) - }) - }) - - Context("for an unbuffered channel", func() { - It("should succeed (eventually)", func() { - channel := make(chan bool) - - Ω(channel).ShouldNot(Receive()) - - go func() { - time.Sleep(10 * time.Millisecond) - channel <- true - }() - - Eventually(channel).Should(Receive()) - }) - }) - }) - - Context("with a pointer argument", func() { - Context("of the correct type", func() { - It("should write the value received on the channel to the pointer", func() { - channel := make(chan int, 1) - - var value int - - Ω(channel).ShouldNot(Receive(&value)) - Ω(value).Should(BeZero()) - - channel <- 17 - - Ω(channel).Should(Receive(&value)) - Ω(value).Should(Equal(17)) - }) - }) - - Context("to various types of objects", func() { - It("should work", func() { - //channels of strings - stringChan := make(chan string, 1) - stringChan <- "foo" - - var s string - Ω(stringChan).Should(Receive(&s)) - Ω(s).Should(Equal("foo")) - - //channels of slices - sliceChan := make(chan []bool, 1) - sliceChan <- []bool{true, true, false} - - var sl []bool - Ω(sliceChan).Should(Receive(&sl)) - Ω(sl).Should(Equal([]bool{true, true, false})) - - //channels of channels - chanChan := make(chan chan bool, 1) - c := make(chan bool) - chanChan <- c - - var receivedC chan bool - Ω(chanChan).Should(Receive(&receivedC)) - Ω(receivedC).Should(Equal(c)) - - //channels of interfaces - jackieChan := make(chan kungFuActor, 1) - aJackie := &jackie{name: "Jackie Chan"} - jackieChan <- aJackie - - var 
theJackie kungFuActor - Ω(jackieChan).Should(Receive(&theJackie)) - Ω(theJackie).Should(Equal(aJackie)) - }) - }) - - Context("of the wrong type", func() { - It("should error", func() { - channel := make(chan int) - var incorrectType bool - - success, err := (&ReceiveMatcher{Arg: &incorrectType}).Match(channel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - var notAPointer int - success, err = (&ReceiveMatcher{Arg: notAPointer}).Match(channel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - }) - - Context("with a matcher", func() { - It("should defer to the underlying matcher", func() { - intChannel := make(chan int, 1) - intChannel <- 3 - Ω(intChannel).Should(Receive(Equal(3))) - - intChannel <- 2 - Ω(intChannel).ShouldNot(Receive(Equal(3))) - - stringChannel := make(chan []string, 1) - stringChannel <- []string{"foo", "bar", "baz"} - Ω(stringChannel).Should(Receive(ContainElement(ContainSubstring("fo")))) - - stringChannel <- []string{"foo", "bar", "baz"} - Ω(stringChannel).ShouldNot(Receive(ContainElement(ContainSubstring("archipelago")))) - }) - - It("should defer to the underlying matcher for the message", func() { - matcher := Receive(Equal(3)) - channel := make(chan int, 1) - channel <- 2 - matcher.Match(channel) - Ω(matcher.FailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 2\s+to equal\s+: 3`)) - - channel <- 3 - matcher.Match(channel) - Ω(matcher.NegatedFailureMessage(channel)).Should(MatchRegexp(`Expected\s+: 3\s+not to equal\s+: 3`)) - }) - - It("should work just fine with Eventually", func() { - stringChannel := make(chan string) - - go func() { - time.Sleep(5 * time.Millisecond) - stringChannel <- "A" - time.Sleep(5 * time.Millisecond) - stringChannel <- "B" - }() - - Eventually(stringChannel).Should(Receive(Equal("B"))) - }) - - Context("if the matcher errors", func() { - It("should error", func() { - channel := make(chan int, 1) - channel <- 3 - success, err := (&ReceiveMatcher{Arg: 
ContainSubstring("three")}).Match(channel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("if nothing is received", func() { - It("should fail", func() { - channel := make(chan int, 1) - success, err := (&ReceiveMatcher{Arg: Equal(1)}).Match(channel) - Ω(success).Should(BeFalse()) - Ω(err).ShouldNot(HaveOccurred()) - }) - }) - }) - - Context("When actual is a *closed* channel", func() { - Context("for a buffered channel", func() { - It("should work until it hits the end of the buffer", func() { - channel := make(chan bool, 1) - channel <- true - - close(channel) - - Ω(channel).Should(Receive()) - - success, err := (&ReceiveMatcher{}).Match(channel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("for an unbuffered channel", func() { - It("should error", func() { - channel := make(chan bool) - close(channel) - - success, err := (&ReceiveMatcher{}).Match(channel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - }) - - Context("When actual is a send-only channel", func() { - It("should error", func() { - channel := make(chan bool) - - var writerChannel chan<- bool - writerChannel = channel - - success, err := (&ReceiveMatcher{}).Match(writerChannel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Context("when acutal is a non-channel", func() { - It("should error", func() { - var nilChannel chan bool - - success, err := (&ReceiveMatcher{}).Match(nilChannel) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&ReceiveMatcher{}).Match(nil) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - - success, err = (&ReceiveMatcher{}).Match(3) - Ω(success).Should(BeFalse()) - Ω(err).Should(HaveOccurred()) - }) - }) - - Describe("when used with eventually and a custom matcher", func() { - It("should return the matcher's error when a failing value is received on the channel, instead of the must receive 
something failure", func() { - failures := InterceptGomegaFailures(func() { - c := make(chan string, 0) - Eventually(c, 0.01).Should(Receive(Equal("hello"))) - }) - Ω(failures[0]).Should(ContainSubstring("When passed a matcher, ReceiveMatcher's channel *must* receive something.")) - - failures = InterceptGomegaFailures(func() { - c := make(chan string, 1) - c <- "hi" - Eventually(c, 0.01).Should(Receive(Equal("hello"))) - }) - Ω(failures[0]).Should(ContainSubstring(": hello")) - }) - }) - - Describe("Bailing early", func() { - It("should bail early when passed a closed channel", func() { - c := make(chan bool) - close(c) - - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(c).Should(Receive()) - }) - Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) - Ω(failures).Should(HaveLen(1)) - }) - - It("should bail early when passed a non-channel", func() { - t := time.Now() - failures := InterceptGomegaFailures(func() { - Eventually(3).Should(Receive()) - }) - Ω(time.Since(t)).Should(BeNumerically("<", 500*time.Millisecond)) - Ω(failures).Should(HaveLen(1)) - }) - }) -}) diff --git a/kit/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE b/kit/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE deleted file mode 100644 index 8edd817..0000000 --- a/kit/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Amit Kumar Gupta - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial 
portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go deleted file mode 100644 index f98675b..0000000 --- a/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go +++ /dev/null @@ -1,41 +0,0 @@ -package bipartitegraph - -import "errors" -import "fmt" - -import . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/node" -import . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/edge" - -type BipartiteGraph struct { - Left NodeOrderedSet - Right NodeOrderedSet - Edges EdgeSet -} - -func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { - left := NodeOrderedSet{} - for i, _ := range leftValues { - left = append(left, Node{i}) - } - - right := NodeOrderedSet{} - for j, _ := range rightValues { - right = append(right, Node{j + len(left)}) - } - - edges := EdgeSet{} - for i, leftValue := range leftValues { - for j, rightValue := range rightValues { - neighbours, err := neighbours(leftValue, rightValue) - if err != nil { - return nil, errors.New(fmt.Sprintf("error determining adjacency for %v and %v: %s", leftValue, rightValue, err.Error())) - } - - if neighbours { - edges = append(edges, Edge{left[i], right[j]}) - } - } - } - - return &BipartiteGraph{left, right, edges}, nil -} diff --git a/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go deleted file mode 100644 index 85ed169..0000000 --- a/kit/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ /dev/null @@ -1,161 +0,0 @@ -package bipartitegraph - -import . "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/node" -import . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/edge" -import "github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/util" - -func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) { - paths := bg.maximalDisjointSLAPCollection(matching) - - for len(paths) > 0 { - for _, path := range paths { - matching = matching.SymmetricDifference(path) - } - paths = bg.maximalDisjointSLAPCollection(matching) - } - - return -} - -func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (result []EdgeSet) { - guideLayers := bg.createSLAPGuideLayers(matching) - if len(guideLayers) == 0 { - return - } - - used := make(map[Node]bool) - - for _, u := range guideLayers[len(guideLayers)-1] { - slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used) - if found { - for _, edge := range slap { - used[edge.Node1] = true - used[edge.Node2] = true - } - result = append(result, slap) - } - } - - return -} - -func (bg *BipartiteGraph) findDisjointSLAP( - start Node, - matching EdgeSet, - guideLayers []NodeOrderedSet, - used map[Node]bool, -) ([]Edge, bool) { - return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used) -} - -func (bg *BipartiteGraph) findDisjointSLAPHelper( - currentNode Node, - currentSLAP EdgeSet, - currentLevel int, - matching EdgeSet, - guideLayers []NodeOrderedSet, - used map[Node]bool, -) (EdgeSet, bool) { - used[currentNode] = true - - if currentLevel == 0 { - return currentSLAP, true - } - - for _, nextNode := range guideLayers[currentLevel-1] { - if used[nextNode] { - continue - } - - edge, found := bg.Edges.FindByNodes(currentNode, nextNode) - if !found { - continue - } - - if matching.Contains(edge) == util.Odd(currentLevel) { - continue - } - - currentSLAP = append(currentSLAP, edge) - slap, found := bg.findDisjointSLAPHelper(nextNode, currentSLAP, currentLevel-1, matching, guideLayers, used) - if found { - return slap, true - } - 
currentSLAP = currentSLAP[:len(currentSLAP)-1] - } - - used[currentNode] = false - return nil, false -} - -func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) { - used := make(map[Node]bool) - currentLayer := NodeOrderedSet{} - - for _, node := range bg.Left { - if matching.Free(node) { - used[node] = true - currentLayer = append(currentLayer, node) - } - } - - if len(currentLayer) == 0 { - return []NodeOrderedSet{} - } else { - guideLayers = append(guideLayers, currentLayer) - } - - done := false - - for !done { - lastLayer := currentLayer - currentLayer = NodeOrderedSet{} - - if util.Odd(len(guideLayers)) { - for _, leftNode := range lastLayer { - for _, rightNode := range bg.Right { - if used[rightNode] { - continue - } - - edge, found := bg.Edges.FindByNodes(leftNode, rightNode) - if !found || matching.Contains(edge) { - continue - } - - currentLayer = append(currentLayer, rightNode) - used[rightNode] = true - - if matching.Free(rightNode) { - done = true - } - } - } - } else { - for _, rightNode := range lastLayer { - for _, leftNode := range bg.Left { - if used[leftNode] { - continue - } - - edge, found := bg.Edges.FindByNodes(leftNode, rightNode) - if !found || !matching.Contains(edge) { - continue - } - - currentLayer = append(currentLayer, leftNode) - used[leftNode] = true - } - } - - } - - if len(currentLayer) == 0 { - return []NodeOrderedSet{} - } else { - guideLayers = append(guideLayers, currentLayer) - } - } - - return -} diff --git a/kit/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/kit/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go deleted file mode 100644 index b02d481..0000000 --- a/kit/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go +++ /dev/null @@ -1,61 +0,0 @@ -package edge - -import . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega/matchers/support/goraph/node" - -type Edge struct { - Node1 Node - Node2 Node -} - -type EdgeSet []Edge - -func (ec EdgeSet) Free(node Node) bool { - for _, e := range ec { - if e.Node1 == node || e.Node2 == node { - return false - } - } - - return true -} - -func (ec EdgeSet) Contains(edge Edge) bool { - for _, e := range ec { - if e == edge { - return true - } - } - - return false -} - -func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) { - for _, e := range ec { - if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) { - return e, true - } - } - - return Edge{}, false -} - -func (ec EdgeSet) SymmetricDifference(ec2 EdgeSet) EdgeSet { - edgesToInclude := make(map[Edge]bool) - - for _, e := range ec { - edgesToInclude[e] = true - } - - for _, e := range ec2 { - edgesToInclude[e] = !edgesToInclude[e] - } - - result := EdgeSet{} - for e, include := range edgesToInclude { - if include { - result = append(result, e) - } - } - - return result -} diff --git a/kit/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/kit/github.com/onsi/gomega/matchers/support/goraph/node/node.go deleted file mode 100644 index 800c2ea..0000000 --- a/kit/github.com/onsi/gomega/matchers/support/goraph/node/node.go +++ /dev/null @@ -1,7 +0,0 @@ -package node - -type Node struct { - Id int -} - -type NodeOrderedSet []Node diff --git a/kit/github.com/onsi/gomega/matchers/support/goraph/util/util.go b/kit/github.com/onsi/gomega/matchers/support/goraph/util/util.go deleted file mode 100644 index a24cd27..0000000 --- a/kit/github.com/onsi/gomega/matchers/support/goraph/util/util.go +++ /dev/null @@ -1,7 +0,0 @@ -package util - -import "math" - -func Odd(n int) bool { - return math.Mod(float64(n), 2.0) == 1.0 -} diff --git a/kit/github.com/onsi/gomega/matchers/type_support.go b/kit/github.com/onsi/gomega/matchers/type_support.go deleted file mode 100644 index ef9b448..0000000 --- 
a/kit/github.com/onsi/gomega/matchers/type_support.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Gomega matchers - -This package implements the Gomega matchers and does not typically need to be imported. -See the docs for Gomega for documentation on the matchers - -http://onsi.github.io/gomega/ -*/ -package matchers - -import ( - "fmt" - "reflect" -) - -type omegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) -} - -func isBool(a interface{}) bool { - return reflect.TypeOf(a).Kind() == reflect.Bool -} - -func isNumber(a interface{}) bool { - if a == nil { - return false - } - kind := reflect.TypeOf(a).Kind() - return reflect.Int <= kind && kind <= reflect.Float64 -} - -func isInteger(a interface{}) bool { - kind := reflect.TypeOf(a).Kind() - return reflect.Int <= kind && kind <= reflect.Int64 -} - -func isUnsignedInteger(a interface{}) bool { - kind := reflect.TypeOf(a).Kind() - return reflect.Uint <= kind && kind <= reflect.Uint64 -} - -func isFloat(a interface{}) bool { - kind := reflect.TypeOf(a).Kind() - return reflect.Float32 <= kind && kind <= reflect.Float64 -} - -func toInteger(a interface{}) int64 { - if isInteger(a) { - return reflect.ValueOf(a).Int() - } else if isUnsignedInteger(a) { - return int64(reflect.ValueOf(a).Uint()) - } else if isFloat(a) { - return int64(reflect.ValueOf(a).Float()) - } else { - panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) - } -} - -func toUnsignedInteger(a interface{}) uint64 { - if isInteger(a) { - return uint64(reflect.ValueOf(a).Int()) - } else if isUnsignedInteger(a) { - return reflect.ValueOf(a).Uint() - } else if isFloat(a) { - return uint64(reflect.ValueOf(a).Float()) - } else { - panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) - } -} - -func toFloat(a interface{}) float64 { - if isInteger(a) { - return float64(reflect.ValueOf(a).Int()) - } else if isUnsignedInteger(a) { - return float64(reflect.ValueOf(a).Uint()) - } else if isFloat(a) { - return reflect.ValueOf(a).Float() - } else { - panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) - } -} - -func isError(a interface{}) bool { - _, ok := a.(error) - return ok -} - -func isChan(a interface{}) bool { - if isNil(a) { - return false - } - return reflect.TypeOf(a).Kind() == reflect.Chan -} - -func isMap(a interface{}) bool { - if a == nil { - return false - } - return reflect.TypeOf(a).Kind() == reflect.Map -} - -func isArrayOrSlice(a interface{}) bool { - if a == nil { - return false - } - switch reflect.TypeOf(a).Kind() { - case reflect.Array, reflect.Slice: - return true - default: - return false - } -} - -func isString(a interface{}) bool { - if a == nil { - return false - } - return reflect.TypeOf(a).Kind() == reflect.String -} - -func toString(a interface{}) (string, bool) { - aString, isString := a.(string) - if isString { - return aString, true - } - - aBytes, isBytes := a.([]byte) - if isBytes { - return string(aBytes), true - } - - aStringer, isStringer := a.(fmt.Stringer) - if isStringer { - return aStringer.String(), true - } - - return "", false -} - -func lengthOf(a interface{}) (int, bool) { - if a == nil { - return 0, false - } - switch reflect.TypeOf(a).Kind() { - case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: - return reflect.ValueOf(a).Len(), true - default: - return 0, false - } -} - -func isNil(a interface{}) bool { - if a == nil { - return true - } - - switch reflect.TypeOf(a).Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return reflect.ValueOf(a).IsNil() - } - - return false -} diff --git a/kit/github.com/onsi/gomega/types/types.go b/kit/github.com/onsi/gomega/types/types.go deleted file mode 
100644 index 1c632ad..0000000 --- a/kit/github.com/onsi/gomega/types/types.go +++ /dev/null @@ -1,17 +0,0 @@ -package types - -type GomegaFailHandler func(message string, callerSkip ...int) - -//A simple *testing.T interface wrapper -type GomegaTestingT interface { - Errorf(format string, args ...interface{}) -} - -//All Gomega matchers must implement the GomegaMatcher interface -// -//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding_your_own_matchers -type GomegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) -} diff --git a/kit/github.com/syndtr/goleveldb/.gitignore b/kit/github.com/syndtr/goleveldb/.gitignore deleted file mode 100644 index 8529853..0000000 --- a/kit/github.com/syndtr/goleveldb/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.git.genus diff --git a/kit/github.com/syndtr/goleveldb/.travis.yml b/kit/github.com/syndtr/goleveldb/.travis.yml deleted file mode 100644 index 65d1305..0000000 --- a/kit/github.com/syndtr/goleveldb/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - - tip - -script: go test ./... \ No newline at end of file diff --git a/kit/github.com/syndtr/goleveldb/LICENSE b/kit/github.com/syndtr/goleveldb/LICENSE deleted file mode 100644 index 4a772d1..0000000 --- a/kit/github.com/syndtr/goleveldb/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2012 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/kit/github.com/syndtr/goleveldb/README.md b/kit/github.com/syndtr/goleveldb/README.md deleted file mode 100644 index 007b5f3..0000000 --- a/kit/github.com/syndtr/goleveldb/README.md +++ /dev/null @@ -1,93 +0,0 @@ -This is an implementation of the [LevelDB key/value database](http:code.google.com/p/leveldb) in the [Go programming language](http:golang.org). - -[![Build Status](https://travis-ci.org/syndtr/goleveldb.png?branch=master)](https://travis-ci.org/syndtr/goleveldb) - -Installation ------------ - - go get github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb - -Requirements ------------ - -* Need at least `go1.2` or newer. - -Usage ------------ - -Create or open a database: - - db, err := leveldb.OpenFile("path/to/db", nil) - ... - defer db.Close() - ... - -Read or modify the database content: - - // Remember that the contents of the returned slice should not be modified. - data, err := db.Get([]byte("key"), nil) - ... 
- err = db.Put([]byte("key"), []byte("value"), nil) - ... - err = db.Delete([]byte("key"), nil) - ... - -Iterate over database content: - - iter := db.NewIterator(nil, nil) - for iter.Next() { - // Remember that the contents of the returned slice should not be modified, and - // only valid until the next call to Next. - key := iter.Key() - value := iter.Value() - ... - } - iter.Release() - err = iter.Error() - ... - -Seek-then-Iterate: - - iter := db.NewIterator(nil, nil) - for ok := iter.Seek(key); ok; ok = iter.Next() { - // Use key/value. - ... - } - iter.Release() - err = iter.Error() - ... - -Iterate over subset of database content: - - iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) - for iter.Next() { - // Use key/value. - ... - } - iter.Release() - err = iter.Error() - ... - -Batch writes: - - batch := new(leveldb.Batch) - batch.Put([]byte("foo"), []byte("value")) - batch.Put([]byte("bar"), []byte("another value")) - batch.Delete([]byte("baz")) - err = db.Write(batch, nil) - ... - -Use bloom filter: - - o := &opt.Options{ - Filter: filter.NewBloomFilter(10), - } - db, err := leveldb.OpenFile("path/to/db", o) - ... - defer db.Close() - ... - -Documentation ------------ - -You can read package documentation [here](http:godoc.org/github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb). diff --git a/kit/github.com/syndtr/goleveldb/leveldb/batch.go b/kit/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index 2039e58..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "encoding/binary" - "errors" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/memdb" -) - -var ( - errBatchTooShort = errors.New("leveldb: batch is too short") - errBatchBadRecord = errors.New("leveldb: bad record in batch") -) - -const kBatchHdrLen = 8 + 4 - -type batchReplay interface { - put(key, value []byte, seq uint64) - delete(key []byte, seq uint64) -} - -// Batch is a write batch. -type Batch struct { - buf []byte - rLen, bLen int - seq uint64 - sync bool -} - -func (b *Batch) grow(n int) { - off := len(b.buf) - if off == 0 { - // include headers - off = kBatchHdrLen - n += off - } - if cap(b.buf)-off >= n { - return - } - buf := make([]byte, 2*cap(b.buf)+n) - copy(buf, b.buf) - b.buf = buf[:off] -} - -func (b *Batch) appendRec(t vType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if t == tVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - off := len(b.buf) - buf := b.buf[:off+n] - buf[off] = byte(t) - off += 1 - off += binary.PutUvarint(buf[off:], uint64(len(key))) - copy(buf[off:], key) - off += len(key) - if t == tVal { - off += binary.PutUvarint(buf[off:], uint64(len(value))) - copy(buf[off:], value) - off += len(value) - } - b.buf = buf[:off] - b.rLen++ - // Include 8-byte ikey header - b.bLen += len(key) + len(value) + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns. -func (b *Batch) Put(key, value []byte) { - b.appendRec(tVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. -// It is safe to modify the contents of the argument after Delete returns. -func (b *Batch) Delete(key []byte) { - b.appendRec(tDel, key, nil) -} - -// Reset resets the batch. 
-func (b *Batch) Reset() { - b.buf = nil - b.seq = 0 - b.rLen = 0 - b.bLen = 0 - b.sync = false -} - -func (b *Batch) init(sync bool) { - b.sync = sync -} - -func (b *Batch) put(key, value []byte, seq uint64) { - if b.rLen == 0 { - b.seq = seq - } - b.Put(key, value) -} - -func (b *Batch) delete(key []byte, seq uint64) { - if b.rLen == 0 { - b.seq = seq - } - b.Delete(key) -} - -func (b *Batch) append(p *Batch) { - if p.rLen > 0 { - b.grow(len(p.buf) - kBatchHdrLen) - b.buf = append(b.buf, p.buf[kBatchHdrLen:]...) - b.rLen += p.rLen - } - if p.sync { - b.sync = true - } -} - -func (b *Batch) len() int { - return b.rLen -} - -func (b *Batch) size() int { - return b.bLen -} - -func (b *Batch) encode() []byte { - b.grow(0) - binary.LittleEndian.PutUint64(b.buf, b.seq) - binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen)) - - return b.buf -} - -func (b *Batch) decode(buf []byte) error { - if len(buf) < kBatchHdrLen { - return errBatchTooShort - } - - b.seq = binary.LittleEndian.Uint64(buf) - b.rLen = int(binary.LittleEndian.Uint32(buf[8:])) - // No need to be precise at this point, it won't be used anyway - b.bLen = len(buf) - kBatchHdrLen - b.buf = buf - - return nil -} - -func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error { - off := kBatchHdrLen - for i := 0; i < b.rLen; i++ { - if off >= len(b.buf) { - return errors.New("leveldb: invalid batch record length") - } - - t := vType(b.buf[off]) - if t > tVal { - return errors.New("leveldb: invalid batch record type in batch") - } - off += 1 - - x, n := binary.Uvarint(b.buf[off:]) - off += n - if n <= 0 || off+int(x) > len(b.buf) { - return errBatchBadRecord - } - key := b.buf[off : off+int(x)] - off += int(x) - - var value []byte - if t == tVal { - x, n := binary.Uvarint(b.buf[off:]) - off += n - if n <= 0 || off+int(x) > len(b.buf) { - return errBatchBadRecord - } - value = b.buf[off : off+int(x)] - off += int(x) - } - - f(i, t, key, value) - } - - return nil -} - -func (b *Batch) replay(to 
batchReplay) error { - return b.decodeRec(func(i int, t vType, key, value []byte) { - switch t { - case tVal: - to.put(key, value, b.seq+uint64(i)) - case tDel: - to.delete(key, b.seq+uint64(i)) - } - }) -} - -func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, t vType, key, value []byte) { - ikey := newIKey(key, b.seq+uint64(i), t) - to.Put(ikey, value) - }) -} - -func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, t vType, key, value []byte) { - ikey := newIKey(key, b.seq+uint64(i), t) - to.Delete(ikey) - }) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/batch_test.go b/kit/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index ad8e212..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type tbRec struct { - t vType - key, value []byte -} - -type testBatch struct { - rec []*tbRec -} - -func (p *testBatch) put(key, value []byte, seq uint64) { - p.rec = append(p.rec, &tbRec{tVal, key, value}) -} - -func (p *testBatch) delete(key []byte, seq uint64) { - p.rec = append(p.rec, &tbRec{tDel, key, nil}) -} - -func compareBatch(t *testing.T, b1, b2 *Batch) { - if b1.seq != b2.seq { - t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) - } - if b1.len() != b2.len() { - t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len()) - } - p1, p2 := new(testBatch), new(testBatch) - err := b1.replay(p1) - if err != nil { - t.Fatal("error when replaying batch 1: ", err) - } - err = b2.replay(p2) - if err != nil { - t.Fatal("error when replaying batch 2: ", err) - } - for i := range p1.rec { - r1, r2 := p1.rec[i], p2.rec[i] - if r1.t != r2.t { - t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t) - } - if !bytes.Equal(r1.key, r2.key) { - t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) - } - if r1.t == tVal { - if !bytes.Equal(r1.value, r2.value) { - t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) - } - } - } -} - -func TestBatch_EncodeDecode(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("k"), []byte("")) - b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) - b1.Delete([]byte("key10000")) - b1.Delete([]byte("k")) - buf := b1.encode() - b2 := new(Batch) - err := b2.decode(buf) - if err != nil { - t.Error("error when decoding batch: ", err) - } - compareBatch(t, b1, b2) -} - 
-func TestBatch_Append(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("foo"), []byte("foovalue")) - b1.Put([]byte("bar"), []byte("barvalue")) - b2a := new(Batch) - b2a.seq = 10009 - b2a.Put([]byte("key1"), []byte("value1")) - b2a.Put([]byte("key2"), []byte("value2")) - b2a.Delete([]byte("key1")) - b2b := new(Batch) - b2b.Put([]byte("foo"), []byte("foovalue")) - b2b.Put([]byte("bar"), []byte("barvalue")) - b2a.append(b2b) - compareBatch(t, b1, b2a) -} - -func TestBatch_Size(t *testing.T) { - b := new(Batch) - for i := 0; i < 2; i++ { - b.Put([]byte("key1"), []byte("value1")) - b.Put([]byte("key2"), []byte("value2")) - b.Delete([]byte("key1")) - b.Put([]byte("foo"), []byte("foovalue")) - b.Put([]byte("bar"), []byte("barvalue")) - mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) - b.memReplay(mem) - if b.size() != mem.Size() { - t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) - } - b.Reset() - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/bench_test.go b/kit/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 0bb2b41..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) 
- } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for 
; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := p.db.NewIterator(nil, p.ro) - err := iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() - runtime.GOMAXPROCS(1) -} - -func BenchmarkDBWrite(b *testing.B) 
{ - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() 
- - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.gets() - p.close() -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/kit/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index d71fe15..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides interface and implementation of a cache algorithms. -package cache - -import ( - "sync/atomic" -) - -// SetFunc used by Namespace.Get method to create a cache object. SetFunc -// may return ok false, in that case the cache object will not be created. 
-type SetFunc func() (ok bool, value interface{}, charge int, fin SetFin) - -// SetFin will be called when corresponding cache object are released. -type SetFin func() - -// DelFin will be called when corresponding cache object are released. -// DelFin will be called after SetFin. The exist is true if the corresponding -// cache object is actually exist in the cache tree. -type DelFin func(exist bool) - -// PurgeFin will be called when corresponding cache object are released. -// PurgeFin will be called after SetFin. If PurgeFin present DelFin will -// not be executed but passed to the PurgeFin, it is up to the caller -// to call it or not. -type PurgeFin func(ns, key uint64, delfin DelFin) - -// Cache is a cache tree. A cache instance must be goroutine-safe. -type Cache interface { - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // GetNamespace gets or creates a cache namespace for the given id. - GetNamespace(id uint64) Namespace - - // Purge purges all cache namespaces, read Namespace.Purge method documentation. - Purge(fin PurgeFin) - - // Zap zaps all cache namespaces, read Namespace.Zap method documentation. - Zap(closed bool) -} - -// Namespace is a cache namespace. A namespace instance must be goroutine-safe. -type Namespace interface { - // Get gets cache object for the given key. The given SetFunc (if not nil) will - // be called if the given key does not exist. - // If the given key does not exist, SetFunc is nil or SetFunc return ok false, Get - // will return ok false. - Get(key uint64, setf SetFunc) (obj Object, ok bool) - - // Get deletes cache object for the given key. If exist the cache object will - // be deleted later when all of its handles have been released (i.e. no one use - // it anymore) and the given DelFin (if not nil) will finally be executed. If - // such cache object does not exist the given DelFin will be executed anyway. - // - // Delete returns true if such cache object exist. 
- Delete(key uint64, fin DelFin) bool - - // Purge deletes all cache objects, read Delete method documentation. - Purge(fin PurgeFin) - - // Zap detaches the namespace from the cache tree and delete all its cache - // objects. The cache objects deletion and finalizers execution are happen - // immediately, even if its existing handles haven't yet been released. - // A zapped namespace can't never be filled again. - // If closed is false then the Get function will always call the given SetFunc - // if it is not nil, but resultant of the SetFunc will not be cached. - Zap(closed bool) -} - -// Object is a cache object. -type Object interface { - // Release releases the cache object. Other methods should not be called - // after the cache object has been released. - Release() - - // Value returns value of the cache object. - Value() interface{} -} - -// Namespace state. -type nsState int - -const ( - nsEffective nsState = iota - nsZapped - nsClosed -) - -// Node state. -type nodeState int - -const ( - nodeEffective nodeState = iota - nodeEvicted - nodeRemoved -) - -// Fake object. -type fakeObject struct { - value interface{} - fin func() - once uint32 -} - -func (o *fakeObject) Value() interface{} { - if atomic.LoadUint32(&o.once) == 0 { - return o.value - } - return nil -} - -func (o *fakeObject) Release() { - if !atomic.CompareAndSwapUint32(&o.once, 0, 1) { - return - } - if o.fin != nil { - o.fin() - o.fin = nil - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/kit/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index 07a9939..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package cache - -import ( - "math/rand" - "testing" -) - -func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Object { - obj, _ := ns.Get(key, func() (bool, interface{}, int, SetFin) { - return true, value, charge, fin - }) - return obj -} - -func TestCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewLRUCache(1000) - ns := c.GetNamespace(0) - for i, x := range cases { - set(ns, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - r, ok := ns.Get(y.key, nil) - if j <= i { - // should hit - if !ok { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else if r.Value().(string) != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value) - } - } else { - // should miss - if ok { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, r.Value().(string)) - } - } - if ok { - r.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - ns.Delete(x.key, func(exist bool) { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - r, ok := ns.Get(y.key, nil) - if j > i { - // should hit - if !ok { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else if r.Value().(string) != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value) - } - } else { - // should miss - if ok { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, r.Value().(string)) - } - } - if ok { - r.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func 
TestLRUCache_Eviction(t *testing.T) { - c := NewLRUCache(12) - ns := c.GetNamespace(0) - o1 := set(ns, 1, 1, 1, nil) - set(ns, 2, 2, 1, nil).Release() - set(ns, 3, 3, 1, nil).Release() - set(ns, 4, 4, 1, nil).Release() - set(ns, 5, 5, 1, nil).Release() - if r, ok := ns.Get(2, nil); ok { // 1,3,4,5,2 - r.Release() - } - set(ns, 9, 9, 10, nil).Release() // 5,2,9 - - for _, x := range []uint64{9, 2, 5, 1} { - r, ok := ns.Get(x, nil) - if !ok { - t.Errorf("miss for key '%d'", x) - } else { - if r.Value().(int) != int(x) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int)) - } - r.Release() - } - } - o1.Release() - for _, x := range []uint64{1, 2, 5} { - r, ok := ns.Get(x, nil) - if !ok { - t.Errorf("miss for key '%d'", x) - } else { - if r.Value().(int) != int(x) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int)) - } - r.Release() - } - } - for _, x := range []uint64{3, 4, 9} { - r, ok := ns.Get(x, nil) - if ok { - t.Errorf("hit for key '%d'", x) - if r.Value().(int) != int(x) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int)) - } - r.Release() - } - } -} - -func TestLRUCache_SetGet(t *testing.T) { - c := NewLRUCache(13) - ns := c.GetNamespace(0) - for i := 0; i < 200; i++ { - n := uint64(rand.Intn(99999) % 20) - set(ns, n, n, 1, nil).Release() - if p, ok := ns.Get(n, nil); ok { - if p.Value() == nil { - t.Errorf("key '%d' contains nil value", n) - } else { - got := p.Value().(uint64) - if got != n { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, got) - } - } - p.Release() - } else { - t.Errorf("key '%d' doesn't exist", n) - } - } -} - -func TestLRUCache_Purge(t *testing.T) { - c := NewLRUCache(3) - ns1 := c.GetNamespace(0) - o1 := set(ns1, 1, 1, 1, nil) - o2 := set(ns1, 2, 2, 1, nil) - ns1.Purge(nil) - set(ns1, 3, 3, 1, nil).Release() - for _, x := range []uint64{1, 2, 3} { - r, ok := ns1.Get(x, nil) - if !ok { - t.Errorf("miss for key '%d'", x) - 
} else { - if r.Value().(int) != int(x) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int)) - } - r.Release() - } - } - o1.Release() - o2.Release() - for _, x := range []uint64{1, 2} { - r, ok := ns1.Get(x, nil) - if ok { - t.Errorf("hit for key '%d'", x) - if r.Value().(int) != int(x) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int)) - } - r.Release() - } - } -} - -func BenchmarkLRUCache_SetRelease(b *testing.B) { - capacity := b.N / 100 - if capacity <= 0 { - capacity = 10 - } - c := NewLRUCache(capacity) - ns := c.GetNamespace(0) - b.ResetTimer() - for i := uint64(0); i < uint64(b.N); i++ { - set(ns, i, nil, 1, nil).Release() - } -} - -func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) { - capacity := b.N / 100 - if capacity <= 0 { - capacity = 10 - } - c := NewLRUCache(capacity) - ns := c.GetNamespace(0) - b.ResetTimer() - - na := b.N / 2 - nb := b.N - na - - for i := uint64(0); i < uint64(na); i++ { - set(ns, i, nil, 1, nil).Release() - } - - for i := uint64(0); i < uint64(nb); i++ { - set(ns, i, nil, 1, nil).Release() - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go b/kit/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go deleted file mode 100644 index 1fbf814..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package cache - -import ( - "sync" - "sync/atomic" -) - -type emptyCache struct { - sync.Mutex - table map[uint64]*emptyNS -} - -// NewEmptyCache creates a new initialized empty cache. 
-func NewEmptyCache() Cache { - return &emptyCache{ - table: make(map[uint64]*emptyNS), - } -} - -func (c *emptyCache) GetNamespace(id uint64) Namespace { - c.Lock() - defer c.Unlock() - - if ns, ok := c.table[id]; ok { - return ns - } - - ns := &emptyNS{ - cache: c, - id: id, - table: make(map[uint64]*emptyNode), - } - c.table[id] = ns - return ns -} - -func (c *emptyCache) Purge(fin PurgeFin) { - c.Lock() - for _, ns := range c.table { - ns.purgeNB(fin) - } - c.Unlock() -} - -func (c *emptyCache) Zap(closed bool) { - c.Lock() - for _, ns := range c.table { - ns.zapNB(closed) - } - c.table = make(map[uint64]*emptyNS) - c.Unlock() -} - -func (*emptyCache) SetCapacity(capacity int) {} - -type emptyNS struct { - cache *emptyCache - id uint64 - table map[uint64]*emptyNode - state nsState -} - -func (ns *emptyNS) Get(key uint64, setf SetFunc) (o Object, ok bool) { - ns.cache.Lock() - - switch ns.state { - case nsZapped: - ns.cache.Unlock() - if setf == nil { - return - } - - var value interface{} - var fin func() - ok, value, _, fin = setf() - if ok { - o = &fakeObject{ - value: value, - fin: fin, - } - } - return - case nsClosed: - ns.cache.Unlock() - return - } - - n, ok := ns.table[key] - if ok { - n.ref++ - } else { - if setf == nil { - ns.cache.Unlock() - return - } - - var value interface{} - var fin func() - ok, value, _, fin = setf() - if !ok { - ns.cache.Unlock() - return - } - - n = &emptyNode{ - ns: ns, - key: key, - value: value, - setfin: fin, - ref: 1, - } - ns.table[key] = n - } - - ns.cache.Unlock() - o = &emptyObject{node: n} - return -} - -func (ns *emptyNS) Delete(key uint64, fin DelFin) bool { - ns.cache.Lock() - - if ns.state != nsEffective { - ns.cache.Unlock() - if fin != nil { - fin(false) - } - return false - } - - n, ok := ns.table[key] - if !ok { - ns.cache.Unlock() - if fin != nil { - fin(false) - } - return false - } - n.delfin = fin - ns.cache.Unlock() - return true -} - -func (ns *emptyNS) purgeNB(fin PurgeFin) { - if ns.state != 
nsEffective { - return - } - for _, n := range ns.table { - n.purgefin = fin - } -} - -func (ns *emptyNS) Purge(fin PurgeFin) { - ns.cache.Lock() - ns.purgeNB(fin) - ns.cache.Unlock() -} - -func (ns *emptyNS) zapNB(closed bool) { - if ns.state != nsEffective { - return - } - for _, n := range ns.table { - n.execFin() - } - if closed { - ns.state = nsClosed - } else { - ns.state = nsZapped - } - ns.table = nil -} - -func (ns *emptyNS) Zap(closed bool) { - ns.cache.Lock() - ns.zapNB(closed) - delete(ns.cache.table, ns.id) - ns.cache.Unlock() -} - -type emptyNode struct { - ns *emptyNS - key uint64 - value interface{} - ref int - setfin SetFin - delfin DelFin - purgefin PurgeFin -} - -func (n *emptyNode) execFin() { - if n.setfin != nil { - n.setfin() - n.setfin = nil - } - if n.purgefin != nil { - n.purgefin(n.ns.id, n.key, n.delfin) - n.delfin = nil - n.purgefin = nil - } else if n.delfin != nil { - n.delfin(true) - n.delfin = nil - } -} - -func (n *emptyNode) evict() { - n.ns.cache.Lock() - n.ref-- - if n.ref == 0 { - if n.ns.state == nsEffective { - // Remove elem. - delete(n.ns.table, n.key) - // Execute finalizer. - n.execFin() - } - } else if n.ref < 0 { - panic("leveldb/cache: emptyNode: negative node reference") - } - n.ns.cache.Unlock() -} - -type emptyObject struct { - node *emptyNode - once uint32 -} - -func (o *emptyObject) Value() interface{} { - if atomic.LoadUint32(&o.once) == 0 { - return o.node.value - } - return nil -} - -func (o *emptyObject) Release() { - if !atomic.CompareAndSwapUint32(&o.once, 0, 1) { - return - } - o.node.evict() - o.node = nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go b/kit/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go deleted file mode 100644 index 3c98e07..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package cache - -import ( - "sync" - "sync/atomic" -) - -// lruCache represent a LRU cache state. -type lruCache struct { - sync.Mutex - - recent lruNode - table map[uint64]*lruNs - capacity int - size int -} - -// NewLRUCache creates a new initialized LRU cache with the given capacity. -func NewLRUCache(capacity int) Cache { - c := &lruCache{ - table: make(map[uint64]*lruNs), - capacity: capacity, - } - c.recent.rNext = &c.recent - c.recent.rPrev = &c.recent - return c -} - -// SetCapacity set cache capacity. -func (c *lruCache) SetCapacity(capacity int) { - c.Lock() - c.capacity = capacity - c.evict() - c.Unlock() -} - -// GetNamespace return namespace object for given id. -func (c *lruCache) GetNamespace(id uint64) Namespace { - c.Lock() - defer c.Unlock() - - if p, ok := c.table[id]; ok { - return p - } - - p := &lruNs{ - lru: c, - id: id, - table: make(map[uint64]*lruNode), - } - c.table[id] = p - return p -} - -// Purge purge entire cache. 
-func (c *lruCache) Purge(fin PurgeFin) { - c.Lock() - for _, ns := range c.table { - ns.purgeNB(fin) - } - c.Unlock() -} - -func (c *lruCache) Zap(closed bool) { - c.Lock() - for _, ns := range c.table { - ns.zapNB(closed) - } - c.table = make(map[uint64]*lruNs) - c.Unlock() -} - -func (c *lruCache) evict() { - top := &c.recent - for n := c.recent.rPrev; c.size > c.capacity && n != top; { - n.state = nodeEvicted - n.rRemove() - n.evictNB() - c.size -= n.charge - n = c.recent.rPrev - } -} - -type lruNs struct { - lru *lruCache - id uint64 - table map[uint64]*lruNode - state nsState -} - -func (ns *lruNs) Get(key uint64, setf SetFunc) (o Object, ok bool) { - lru := ns.lru - lru.Lock() - - switch ns.state { - case nsZapped: - lru.Unlock() - if setf == nil { - return - } - - var value interface{} - var fin func() - ok, value, _, fin = setf() - if ok { - o = &fakeObject{ - value: value, - fin: fin, - } - } - return - case nsClosed: - lru.Unlock() - return - } - - n, ok := ns.table[key] - if ok { - switch n.state { - case nodeEvicted: - // Insert to recent list. 
- n.state = nodeEffective - n.ref++ - lru.size += n.charge - lru.evict() - fallthrough - case nodeEffective: - // Bump to front - n.rRemove() - n.rInsert(&lru.recent) - } - n.ref++ - } else { - if setf == nil { - lru.Unlock() - return - } - - var value interface{} - var charge int - var fin func() - ok, value, charge, fin = setf() - if !ok { - lru.Unlock() - return - } - - n = &lruNode{ - ns: ns, - key: key, - value: value, - charge: charge, - setfin: fin, - ref: 2, - } - ns.table[key] = n - n.rInsert(&lru.recent) - - lru.size += charge - lru.evict() - } - - lru.Unlock() - o = &lruObject{node: n} - return -} - -func (ns *lruNs) Delete(key uint64, fin DelFin) bool { - lru := ns.lru - lru.Lock() - - if ns.state != nsEffective { - lru.Unlock() - if fin != nil { - fin(false) - } - return false - } - - n, ok := ns.table[key] - if !ok { - lru.Unlock() - if fin != nil { - fin(false) - } - return false - } - - n.delfin = fin - switch n.state { - case nodeRemoved: - lru.Unlock() - return false - case nodeEffective: - lru.size -= n.charge - n.rRemove() - n.evictNB() - } - n.state = nodeRemoved - - lru.Unlock() - return true -} - -func (ns *lruNs) purgeNB(fin PurgeFin) { - lru := ns.lru - if ns.state != nsEffective { - return - } - - for _, n := range ns.table { - n.purgefin = fin - if n.state == nodeEffective { - lru.size -= n.charge - n.rRemove() - n.evictNB() - } - n.state = nodeRemoved - } -} - -func (ns *lruNs) Purge(fin PurgeFin) { - ns.lru.Lock() - ns.purgeNB(fin) - ns.lru.Unlock() -} - -func (ns *lruNs) zapNB(closed bool) { - lru := ns.lru - if ns.state != nsEffective { - return - } - - if closed { - ns.state = nsClosed - } else { - ns.state = nsZapped - } - for _, n := range ns.table { - if n.state == nodeEffective { - lru.size -= n.charge - n.rRemove() - } - n.state = nodeRemoved - n.execFin() - } - ns.table = nil -} - -func (ns *lruNs) Zap(closed bool) { - ns.lru.Lock() - ns.zapNB(closed) - delete(ns.lru.table, ns.id) - ns.lru.Unlock() -} - -type lruNode struct { - 
ns *lruNs - - rNext, rPrev *lruNode - - key uint64 - value interface{} - charge int - ref int - state nodeState - setfin SetFin - delfin DelFin - purgefin PurgeFin -} - -func (n *lruNode) rInsert(at *lruNode) { - x := at.rNext - at.rNext = n - n.rPrev = at - n.rNext = x - x.rPrev = n -} - -func (n *lruNode) rRemove() bool { - // only remove if not already removed - if n.rPrev == nil { - return false - } - - n.rPrev.rNext = n.rNext - n.rNext.rPrev = n.rPrev - n.rPrev = nil - n.rNext = nil - - return true -} - -func (n *lruNode) execFin() { - if n.setfin != nil { - n.setfin() - n.setfin = nil - } - if n.purgefin != nil { - n.purgefin(n.ns.id, n.key, n.delfin) - n.delfin = nil - n.purgefin = nil - } else if n.delfin != nil { - n.delfin(true) - n.delfin = nil - } -} - -func (n *lruNode) evictNB() { - n.ref-- - if n.ref == 0 { - if n.ns.state == nsEffective { - // remove elem - delete(n.ns.table, n.key) - // execute finalizer - n.execFin() - } - } else if n.ref < 0 { - panic("leveldb/cache: lruCache: negative node reference") - } -} - -func (n *lruNode) evict() { - n.ns.lru.Lock() - n.evictNB() - n.ns.lru.Unlock() -} - -type lruObject struct { - node *lruNode - once uint32 -} - -func (o *lruObject) Value() interface{} { - if atomic.LoadUint32(&o.once) == 0 { - return o.node.value - } - return nil -} - -func (o *lruObject) Release() { - if !atomic.CompareAndSwapUint32(&o.once, 0, 1) { - return - } - - o.node.evict() - o.node = nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/comparer.go b/kit/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index d5699e3..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) - if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { - x = -1 - } else if m < n { - x = 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), iKey(b).ukey() - dst = icmp.ucmp.Separator(dst, ua, ub) - if dst == nil { - return nil - } - if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, a[len(a)-8:]...) - } - return dst -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() - dst = icmp.ucmp.Successor(dst, ub) - if dst == nil { - return nil - } - if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, b[len(b)-8:]...) - } - return dst -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/kit/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index 14dddf8..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[i]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[i]++ - return dst - } - } - return nil -} - -// DefaultComparer are default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare. -var DefaultComparer = bytesComparer{} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/kit/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 14a28f1..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package comparer provides interface and implementation for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. 
- Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation to a new name whenever the comparer implementation - // changes in a way that will cause the relative ordering of any two keys - // to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Bellow are advanced functions used used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x equal to a. - // - // Either contents of a or b should not by any means modified. Doing so - // may cause corruption on the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x equal to b. - // - // Contents of b should not by any means modified. Doing so may cause - // corruption on the internal state. - Successor(dst, b []byte) []byte -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/config.go b/kit/github.com/syndtr/goleveldb/leveldb/config.go deleted file mode 100644 index 5110588..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/config.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -const ( - kNumLevels = 7 - - // Level-0 compaction is started when we hit this many files. - kL0_CompactionTrigger float64 = 4 - - // Soft limit on number of level-0 files. We slow down writes at this point. - kL0_SlowdownWritesTrigger = 8 - - // Maximum number of level-0 files. We stop writes at this point. - kL0_StopWritesTrigger = 12 - - // Maximum level to which a new compacted memdb is pushed if it - // does not create overlap. We try to push to level 2 to avoid the - // relatively expensive level 0=>1 compactions and to avoid some - // expensive manifest file operations. We do not push all the way to - // the largest level since that can generate a lot of wasted disk - // space if the same key space is being repeatedly overwritten. - kMaxMemCompactLevel = 2 - - // Maximum size of a table. - kMaxTableSize = 2 * 1048576 - - // Maximum bytes of overlaps in grandparent (i.e., level+2) before we - // stop building a single file in a level->level+1 compaction. - kMaxGrandParentOverlapBytes = 10 * kMaxTableSize - - // Maximum number of bytes in all compacted files. We avoid expanding - // the lower level file set of a compaction if it would make the - // total compaction cover more than this many bytes. - kExpCompactionMaxBytes = 25 * kMaxTableSize -) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/kit/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index 5c41bfa..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "fmt" - "io" - "math/rand" - "testing" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCache: cache.NewLRUCache(100), - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) { - p := &h.dbHarness - t := p.t - - var file storage.File - ff, _ := p.stor.GetFiles(ft) 
- for _, f := range ff { - if file == nil || f.Num() > file.Num() { - file = f - } - } - if file == nil { - t.Fatalf("no such file with type %q", ft) - } - - r, err := file.Open() - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = file.Remove() - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := file.Create() - if err != nil { - t.Fatal("cannot create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, f := range ff { - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := &h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } 
- err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, 19, 1) - h.corrupt(storage.TypeJournal, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) - - h.close() -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, 100, 1) - - h.openDB() - h.check(99, 99) - - h.close() -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -2000, 500) - - h.openDB() - h.check(5000, 9999) - - h.close() -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCache: cache.NewLRUCache(100), - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) - h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) - - h.close() -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") 
- h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") - - h.close() -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, 100, 1) - - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) - - h.close() -} - -func TestCorruptDB_UnrelatedKeys(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - 
h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) - - h.close() -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db.go b/kit/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index b10b8f0..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,809 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "time" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/table" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// DB is a LevelDB database. -type DB struct { - // Need 64-bit alignment. 
- seq uint64 - - // Session. - s *session - - // MemDB. - memMu sync.RWMutex - memPool *util.Pool - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsRoot snapshotElement - - // Write. - writeC chan *Batch - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - journalC chan *Batch - journalAckC chan error - - // Compaction. - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - tcompTriggerC chan struct{} - mcompCmdC chan cCmd - mcompTriggerC chan struct{} - compErrC chan error - compErrSetC chan error - compStats [kNumLevels]cStats - - // Close. - closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeq, - // MemDB - memPool: util.NewPool(1), - // Write - writeC: make(chan *Batch), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - journalC: make(chan *Batch), - journalAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - tcompTriggerC: make(chan struct{}, 1), - mcompCmdC: make(chan cCmd), - mcompTriggerC: make(chan struct{}, 1), - compErrC: make(chan error), - compErrSetC: make(chan error), - // Close - closeC: make(chan struct{}), - } - db.initSnapshot() - - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - // Don't include compaction error goroutine into wait group. 
- go db.compactionError() - - db.closeW.Add(3) - go db.tCompaction() - go db.mCompaction() - go db.jWriter() - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist OpenFile will returns -// os.ErrExist error. -// -// OpenFile uses standard file-system backed storage implementation as -// desribed in the leveldb/storage package. -// -// OpenFile will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. 
-func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses standard file-system backed storage implementation as desribed -// in the leveldb/storage package. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - // Get all tables and sort it by file number. 
- tableFiles_, err := s.getFiles(storage.TypeTable) - if err != nil { - return err - } - tableFiles := files(tableFiles_) - tableFiles.sort() - - var mSeq uint64 - var good, corrupted int - rec := new(sessionRecord) - bpool := util.NewBufferPool(o.GetBlockSize() + 5) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - tmp.Remove() - tmp = nil - } - }() - - // Copy entries. - tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validIkey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil { - return - } - err = tw.Close() - if err != nil { - return - } - err = writer.Sync() - if err != nil { - return - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(file storage.File) error { - s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - defer reader.Close() - - // Get file size. - size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var tSeq uint64 - var tgood, tcorrupted, blockerr int - var imin, imax []byte - tr := table.NewReader(reader, size, nil, bpool, o) - iter := tr.NewIterator(nil, nil) - iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) { - s.logf("table@recovery found error @%d %q", file.Num(), err) - blockerr++ - }) - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, ok := parseIkey(key) - if !ok { - tcorrupted++ - continue - } - tgood++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) - } - if err := iter.Error(); err != nil { - iter.Release() - return err - } - iter.Release() - - if tgood > 0 { - if tcorrupted > 0 || blockerr > 0 { - // Rebuild the table. 
- s.logf("table@recovery rebuilding @%d", file.Num()) - iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - reader.Close() - if err := file.Replace(tmp); err != nil { - return err - } - size = newSize - } - if tSeq > mSeq { - mSeq = tSeq - } - // Add table to level 0. - rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq) - } else { - s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size) - } - - good += tgood - corrupted += tcorrupted - - return nil - } - - // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) - - // Mark file number as used. - s.markFileNum(tableFiles[len(tableFiles)-1].Num()) - - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(tableFiles), good, corrupted, mSeq) - } - - // Set sequence number. - rec.setSeq(mSeq + 1) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all tables and sort it by file number. - journalFiles_, err := db.s.getFiles(storage.TypeJournal) - if err != nil { - return err - } - journalFiles := files(journalFiles_) - journalFiles.sort() - - // Discard older journal. - prev := -1 - for i, file := range journalFiles { - if file.Num() >= db.s.stJournalNum { - if prev >= 0 { - i-- - journalFiles[i] = journalFiles[prev] - } - journalFiles = journalFiles[i:] - break - } else if file.Num() == db.s.stPrevJournalNum { - prev = i - } - } - - var jr *journal.Reader - var of storage.File - var mem *memdb.DB - batch := new(Batch) - cm := newCMem(db.s) - buf := new(util.Buffer) - // Options. 
- strict := db.s.o.GetStrict(opt.StrictJournal) - checksum := db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer := db.s.o.GetWriteBuffer() - recoverJournal := func(file storage.File) error { - db.logf("journal@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - defer reader.Close() - - // Create/reset journal reader instance. - if jr == nil { - jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum) - } else { - jr.Reset(reader, dropper{db.s, file}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if of != nil { - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - if err := cm.commit(file.Num(), db.seq); err != nil { - return err - } - cm.reset() - of.Remove() - of = nil - } - - // Replay journal to memdb. - mem.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - return err - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - continue - } else { - return err - } - } - if err := batch.decode(buf.Bytes()); err != nil { - return err - } - if err := batch.memReplay(mem); err != nil { - return err - } - - // Save sequence number. - db.seq = batch.seq + uint64(batch.len()) - - // Flush it if large enough. - if mem.Size() >= writeBuffer { - if err := cm.flush(mem, 0); err != nil { - return err - } - mem.Reset() - } - } - - of = file - return nil - } - - // Recover all journals. - if len(journalFiles) > 0 { - db.logf("journal@recovery F·%d", len(journalFiles)) - - // Mark file number as used. - db.s.markFileNum(journalFiles[len(journalFiles)-1].Num()) - - mem = memdb.New(db.s.icmp, writeBuffer) - for _, file := range journalFiles { - if err := recoverJournal(file); err != nil { - return err - } - } - - // Flush the last journal. - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - } - - // Create a new journal. 
- if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. - if err := cm.commit(db.journalFile.Num(), db.seq); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return err - } - - // Remove the last obsolete journal file. - if of != nil { - of.Remove() - } - - return nil -} - -func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := newIKey(key, seq, tSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, mv, me := m.db.Find(ikey) - if me == nil { - ukey, _, t, ok := parseIkey(mk) - if ok && db.s.icmp.uCompare(ukey, key) == 0 { - if t == tDel { - return nil, ErrNotFound - } - return append([]byte{}, mv...), nil - } - } else if me != ErrNotFound { - return nil, me - } - } - - v := db.s.version() - value, cSched, err := v.get(ikey, ro) - v.release() - if cSched { - // Trigger table compaction. - db.compTrigger(db.tcompTriggerC) - } - return -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contain the key. -// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = db.ok() - if err != nil { - return - } - - return db.get(key, db.getSeq(), ro) -} - -// NewIterator returns an iterator for the latest snapshot of the -// uderlying DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. 
A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - - snap := db.newSnapshot() - defer snap.Release() - return snap.NewIterator(slice, ro) -} - -// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot -// is a frozen snapshot of a DB state at a particular point in time. The -// content of snapshot are guaranteed to be consistent. -// -// The snapshot must be released after use, by calling Release method. -func (db *DB) GetSnapshot() (*Snapshot, error) { - if err := db.ok(); err != nil { - return nil, err - } - - return db.newSnapshot(), nil -} - -// GetProperty returns value of the given property name. -// -// Property names: -// leveldb.num-files-at-level{n} -// Returns the number of filer at level 'n'. -// leveldb.stats -// Returns statistics of the underlying DB. -// leveldb.sstables -// Returns sstables list for each level. -// leveldb.blockpool -// Returns block pool stats. -func (db *DB) GetProperty(name string) (value string, err error) { - err = db.ok() - if err != nil { - return - } - - const prefix = "leveldb." 
- if !strings.HasPrefix(name, prefix) { - return "", errors.New("leveldb: GetProperty: unknown property: " + name) - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - switch { - case strings.HasPrefix(p, "num-files-at-level"): - var level uint - var rest string - n, _ := fmt.Scanf("%d%s", &level, &rest) - if n != 1 || level >= kNumLevels { - err = errors.New("leveldb: GetProperty: invalid property: " + name) - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "sstables": - for level, tables := range v.tables { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - default: - err = errors.New("leveldb: GetProperty: unknown property: " + name) - } - - return -} - -// SizeOf calculates approximate sizes of the given key ranges. -// The length of the returned sizes are equal with the length of the given -// ranges. The returned sizes measure storage space usage, so if the user -// data compresses by a factor of ten, the returned sizes will be one-tenth -// the size of the corresponding user data size. -// The results may not include the sizes of recently written data. 
-func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { - if err := db.ok(); err != nil { - return nil, err - } - - v := db.s.version() - defer v.release() - - sizes := make(Sizes, 0, len(ranges)) - for _, r := range ranges { - imin := newIKey(r.Start, kMaxSeq, tSeek) - imax := newIKey(r.Limit, kMaxSeq, tSeek) - start, err := v.offsetOf(imin) - if err != nil { - return nil, err - } - limit, err := v.offsetOf(imax) - if err != nil { - return nil, err - } - var size uint64 - if limit >= start { - size = limit - start - } - sizes = append(sizes, size) - } - - return sizes, nil -} - -// Close closes the DB. This will also releases any outstanding snapshot and -// abort any in-flight compaction. -// -// It is not safe to close a DB until all outstanding iterators are released. -// It is valid to call Close multiple times. Other methods should not be -// called after the DB has been closed. -func (db *DB) Close() error { - if !db.setClosed() { - return ErrClosed - } - - start := time.Now() - db.log("db@close closing") - - // Clear the finalizer. - runtime.SetFinalizer(db, nil) - - // Get compaction error. - var err error - select { - case err = <-db.compErrC: - default: - } - - close(db.closeC) - - // Wait for the close WaitGroup. - db.closeW.Wait() - - // Close journal. - db.writeLockC <- struct{}{} - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - - // Close session. - db.s.close() - db.logf("db@close done T·%v", time.Since(start)) - db.s.release() - - if db.closer != nil { - if err1 := db.closer.Close(); err == nil { - err = err1 - } - } - - // NIL'ing pointers. 
- db.s = nil - db.mem = nil - db.frozenMem = nil - db.journal = nil - db.journalWriter = nil - db.journalFile = nil - db.frozenJournalFile = nil - db.snapsRoot = snapshotElement{} - db.closer = nil - - return err -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/kit/github.com/syndtr/goleveldb/leveldb/db_compaction.go deleted file mode 100644 index 35a558c..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ /dev/null @@ -1,689 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "sync" - "time" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/memdb" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStats struct { - sync.Mutex - duration time.Duration - read uint64 - write uint64 -} - -func (p *cStats) add(n *cStatsStaging) { - p.Lock() - p.duration += n.duration - p.read += n.read - p.write += n.write - p.Unlock() -} - -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() - return p.duration, p.read, p.write -} - -type cStatsStaging struct { - start time.Time - duration time.Duration - on bool - read uint64 - write uint64 -} - -func (p *cStatsStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatsStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -type cMem struct { - s *session - level int - rec *sessionRecord -} - -func newCMem(s *session) *cMem { - return &cMem{s: s, rec: new(sessionRecord)} -} - -func (c *cMem) flush(mem *memdb.DB, level int) error { - s := c.s - - // Write memdb to table. 
- iter := mem.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return err - } - - // Pick level. - if level < 0 { - level = s.version_NB().pickLevel(t.imin.ukey(), t.imax.ukey()) - } - c.rec.addTableFile(level, t) - - s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - - c.level = level - return nil -} - -func (c *cMem) reset() { - c.rec = new(sessionRecord) -} - -func (c *cMem) commit(journal, seq uint64) error { - c.rec.setJournalNum(journal) - c.rec.setSeq(seq) - - // Commit changes. - return c.s.commit(c.rec) -} - -func (db *DB) compactionError() { - var err error -noerr: - for { - select { - case err = <-db.compErrSetC: - if err != nil { - goto haserr - } - case _, _ = <-db.closeC: - return - } - } -haserr: - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - if err == nil { - goto noerr - } - case _, _ = <-db.closeC: - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -func (db *DB) compactionTransact(name string, exec func(cnt *compactionTransactCounter) error, rollback func() error) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting && rollback != nil { - if err := rollback(); err != nil { - db.logf("%s rollback error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - backoff := backoffMin - backoffT := time.NewTimer(backoff) - lastCnt := compactionTransactCounter(0) - for n := 0; ; n++ { - // Check wether the DB is closed. - if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := exec(&cnt) - - // Set compaction error status. 
- select { - case db.compErrSetC <- err: - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - db.logf("%s error I·%d %q", name, cnt, err) - - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. - backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) memCompaction() { - mem := db.getFrozenMem() - if mem == nil { - return - } - defer mem.decref() - - c := newCMem(db.s) - stats := new(cStatsStaging) - - db.logf("mem@flush N·%d S·%s", mem.db.Len(), shortenb(mem.db.Size())) - - // Don't compact empty memdb. - if mem.db.Len() == 0 { - db.logf("mem@flush skipping") - // drop frozen mem - db.dropFrozenMem() - return - } - - // Pause table compaction. 
- ch := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(ch): - case _, _ = <-db.closeC: - return - } - - db.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.flush(mem.db, -1) - }, func() error { - for _, r := range c.rec.addedTables { - db.logf("mem@flush rollback @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil - }) - - db.compactionTransact("mem@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.commit(db.journalFile.Num(), db.frozenSeq) - }, nil) - - db.logf("mem@flush commited F·%d T·%v", len(c.rec.addedTables), stats.duration) - - for _, r := range c.rec.addedTables { - stats.write += r.size - } - db.compStats[c.level].add(stats) - - // Drop frozen mem. - db.dropFrozenMem() - - // Resume table compaction. - select { - case <-ch: - case _, _ = <-db.closeC: - return - } - - // Trigger table compaction. 
- db.compTrigger(db.mcompTriggerC) -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - rec := new(sessionRecord) - rec.addCompactionPointer(c.level, c.imax) - - if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.deleteTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransact("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) - return - } - - var stats [2]cStatsStaging - for i, tables := range c.tables { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.deleteTable(c.level+i, t.file.Num()) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) - - var snapUkey []byte - var snapHasUkey bool - var snapSeq uint64 - var snapIter int - var snapDropCnt int - var dropCnt int - db.compactionTransact("table@build", func(cnt *compactionTransactCounter) (err error) { - ukey := append([]byte{}, snapUkey...) - hasUkey := snapHasUkey - lseq := snapSeq - dropCnt = snapDropCnt - snapSched := snapIter == 0 - - var tw *tWriter - finish := func() error { - t, err := tw.finish() - if err != nil { - return err - } - rec.addTableFile(c.level+1, t) - stats[1].write += t.size - db.logf("table@build created L%d@%d N·%d S·%s %q:%q", c.level+1, t.file.Num(), tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - return nil - } - - defer func() { - stats[1].stopTimer() - if tw != nil { - tw.drop() - tw = nil - } - }() - - stats[1].startTimer() - iter := c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. 
- if i < snapIter { - continue - } - - ikey := iKey(iter.Key()) - - if c.shouldStopBefore(ikey) && tw != nil { - err = finish() - if err != nil { - return - } - snapSched = true - tw = nil - } - - // Scheduled for snapshot, snapshot will used to retry compaction - // if error occured. - if snapSched { - snapUkey = append(snapUkey[:0], ukey...) - snapHasUkey = hasUkey - snapSeq = lseq - snapIter = i - snapDropCnt = dropCnt - snapSched = false - } - - if seq, vt, ok := ikey.parseNum(); !ok { - // Don't drop error keys - ukey = ukey[:0] - hasUkey = false - lseq = kMaxSeq - } else { - if !hasUkey || db.s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - // First occurrence of this user key - ukey = append(ukey[:0], ikey.ukey()...) - hasUkey = true - lseq = kMaxSeq - } - - drop := false - if lseq <= minSeq { - // Dropped because newer entry for same user key exist - drop = true // (A) - } else if vt == tDel && seq <= minSeq && c.baseLevelForKey(ukey) { - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - drop = true - } - - lseq = seq - if drop { - dropCnt++ - continue - } - } - - // Create new table if not already - if tw == nil { - // Check for pause event. - select { - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - case _, _ = <-db.closeC: - db.compactionExitTransact() - default: - } - - // Create new table. 
- tw, err = db.s.tops.create() - if err != nil { - return - } - } - - // Write key/value into table - err = tw.append(ikey, iter.Value()) - if err != nil { - return - } - - // Finish table if it is big enough - if tw.tw.BytesLen() >= kMaxTableSize { - err = finish() - if err != nil { - return - } - snapSched = true - tw = nil - } - } - - err = iter.Error() - if err != nil { - return - } - - // Finish last table - if tw != nil && !tw.empty() { - err = finish() - if err != nil { - return - } - tw = nil - } - return - }, func() error { - for _, r := range rec.addedTables { - db.logf("table@build rollback @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil - }) - - // Commit changes - db.compactionTransact("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) - - resultSize := int(stats[1].write) - db.logf("table@compaction commited F%s S%s D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats[c.level+1].add(&stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } else { - v := db.s.version_NB() - - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 - } - } - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } - } -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - return 
db.s.version_NB().needCompaction() -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case _, _ = <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cIdle struct { - ackC chan<- error -} - -func (r cIdle) ack(err error) { - r.ackC <- err -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - defer func() { - recover() - }() - if r.ackC != nil { - r.ackC <- err - } -} - -func (db *DB) compSendIdle(compC chan<- cCmd) error { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cIdle{ch}: - case err := <-db.compErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - return <-ch -} - -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. 
- select { - case err = <-db.compErrC: - case err = <-ch: - } - return err -} - -func (db *DB) compTrigger(compTriggerC chan struct{}) { - select { - case compTriggerC <- struct{}{}: - default: - } -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - db.memCompaction() - x.ack(nil) - x = nil - case <-db.mcompTriggerC: - db.memCompaction() - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case <-db.tcompTriggerC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - default: - } - } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil - } - ackQ = ackQ[:0] - select { - case x = <-db.tcompCmdC: - case <-db.tcompTriggerC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cIdle: - ackQ = append(ackQ, x) - case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_iter.go b/kit/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 91edc65..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "runtime" - "sync" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - em, fm := db.getMems() - v := db.s.version() - - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) - emi := em.db.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) - if fm != nil { - fmi := fm.db.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) - } - i = append(i, ti...) 
- strict := db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = newIKey(slice.Start, kMaxSeq, tSeek) - } - if slice.Limit != nil { - islice.Limit = newIKey(slice.Limit, kMaxSeq, tSeek) - } - } - rawIter := db.newRawIterator(islice, ro) - iter := &dbIter{ - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator), - key: make([]byte, 0), - value: make([]byte, 0), - } - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. 
-type dbIter struct { - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := newIKey(key, i.seq, tSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - ukey, seq, t, ok := parseIkey(i.iter.Key()) - if ok { - if seq <= i.seq { - switch t { - case tDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) - i.dir = dirForward - case tVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) 
- i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(errInvalidIkey) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - ukey, seq, t, ok := parseIkey(i.iter.Key()) - if ok { - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (t == tDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - } - } - } else if i.strict { - i.setErr(errInvalidIkey) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - ukey, _, _, ok := parseIkey(i.iter.Key()) - if ok { - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(errInvalidIkey) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. 
- runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir != dirReleased { - i.releaser = releaser - } -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/kit/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index 095c3bc..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "runtime" - "sync" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - // Next and previous pointers in the doubly-linked list of elements. - next, prev *snapshotElement -} - -// Initialize the snapshot. -func (db *DB) initSnapshot() { - db.snapsRoot.next = &db.snapsRoot - db.snapsRoot.prev = &db.snapsRoot -} - -// Acquires a snapshot, based on latest sequence. -func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - seq := db.getSeq() - elem := db.snapsRoot.prev - if elem == &db.snapsRoot || elem.seq != seq { - at := db.snapsRoot.prev - next := at.next - elem = &snapshotElement{ - seq: seq, - prev: at, - next: next, - } - at.next = elem - next.prev = elem - } - elem.ref++ - db.snapsMu.Unlock() - return elem -} - -// Releases given snapshot element. 
-func (db *DB) releaseSnapshot(elem *snapshotElement) { - if !db.isClosed() { - db.snapsMu.Lock() - elem.ref-- - if elem.ref == 0 { - elem.prev.next = elem.next - elem.next.prev = elem.prev - elem.next = nil - elem.prev = nil - } else if elem.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } - db.snapsMu.Unlock() - } -} - -// Gets minimum sequence that not being snapshoted. -func (db *DB) minSeq() uint64 { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - elem := db.snapsRoot.prev - if elem != &db.snapsRoot { - return elem.seq - } - return db.getSeq() -} - -// Snapshot is a DB snapshot. -type Snapshot struct { - db *DB - elem *snapshotElement - mu sync.Mutex - released bool -} - -// Creates new snapshot object. -func (db *DB) newSnapshot() *Snapshot { - snap := &Snapshot{ - db: db, - elem: db.acquireSnapshot(), - } - runtime.SetFinalizer(snap, (*Snapshot).Release) - return snap -} - -// Get gets the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.Lock() - defer snap.mu.Unlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.get(key, snap.elem.seq, ro) -} - -// NewIterator returns an iterator for the snapshot of the uderlying DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. 
And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// Releasing the snapshot doesn't mean releasing the iterator too, the -// iterator would be still valid until released. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := snap.db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - snap.mu.Lock() - defer snap.mu.Unlock() - if snap.released { - return iterator.NewEmptyIterator(ErrSnapshotReleased) - } - // Since iterator already hold version ref, it doesn't need to - // hold snapshot ref. - return snap.db.newIterator(snap.elem.seq, slice, ro) -} - -// Release releases the snapshot. This will not release any returned -// iterators, the iterators would still be valid until released or the -// underlying DB is closed. -// -// Other methods should not be called after the snapshot has been released. -func (snap *Snapshot) Release() { - snap.mu.Lock() - defer snap.mu.Unlock() - - if !snap.released { - // Clear the finalizer. - runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - snap.db = nil - snap.elem = nil - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_state.go b/kit/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index e7fc89f..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "sync/atomic" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type memDB struct { - pool *util.Pool - db *memdb.DB - ref int32 -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - m.pool.Put(m) - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. -func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. -func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() - if err != nil { - db.s.reuseFileNum(num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - panic("still has frozen mem") - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFile = db.journalFile - } - db.journalWriter = w - db.journalFile = file - db.frozenMem = db.mem - mem, ok := db.memPool.Get().(*memDB) - if ok && mem.db.Capacity() >= n { - mem.db.Reset() - mem.incref() - } else { - mem = &memDB{ - pool: db.memPool, - db: memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)), - ref: 1, - } - } - mem.incref() - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. - db.frozenSeq = db.seq - return -} - -// Get all memdbs. 
-func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get frozen memdb. -func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.frozenJournalFile.Remove(); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) - } - db.frozenJournalFile = nil - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. -func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_test.go b/kit/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index b6e1cb1..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,1886 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -type dbHarness struct { - t *testing.T - - stor *testStorage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - h.t = t - h.stor = newTestStorage(t) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. 
- defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - h.closeDB() - h.openDB() -} - -func (h *dbHarness) close() { - h.closeDB0() - h.db = nil - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { - t := h.t - db := h.db - - var res uint64 - v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > res { - res = sum - } - } - } - v.release() - - if res > want { - t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res) 
- } -} - -func (h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := newIKey([]byte(key), kMaxSeq, tVal) - iter := db.newRawIterator(nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - rkey := iKey(iter.Key()) - if _, t, ok := rkey.parseNum(); ok { - if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch t { - case tVal: - res += string(iter.Value()) - case tDel: - res += "DEL" - } 
- } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". -func (h *dbHarness) getKeyVal(want string) { - t := h.t - db := h.db - - s, err := db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - res := "" - iter := s.NewIterator(nil, nil) - for iter.Next() { - res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) - } - iter.Release() - - if res != want { - t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) - } - s.Release() -} - -func (h *dbHarness) waitCompaction() { - t := h.t - db := h.db - if err := db.compSendIdle(db.tcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) waitMemCompaction() { - t := h.t - db := h.db - - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) compactMem() { - t := h.t - db := h.db - - db.writeLockC <- struct{}{} - defer func() { - <-db.writeLockC - }() - - if _, err := db.rotateMem(0); err != nil { - t.Error("compaction error: ", err) - } - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } - - if h.totalTables() == 0 { - t.Error("zero tables after mem compaction") - } -} - -func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { - t := h.t - db := h.db - - var _min, _max []byte - if min != "" { - _min = []byte(min) - } - if max != "" { - _max = []byte(max) - } - - if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { - if wanterr { - t.Log("CompactRangeAt: got error (expected): ", err) - } else { - t.Error("CompactRangeAt: got error: ", err) - } - } else if wanterr { - 
t.Error("CompactRangeAt: expect error") - } -} - -func (h *dbHarness) compactRangeAt(level int, min, max string) { - h.compactRangeAtErr(level, min, max, false) -} - -func (h *dbHarness) compactRange(min, max string) { - t := h.t - db := h.db - - var r util.Range - if min != "" { - r.Start = []byte(min) - } - if max != "" { - r.Limit = []byte(max) - } - if err := db.CompactRange(r); err != nil { - t.Error("CompactRange: got error: ", err) - } -} - -func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { - t := h.t - db := h.db - - s, err := db.SizeOf([]util.Range{ - {[]byte(start), []byte(limit)}, - }) - if err != nil { - t.Error("SizeOf: got error: ", err) - } - if s.Sum() < low || s.Sum() > hi { - t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d", - shorten(start), shorten(limit), low, hi, s.Sum()) - } -} - -func (h *dbHarness) getSnapshot() (s *Snapshot) { - s, err := h.db.GetSnapshot() - if err != nil { - h.t.Fatal("GetSnapshot: got error: ", err) - } - return -} -func (h *dbHarness) tablesPerLevel(want string) { - res := "" - nz := 0 - v := h.db.s.version() - for level, tt := range v.tables { - if level > 0 { - res += "," - } - res += fmt.Sprint(len(tt)) - if len(tt) > 0 { - nz = len(res) - } - } - v.release() - res = res[:nz] - if res != want { - h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) - } -} - -func (h *dbHarness) totalTables() (n int) { - v := h.db.s.version() - for _, tt := range v.tables { - n += len(tt) - } - v.release() - return -} - -type keyValue interface { - Key() []byte - Value() []byte -} - -func testKeyVal(t *testing.T, kv keyValue, want string) { - res := string(kv.Key()) + "->" + string(kv.Value()) - if res != want { - t.Errorf("invalid key/value, want=%q, got=%q", want, res) - } -} - -func numKey(num int) string { - return fmt.Sprintf("key%06d", num) -} - -var _bloom_filter = filter.NewBloomFilter(10) - -func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { - for i := 0; i < 4; i++ { - 
func() { - switch i { - case 0: - case 1: - if o == nil { - o = &opt.Options{Filter: _bloom_filter} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Filter = _bloom_filter - } - case 2: - if o == nil { - o = &opt.Options{Compression: opt.NoCompression} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Compression = opt.NoCompression - } - } - h := newDbHarnessWopt(t, o) - defer h.close() - switch i { - case 3: - h.reopenDB() - } - f(h) - }() - } -} - -func trun(t *testing.T, f func(h *dbHarness)) { - truno(t, nil, f) -} - -func testAligned(t *testing.T, name string, offset uintptr) { - if offset%8 != 0 { - t.Errorf("field %s offset is not 64-bit aligned", name) - } -} - -func Test_FieldsAligned(t *testing.T) { - p1 := new(DB) - testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) - p2 := new(session) - testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum)) - testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) - testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) - testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq)) -} - -func TestDb_Locking(t *testing.T) { - h := newDbHarness(t) - defer h.stor.Close() - h.openAssert(false) - h.closeDB() - h.openAssert(true) -} - -func TestDb_Empty(t *testing.T) { - trun(t, func(h *dbHarness) { - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDb_ReadWrite(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("bar", "v2") - h.put("foo", "v3") - h.getVal("foo", "v3") - h.getVal("bar", "v2") - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "v2") - }) -} - -func TestDb_PutDeleteGet(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.getVal("foo", "v2") - h.delete("foo") - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDb_EmptyBatch(t *testing.T) { - h := newDbHarness(t) - 
defer h.close() - - h.get("foo", false) - err := h.db.Write(new(Batch), h.wo) - if err != nil { - t.Error("writing empty batch yield error: ", err) - } - h.get("foo", false) -} - -func TestDb_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) - defer h.close() - - h.put("foo", "v1") - h.getVal("foo", "v1") - - h.stor.DelaySync(storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction - for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { - time.Sleep(10 * time.Microsecond) - } - if h.db.getFrozenMem() == nil { - h.stor.ReleaseSync(storage.TypeTable) - t.Fatal("No frozen mem") - } - h.getVal("foo", "v1") - h.stor.ReleaseSync(storage.TypeTable) // Release sync calls - - h.reopenDB() - h.getVal("foo", "v1") - h.get("k1", true) - h.get("k2", true) -} - -func TestDb_GetFromTable(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.getVal("foo", "v1") - }) -} - -func TestDb_GetSnapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - bar := strings.Repeat("b", 200) - h.put("foo", "v1") - h.put(bar, "v1") - - snap, err := h.db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.put("foo", "v2") - h.put(bar, "v2") - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - h.compactMem() - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - snap.Release() - - h.reopenDB() - h.getVal("foo", "v2") - h.getVal(bar, "v2") - }) -} - -func TestDb_GetLevel0Ordering(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 4; i++ { - h.put("bar", fmt.Sprintf("b%d", i)) - h.put("foo", fmt.Sprintf("v%d", i)) - h.compactMem() - } - h.getVal("foo", "v3") - h.getVal("bar", "b3") - - v := h.db.s.version() - t0len := v.tLen(0) - v.release() - if t0len < 2 { 
- t.Errorf("level-0 tables is less than 2, got %d", t0len) - } - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "b3") - }) -} - -func TestDb_GetOrderedByLevels(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.compactRange("a", "z") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.compactMem() - h.getVal("foo", "v2") - }) -} - -func TestDb_GetPicksCorrectFile(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange to have multiple files in a non-level-0 level. - h.put("a", "va") - h.compactMem() - h.compactRange("a", "b") - h.put("x", "vx") - h.compactMem() - h.compactRange("x", "y") - h.put("f", "vf") - h.compactMem() - h.compactRange("f", "g") - - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - - h.compactRange("", "") - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - }) -} - -func TestDb_GetEncountersEmptyLevel(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange for the following to happen: - // * sstable A in level 0 - // * nothing in level 1 - // * sstable B in level 2 - // Then do enough Get() calls to arrange for an automatic compaction - // of sstable A. A bug would cause the compaction to be marked as - // occuring at level 1 (instead of the correct level 0). - - // Step 1: First place sstables in levels 0 and 2 - for i := 0; ; i++ { - if i >= 100 { - t.Fatal("could not fill levels-0 and level-2") - } - v := h.db.s.version() - if v.tLen(0) > 0 && v.tLen(2) > 0 { - v.release() - break - } - v.release() - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - - h.getVal("a", "begin") - h.getVal("z", "end") - } - - // Step 2: clear level 1 if necessary. 
- h.compactRangeAt(1, "", "") - h.tablesPerLevel("1,0,1") - - h.getVal("a", "begin") - h.getVal("z", "end") - - // Step 3: read a bunch of times - for i := 0; i < 200; i++ { - h.get("missing", false) - } - - // Step 4: Wait for compaction to finish - h.waitCompaction() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - h.getVal("a", "begin") - h.getVal("z", "end") - }) -} - -func TestDb_IterMultiWithDelete(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("a", "va") - h.put("b", "vb") - h.put("c", "vc") - h.delete("b") - h.get("b", false) - - iter := h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - - h.compactMem() - - iter = h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - }) -} - -func TestDb_IteratorPinsRef(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "hello") - - // Get iterator that will yield the current contents of the DB. 
- iter := h.db.NewIterator(nil, nil) - - // Write to force compactions - h.put("foo", "newvalue1") - for i := 0; i < 100; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - h.put("foo", "newvalue2") - - iter.First() - testKeyVal(t, iter, "foo->hello") - if iter.Next() { - t.Errorf("expect eof") - } - iter.Release() -} - -func TestDb_Recover(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("baz", "v5") - - h.reopenDB() - h.getVal("foo", "v1") - - h.getVal("foo", "v1") - h.getVal("baz", "v5") - h.put("bar", "v2") - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - h.put("foo", "v4") - h.getVal("foo", "v4") - h.getVal("bar", "v2") - h.getVal("baz", "v5") - }) -} - -func TestDb_RecoverWithEmptyJournal(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("foo", "v2") - - h.reopenDB() - h.reopenDB() - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - }) -} - -func TestDb_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { - - h.stor.DelaySync(storage.TypeTable) - h.put("big1", strings.Repeat("x", 10000000)) - h.put("big2", strings.Repeat("y", 1000)) - h.put("bar", "v2") - h.stor.ReleaseSync(storage.TypeTable) - - h.reopenDB() - h.getVal("bar", "v2") - h.getVal("big1", strings.Repeat("x", 10000000)) - h.getVal("big2", strings.Repeat("y", 1000)) - }) -} - -func TestDb_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) - defer h.close() - - n := 500 - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - for i := 0; i < n; i++ { - h.put(key(i), key(i)+strings.Repeat("v", 1000)) - } - - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } - - h.reopenDB() - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } -} - -func TestDb_RecoverWithLargeJournal(t *testing.T) { - h := 
newDbHarness(t) - defer h.close() - - h.put("big1", strings.Repeat("1", 200000)) - h.put("big2", strings.Repeat("2", 200000)) - h.put("small3", strings.Repeat("3", 10)) - h.put("small4", strings.Repeat("4", 10)) - h.tablesPerLevel("") - - // Make sure that if we re-open with a small write buffer size that - // we flush table files in the middle of a large journal file. - h.o.WriteBuffer = 100000 - h.reopenDB() - h.getVal("big1", strings.Repeat("1", 200000)) - h.getVal("big2", strings.Repeat("2", 200000)) - h.getVal("small3", strings.Repeat("3", 10)) - h.getVal("small4", strings.Repeat("4", 10)) - v := h.db.s.version() - if v.tLen(0) <= 1 { - t.Errorf("tables-0 less than one") - } - v.release() -} - -func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 10000000, - Compression: opt.NoCompression, - }) - defer h.close() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - n := 80 - - // Write 8MB (80 values, each 100K) - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - - // Reopening moves updates to level-0 - h.reopenDB() - h.compactRangeAt(0, "", "") - - v = h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - if v.tLen(1) <= 1 { - t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) - } - v.release() - - for i := 0; i < n; i++ { - h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } -} - -func TestDb_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - maxTables := kNumLevels + kL0_StopWritesTrigger - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - 
} - } -} - -func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - h.reopenDB() - - maxTables := kNumLevels + kL0_StopWritesTrigger - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDb_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - h.putMulti(kNumLevels, "A", "Z") - - // Suppose there is: - // small amount of data with prefix A - // large amount of data with prefix B - // small amount of data with prefix C - // and that recent updates have made small changes to all three prefixes. - // Check that we do not do a compaction that merges all of B in one shot. - h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDb_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - 
h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) - } - - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - sizes := []uint64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x uint64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDb_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDb_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h 
*dbHarness) { - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := kMaxMemCompactLevel - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
- h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDb_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := kMaxMemCompactLevel - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
- h.allEntriesFor("foo", "[ ]") -} - -func TestDb_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{MaxOpenFiles: 0}) - defer h.close() - - im := 10 - jm := 10 - for r := 0; r < 2; r++ { - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - h.compactMem() - } - } - - if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im) - } - - h.stor.SetOpenErr(storage.TypeTable) - go h.db.CompactRange(util.Range{}) - if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { - t.Log("compaction error: ", err) - } - h.closeDB0() - h.openDB() - h.stor.SetOpenErr(0) - - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - } -} - -func TestDb_OverlapInLevel0(t *testing.T) { - trun(t, func(h *dbHarness) { - if kMaxMemCompactLevel != 2 { - t.Fatal("fix test to reflect the config") - } - - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. - h.put("100", "v100") - h.put("999", "v999") - h.compactMem() - h.delete("100") - h.delete("999") - h.compactMem() - h.tablesPerLevel("0,1,1") - - // Make files spanning the following ranges in level-0: - // files[0] 200 .. 900 - // files[1] 300 .. 500 - // Note that files are sorted by min key. - h.put("300", "v300") - h.put("500", "v500") - h.compactMem() - h.put("200", "v200") - h.put("600", "v600") - h.put("900", "v900") - h.compactMem() - h.tablesPerLevel("2,1,1") - - // Compact away the placeholder files we created initially - h.compactRangeAt(1, "", "") - h.compactRangeAt(2, "", "") - h.tablesPerLevel("2") - - // Do a memtable compaction. Before bug-fix, the compaction would - // not detect the overlap with level-0 files and would incorrectly place - // the deletion in a deeper level. 
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDb_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDb_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(kMaxMemCompactLevel); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.SetWriteErr(storage.TypeManifest) - } else { - h.stor.SetSyncErr(storage.TypeManifest) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true) - - h.db.Close() 
- h.stor.SetWriteErr(0) - h.stor.SetSyncErr(0) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDb_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, 
db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDb_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDb_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - if kMaxMemCompactLevel != 2 { - t.Fatal("fix test to reflect the config") - } - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDb_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - 
BlockCache: opt.NoCache, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - n := 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. - h.stor.SetReadCounter(storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt := int(h.stor.ReadCounter()) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. - h.stor.ResetReadCounter() - for i := 0; i < n; i++ { - h.get(key(i)+".missing", false) - } - cnt = int(h.stor.ReadCounter()) - t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) - if max := 3 * n / 100; cnt > max { - t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) - } - - h.stor.ReleaseSync(storage.TypeTable) -} - -func TestDb_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 2, 1000 - - runtime.GOMAXPROCS(n) - trun(t, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - var cnt [n]uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 
0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } - } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return - } - } - atomic.AddUint32(&cnt[i], 1) - } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDb_Concurrent2(t *testing.T) { - const n, n2 = 4, 4000 - - runtime.GOMAXPROCS(n*2 + 2) - truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } - - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } - } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) - } - it.Release() - closeWg.Done() - }(i) - } - - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDb_CreateReopenDbOnFile(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath) - if err != nil { - t.Fatalf("(%d) cannot open storage: %s", i, err) - } - db, err := Open(stor, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - if err := stor.Close(); err != nil { - t.Fatalf("(%d) cannot close storage: %s", i, err) - } - } -} - -func TestDb_CreateReopenDbOnFile2(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - db, err := OpenFile(dbpath, nil) - if err != nil { - t.Fatalf("(%d) cannot 
open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - } -} - -func TestDb_DeletionMarkersOnMemdb(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.compactMem() - h.delete("foo") - h.get("foo", false) - h.getKeyVal("") -} - -func TestDb_LeveldbIssue178(t *testing.T) { - nKeys := (kMaxTableSize / 30) * 5 - key1 := func(i int) string { - return fmt.Sprintf("my_key_%d", i) - } - key2 := func(i int) string { - return fmt.Sprintf("my_key_%d_xxx", i) - } - - // Disable compression since it affects the creation of layers and the - // code below is trying to test against a very specific scenario. - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - // Create first key range. - batch := new(Batch) - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key1(i)), []byte("value for range 1 key")) - } - h.write(batch) - - // Create second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key2(i)), []byte("value for range 2 key")) - } - h.write(batch) - - // Delete second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Delete([]byte(key2(i))) - } - h.write(batch) - h.waitMemCompaction() - - // Run manual compaction. - h.compactRange(key1(0), key1(nKeys-1)) - - // Checking the keys. - h.assertNumKeys(nKeys) -} - -func TestDb_LeveldbIssue200(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("1", "b") - h.put("2", "c") - h.put("3", "d") - h.put("4", "e") - h.put("5", "f") - - iter := h.db.NewIterator(nil, h.ro) - - // Add an element that should not be reflected in the iterator. 
- h.put("25", "cd") - - iter.Seek([]byte("5")) - assertBytes(t, []byte("5"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("4"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("3"), iter.Key()) - iter.Next() - assertBytes(t, []byte("4"), iter.Key()) - iter.Next() - assertBytes(t, []byte("5"), iter.Key()) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_util.go b/kit/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index 0b2a893..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface implemented by both DB and Snapshot. -type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -type Sizes []uint64 - -// Sum returns sum of the sizes. -func (p Sizes) Sum() (n uint64) { - for _, s := range p { - n += s - } - return n -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. 
-func (db *DB) checkAndCleanFiles() error { - v := db.s.version_NB() - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { - for _, t := range tables { - tablesMap[t.file.Num()] = false - } - } - - files, err := db.s.getFiles(storage.TypeAll) - if err != nil { - return err - } - - var nTables int - var rem []storage.File - for _, f := range files { - keep := true - switch f.Type() { - case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() - case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() - } else { - keep = f.Num() >= db.journalFile.Num() - } - case storage.TypeTable: - _, keep = tablesMap[f.Num()] - if keep { - tablesMap[f.Num()] = true - nTables++ - } - } - - if !keep { - rem = append(rem, f) - } - } - - if nTables != len(tablesMap) { - for num, present := range tablesMap { - if !present { - db.logf("db@janitor table missing @%d", num) - } - } - return ErrCorrupted{Type: MissingFiles, Err: errors.New("leveldb: table files missing")} - } - - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/db_write.go b/kit/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index 528cc11..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "time" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(b *Batch) error { - w, err := db.journal.Next() - if err != nil { - return err - } - if _, err := w.Write(b.encode()); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if b.sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) jWriter() { - defer db.closeW.Done() - for { - select { - case b := <-db.journalC: - if b != nil { - db.journalAckC <- db.writeJournal(b) - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) rotateMem(n int) (mem *memDB, err error) { - // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) - if err != nil { - return - } - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - return - } - - // Schedule memdb compaction. - db.compTrigger(db.mcompTriggerC) - return -} - -func (db *DB) flush(n int) (mem *memDB, nn int, err error) { - delayed := false - flush := func() (retry bool) { - v := db.s.version() - defer v.release() - mem = db.getEffectiveMem() - defer func() { - if retry { - mem.decref() - mem = nil - } - }() - nn = mem.db.Free() - switch { - case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case nn >= n: - return false - case v.tLen(0) >= kL0_StopWritesTrigger: - delayed = true - err = db.compSendIdle(db.tcompCmdC) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. 
- if mem.db.Len() == 0 { - nn = n - } else { - mem.decref() - mem, err = db.rotateMem(n) - if err == nil { - nn = mem.db.Free() - } else { - nn = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.logf("db@write delayed T·%v", time.Since(start)) - } - return -} - -// Write apply the given batch to the DB. The batch will be applied -// sequentially. -// -// It is safe to modify the contents of the arguments after Write returns. -func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { - err = db.ok() - if err != nil || b == nil || b.len() == 0 { - return - } - - b.init(wo.GetSync()) - - // The write happen synchronously. -retry: - select { - case db.writeC <- b: - if <-db.writeMergedC { - return <-db.writeAckC - } - goto retry - case db.writeLockC <- struct{}{}: - case _, _ = <-db.closeC: - return ErrClosed - } - - merged := 0 - defer func() { - <-db.writeLockC - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - }() - - mem, memFree, err := db.flush(b.size()) - if err != nil { - return - } - defer mem.decref() - - // Calculate maximum size of the batch. - m := 1 << 20 - if x := b.size(); x <= 128<<10 { - m = x + (128 << 10) - } - m = minInt(m, memFree) - - // Merge with other batch. -drain: - for b.size() < m && !b.sync { - select { - case nb := <-db.writeC: - if b.size()+nb.size() <= m { - b.append(nb) - db.writeMergedC <- true - merged++ - } else { - db.writeMergedC <- false - break drain - } - default: - break drain - } - } - - // Set batch first seq number relative from last seq. - b.seq = db.seq + 1 - - // Write journal concurrently if it is large enough. 
- if b.size() >= (128 << 10) { - // Push the write batch to the journal writer - select { - case _, _ = <-db.closeC: - err = ErrClosed - return - case db.journalC <- b: - // Write into memdb - b.memReplay(mem.db) - } - // Wait for journal writer - select { - case _, _ = <-db.closeC: - err = ErrClosed - return - case err = <-db.journalAckC: - if err != nil { - // Revert memdb if error detected - b.revertMemReplay(mem.db) - return - } - } - } else { - err = db.writeJournal(b) - if err != nil { - return - } - b.memReplay(mem.db) - } - - // Set last seq number. - db.addSeq(uint64(b.len())) - - if b.size() >= memFree { - db.rotateMem(0) - } - return -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Put(key, value) - return db.Write(b, wo) -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Delete(key) - return db.Write(b, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. 
-// -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both is nil then it will compact entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - select { - case db.writeLockC <- struct{}{}: - case _, _ = <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mem := db.getEffectiveMem() - defer mem.decref() - if isMemOverlaps(db.s.icmp, mem.db, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/doc.go b/kit/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index ac9ea3d..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package leveldb provides implementation of LevelDB key/value database. -// -// Create or open a database: -// -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... -// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... 
-// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... -// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/kit/github.com/syndtr/goleveldb/leveldb/error.go b/kit/github.com/syndtr/goleveldb/leveldb/error.go deleted file mode 100644 index 8d278df..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/error.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "errors" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = util.ErrNotFound - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) - -type CorruptionType int - -const ( - CorruptedManifest CorruptionType = iota - MissingFiles -) - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. -type ErrCorrupted struct { - Type CorruptionType - Err error -} - -func (e ErrCorrupted) Error() string { - return e.Err.Error() -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/external_test.go b/kit/github.com/syndtr/goleveldb/leveldb/external_test.go deleted file mode 100644 index ba8ce07..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/external_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Leveldb external", func() { - o := &opt.Options{ - BlockCache: opt.NoCache, - BlockRestartInterval: 5, - BlockSize: 50, - Compression: opt.NoCompression, - MaxOpenFiles: 0, - Strict: opt.StrictAll, - WriteBuffer: 1000, - } - - Describe("write test", func() { - It("should do write correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(&t) - db.TestClose() - done <- true - }, 20.0) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - testutil.Defer("teardown", func() { - db.TestClose() - }) - - return db - }) - }) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/filter.go b/kit/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index f291095..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/kit/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index 03b4694..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefor, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. 
- return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey. -// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. 
-func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/kit/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index cbcc824..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - 
h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d mediocre", good, mediocre) - if mediocre > good/5 { - t.Error("mediocre false positive rate is more than expected") - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/kit/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. 
-// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. -type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. 
- Generate(b Buffer) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go b/kit/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go deleted file mode 100644 index e76657e..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index 2444096..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. 
-type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. - Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and basic Index method. -type Array interface { - BasicArray - - // Index returns key/value pair with index of i. - Index(i int) (key, value []byte) -} - -// Array is the interface that wraps BasicArray and basic Get method. -type ArrayIndexer interface { - BasicArray - - // Get returns a new data iterator with index of i. - Get(i int) Iterator -} - -type basicArrayIterator struct { - util.BasicReleaser - array BasicArray - pos int -} - -func (i *basicArrayIterator) Valid() bool { - return i.pos >= 0 && i.pos < i.array.Len() -} - -func (i *basicArrayIterator) First() bool { - if i.array.Len() == 0 { - i.pos = -1 - return false - } - i.pos = 0 - return true -} - -func (i *basicArrayIterator) Last() bool { - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = n - 1 - return true -} - -func (i *basicArrayIterator) Seek(key []byte) bool { - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true -} - -func (i *basicArrayIterator) Next() bool { - i.pos++ - if n := i.array.Len(); i.pos >= n { - i.pos = n - return false - } - return true -} - -func (i *basicArrayIterator) Prev() bool { - i.pos-- - if i.pos < 0 { - i.pos = -1 - return false - } - return true -} - -func (i *basicArrayIterator) Error() error { return nil } - -type arrayIterator struct { - basicArrayIterator - array Array - pos int - key, value []byte -} - -func (i *arrayIterator) updateKV() { - if i.pos == i.basicArrayIterator.pos { - return - } - i.pos = i.basicArrayIterator.pos - if i.Valid() { - i.key, i.value = i.array.Index(i.pos) - } else { - i.key = nil - i.value = nil - } -} - -func (i *arrayIterator) Key() []byte { - i.updateKV() 
- return i.key -} - -func (i *arrayIterator) Value() []byte { - i.updateKV() - return i.value -} - -type arrayIteratorIndexer struct { - basicArrayIterator - array ArrayIndexer -} - -func (i *arrayIteratorIndexer) Get() Iterator { - if i.Valid() { - return i.array.Get(i.basicArrayIterator.pos) - } - return nil -} - -// NewArrayIterator returns an iterator from the given array. -func NewArrayIterator(array Array) Iterator { - return &arrayIterator{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - pos: -1, - } -} - -// NewArrayIndexer returns an index iterator from the given array. -func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { - return &arrayIteratorIndexer{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index 02d4ac1..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - - . "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterates and seeks correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) - - // Test the iterator. 
- t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index b4053bf..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. 
- Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - strictGet bool - - data Iterator - err error - errf func(err error) -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() - if i.strictGet { - if err := i.data.Error(); err != nil { - i.err = err - } - } -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) dataErr() bool { - if i.errf != nil { - if err := i.data.Error(); err != nil { - i.errf(err) - } - } - if i.strict { - if err := i.data.Error(); err != nil { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } - - if !i.index.First() { - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } - - if !i.index.Last() { - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } - - if !i.index.Seek(key) { - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { - if i.err != nil { - return false - } - - switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Next() { - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } - - switch { - case i.data != nil && 
!i.data.Prev(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an indexed iterator. An index is iterator -// that returns another iterator, a data iterator. A data iterator is the -// iterator that contains actual key/value pairs. -// -// If strict is true then error yield by data iterator will halt the indexed -// iterator, on contrary if strict is false then the indexed iterator will -// ignore those error and move on to the next index. If strictGet is true and -// index.Get() yield an 'error iterator' then the indexed iterator will be halted. -// An 'error iterator' is iterator which its Error() method always return non-nil -// even before any 'seeks method' is called. 
-func NewIndexedIterator(index IteratorIndexer, strict, strictGet bool) Iterator { - return &indexedIterator{index: index, strict: strict, strictGet: strictGet} -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index 6893bf5..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterates and seeks correctly", func(done Done) { - // Build key/value. 
- index := make(keyValueIndex, len(n)) - sum := 0 - for _, x := range n { - sum += x - } - kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) - for i, j := 0, 0; i < len(n); i++ { - for x := n[i]; x > 0; x-- { - key, value := kv.Index(j) - index[i].key = key - index[i].Put(key, value) - j++ - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewIndexedIterator(NewArrayIndexer(index), true, true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with 100 keys", Test(100)) - Describe("with 50-50 keys", Test(50, 50)) - Describe("with 50-1 keys", Test(50, 1)) - Describe("with 50-1-50 keys", Test(50, 1, 50)) - Describe("with 1-50 keys", Test(1, 50)) - Describe("with random N-keys", Test()) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index 02e560b..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package iterator provides interface and implementation to traverse over -// contents of a database. -package iterator - -import ( - "errors" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorSeeker is the interface that wraps the 'seeks method'. -type IteratorSeeker interface { - // First moves the iterator to the first key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - First() bool - - // Last moves the iterator to the last key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. 
- // It returns whether such pair exist. - Last() bool - - // Seek moves the iterator to the first key/value pair whose key is greater - // than or equal to the given key. - // It returns whether such pair exist. - // - // It is safe to modify the contents of the argument after Seek returns. - Seek(key []byte) bool - - // Next moves the iterator to the next key/value pair. - // It returns whether the iterator is exhausted. - Next() bool - - // Prev moves the iterator to the previous key/value pair. - // It returns whether the iterator is exhausted. - Prev() bool -} - -// CommonIterator is the interface that wraps common interator methods. -type CommonIterator interface { - IteratorSeeker - - // util.Releaser is the interface that wraps basic Release method. - // When called Release will releases any resources associated with the - // iterator. - util.Releaser - - // util.ReleaseSetter is the interface that wraps the basic SetReleaser - // method. - util.ReleaseSetter - - // TODO: Remove this when ready. - Valid() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error -} - -// Iterator iterates over a DB's key/value pairs in key order. -// -// When encouter an error any 'seeks method' will return false and will -// yield no key/value pairs. The error can be queried by calling the Error -// method. Calling Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read -// an iterator until exhaustion. -// Also, an iterator is not necessarily goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -type Iterator interface { - CommonIterator - - // Key returns the key of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. 
- Key() []byte - - // Value returns the key of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Value() []byte -} - -// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback -// method. -// -// ErrorCallbackSetter implemented by indexed and merged iterator. -type ErrorCallbackSetter interface { - // SetErrorCallback allows set an error callback of the coresponding - // iterator. Use nil to clear the callback. - SetErrorCallback(f func(err error)) -} - -type emptyIterator struct { - releaser util.Releaser - released bool - err error -} - -func (i *emptyIterator) rErr() { - if i.err == nil && i.released { - i.err = errors.New("leveldb/iterator: iterator released") - } -} - -func (i *emptyIterator) Release() { - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - i.released = true -} - -func (i *emptyIterator) SetReleaser(releaser util.Releaser) { - if !i.released { - i.releaser = releaser - } -} - -func (*emptyIterator) Valid() bool { return false } -func (i *emptyIterator) First() bool { i.rErr(); return false } -func (i *emptyIterator) Last() bool { i.rErr(); return false } -func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } -func (i *emptyIterator) Next() bool { i.rErr(); return false } -func (i *emptyIterator) Prev() bool { i.rErr(); return false } -func (*emptyIterator) Key() []byte { return nil } -func (*emptyIterator) Value() []byte { return nil } -func (i *emptyIterator) Error() error { return i.err } - -// NewEmptyIterator creates an empty iterator. The err parameter can be -// nil, but if not nil the given err will be returned by Error method. 
-func NewEmptyIterator(err error) Iterator { - return &emptyIterator{err: err} -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index b99f01a..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package iterator_test - -import ( - "testing" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestIterator(t *testing.T) { - testutil.RunDefer() - - RegisterFailHandler(Fail) - RunSpecs(t, "Iterator Suite") -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 225c513..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package iterator - -import ( - "errors" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrIterReleased = errors.New("leveldb/iterator: iterator released") -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if i.errf != nil { - if err := iter.Error(); err != nil { - i.errf(err) - } - } - if i.strict { - if err := iter.Error(); err != nil { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - 
case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) - if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) 
- for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir != dirReleased { - i.releaser = releaser - } -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true then error yield by any iterators will halt the merged -// iterator, on contrary if strict is false then the merged iterator will -// ignore those error and move on to the next iterator. 
-func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { - return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/kit/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go deleted file mode 100644 index 3996211..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Merged iterator", func() { - Test := func(filled int, empty int) func() { - return func() { - It("Should iterates and seeks correctly", func(done Done) { - rnd := testutil.NewRand() - - // Build key/value. - filledKV := make([]testutil.KeyValue, filled) - kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4) - kv.Iterate(func(i int, key, value []byte) { - filledKV[rnd.Intn(filled)].Put(key, value) - }) - - // Create itearators. - iters := make([]Iterator, filled+empty) - for i := range iters { - if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { - filled-- - Expect(filledKV[filled].Len()).ShouldNot(BeZero()) - iters[i] = NewArrayIterator(filledKV[filled]) - } else { - empty-- - iters[i] = NewEmptyIterator(nil) - } - } - - // Test the iterator. 
- t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with three, all filled iterators", Test(3, 0)) - Describe("with one filled, one empty iterators", Test(1, 1)) - Describe("with one filled, two empty iterators", Test(1, 2)) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/kit/github.com/syndtr/goleveldb/leveldb/journal/journal.go deleted file mode 100644 index cb9860d..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -// Package journal reads and writes sequences of journals. Each journal is a stream -// of bytes that completes before the next journal starts. -// -// When reading, call Next to obtain an io.Reader for the next journal. Next will -// return io.EOF when there are no more journals. It is valid to call Next -// without reading the current journal to exhaustion. -// -// When writing, call Next to obtain an io.Writer for the next journal. Calling -// Next finishes the current journal. Call Close to finish the final journal. -// -// Optionally, call Flush to finish the current journal and flush the underlying -// writer without starting a new journal. To start a new journal after flushing, -// call Next. 
-// -// Neither Readers or Writers are safe to use concurrently. -// -// Example code: -// func read(r io.Reader) ([]string, error) { -// var ss []string -// journals := journal.NewReader(r, nil, true, true) -// for { -// j, err := journals.Next() -// if err == io.EOF { -// break -// } -// if err != nil { -// return nil, err -// } -// s, err := ioutil.ReadAll(j) -// if err != nil { -// return nil, err -// } -// ss = append(ss, string(s)) -// } -// return ss, nil -// } -// -// func write(w io.Writer, ss []string) error { -// journals := journal.NewWriter(w) -// for _, s := range ss { -// j, err := journals.Next() -// if err != nil { -// return err -// } -// if _, err := j.Write([]byte(s)), err != nil { -// return err -// } -// } -// return journals.Close() -// } -// -// The wire format is that the stream is divided into 32KiB blocks, and each -// block contains a number of tightly packed chunks. Chunks cannot cross block -// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a -// block must be zero. -// -// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 -// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) -// followed by a payload. The checksum is over the chunk type and the payload. -// -// There are four chunk types: whether the chunk is the full journal, or the -// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal -// has one first chunk, zero or more middle chunks, and one last chunk. -// -// The wire format allows for limited recovery in the face of data corruption: -// on a format error (such as a checksum mismatch), the reader moves to the -// next block and looks for the next full or first chunk. -package journal - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// These constants are part of the wire format and should not be changed. 
-const ( - fullChunkType = 1 - firstChunkType = 2 - middleChunkType = 3 - lastChunkType = 4 -) - -const ( - blockSize = 32 * 1024 - headerSize = 7 -) - -type flusher interface { - Flush() error -} - -// ErrCorrupted is the error type that generated by corrupted block or chunk. -type ErrCorrupted struct { - Size int - Reason string -} - -func (e ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) -} - -// Dropper is the interface that wrap simple Drop method. The Drop -// method will be called when the journal reader dropping a block or chunk. -type Dropper interface { - Drop(err error) -} - -// Reader reads journals from an underlying io.Reader. -type Reader struct { - // r is the underlying reader. - r io.Reader - // the dropper. - dropper Dropper - // strict flag. - strict bool - // checksum flag. - checksum bool - // seq is the sequence number of the current journal. - seq int - // buf[i:j] is the unread portion of the current chunk's payload. - // The low bound, i, excludes the chunk header. - i, j int - // n is the number of bytes of buf that are valid. Once reading has started, - // only the final block can have n < blockSize. - n int - // last is whether the current chunk is the last chunk of the journal. - last bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewReader returns a new reader. The dropper may be nil, and if -// strict is true then corrupted or invalid chunk will halt the journal -// reader entirely. 
-func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { - return &Reader{ - r: r, - dropper: dropper, - strict: strict, - checksum: checksum, - last: true, - } -} - -var errSkip = errors.New("leveldb/journal: skipped") - -func (r *Reader) corrupt(n int, reason string, skip bool) error { - if r.dropper != nil { - r.dropper.Drop(ErrCorrupted{n, reason}) - } - if r.strict && !skip { - r.err = ErrCorrupted{n, reason} - return r.err - } - return errSkip -} - -// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the -// next block into the buffer if necessary. -func (r *Reader) nextChunk(first bool) error { - for { - if r.j+headerSize <= r.n { - checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) - length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) - chunkType := r.buf[r.j+6] - - if checksum == 0 && length == 0 && chunkType == 0 { - // Drop entire block. - m := r.n - r.j - r.i = r.n - r.j = r.n - return r.corrupt(m, "zero header", false) - } else { - m := r.n - r.j - r.i = r.j + headerSize - r.j = r.j + headerSize + int(length) - if r.j > r.n { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "chunk length overflows block", false) - } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "checksum mismatch", false) - } - } - if first && chunkType != fullChunkType && chunkType != firstChunkType { - m := r.j - r.i - r.i = r.j - // Report the error, but skip it. - return r.corrupt(m+headerSize, "orphan chunk", true) - } - r.last = chunkType == fullChunkType || chunkType == lastChunkType - return nil - } - - // The last block. - if r.n < blockSize && r.n > 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - - // Read block. 
- n, err := io.ReadFull(r.r, r.buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return err - } - if n == 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - r.i, r.j, r.n = 0, 0, n - } -} - -// Next returns a reader for the next journal. It returns io.EOF if there are no -// more journals. The reader returned becomes stale after the next Next call, -// and should no longer be used. If strict is false, the reader will returns -// io.ErrUnexpectedEOF error when found corrupted journal. -func (r *Reader) Next() (io.Reader, error) { - r.seq++ - if r.err != nil { - return nil, r.err - } - r.i = r.j - for { - if err := r.nextChunk(true); err == nil { - break - } else if err != errSkip { - return nil, err - } - } - return &singleReader{r, r.seq, nil}, nil -} - -// Reset resets the journal reader, allows reuse of the journal reader. Reset returns -// last accumulated error. -func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - 
return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. -func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. 
-func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. -func (w *Writer) Close() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - w.err = errors.New("leveldb/journal: closed Writer") - return nil -} - -// Flush finishes the current journal, writes to the underlying writer, and -// flushes it if that writer implements interface{ Flush() error }. -func (w *Writer) Flush() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - if w.f != nil { - w.err = w.f.Flush() - return w.err - } - return nil -} - -// Reset resets the journal writer, allows reuse of the journal writer. Reset -// will also closes the journal writer if not already. -func (w *Writer) Reset(writer io.Writer) (err error) { - w.seq++ - if w.err == nil { - w.writePending() - err = w.err - } - w.w = writer - w.f, _ = writer.(flusher) - w.i = 0 - w.j = 0 - w.written = 0 - w.first = false - w.pending = false - w.err = nil - return -} - -// Next returns a writer for the next journal. The writer returned becomes stale -// after the next Close, Flush or Next call, and should no longer be used. -func (w *Writer) Next() (io.Writer, error) { - w.seq++ - if w.err != nil { - return nil, w.err - } - if w.pending { - w.fillHeader(true) - } - w.i = w.j - w.j = w.j + headerSize - // Check if there is room in the block for the header. - if w.j > blockSize { - // Fill in the rest of the block with zeroes. 
- for k := w.i; k < blockSize; k++ { - w.buf[k] = 0 - } - w.writeBlock() - if w.err != nil { - return nil, w.err - } - } - w.first = true - w.pending = true - return singleWriter{w, w.seq}, nil -} - -type singleWriter struct { - w *Writer - seq int -} - -func (x singleWriter) Write(p []byte) (int, error) { - w := x.w - if w.seq != x.seq { - return 0, errors.New("leveldb/journal: stale writer") - } - if w.err != nil { - return 0, w.err - } - n0 := len(p) - for len(p) > 0 { - // Write a block, if it is full. - if w.j == blockSize { - w.fillHeader(false) - w.writeBlock() - if w.err != nil { - return 0, w.err - } - w.first = false - } - // Copy bytes into the buffer. - n := copy(w.buf[w.j:], p) - w.j += n - p = p[n:] - } - return n0, nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/kit/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go deleted file mode 100644 index 0fcf225..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -package journal - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "strings" - "testing" -) - -type dropper struct { - t *testing.T -} - -func (d dropper) Drop(err error) { - d.t.Log(err) -} - -func short(s string) string { - if len(s) < 64 { - return s - } - return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) -} - -// big returns a string of length n, composed of repetitions of partial. -func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t 
*testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0. - w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. 
- if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. 
- r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != '1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - 
t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. 
- ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. 
- ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). 
- rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (third record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/key.go b/kit/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index b9acf93..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" -) - -type vType int - -func (t vType) String() string { - switch t { - case tDel: - return "d" - case tVal: - return "v" - } - return "x" -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - tDel vType = iota - tVal -) - -// tSeek defines the vType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const tSeek = tVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek) -) - -// Maximum number encoded in bytes. 
-var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIKey(ukey []byte, seq uint64, t vType) iKey { - if seq > kMaxSeq || t > tVal { - panic("invalid seq number or value type") - } - - b := make(iKey, len(ukey)+8) - copy(b, ukey) - binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t)) - return b -} - -func parseIkey(p []byte) (ukey []byte, seq uint64, t vType, ok bool) { - if len(p) < 8 { - return - } - num := binary.LittleEndian.Uint64(p[len(p)-8:]) - seq, t = uint64(num>>8), vType(num&0xff) - if t > tVal { - return - } - ukey = p[:len(p)-8] - ok = true - return -} - -func validIkey(p []byte) bool { - _, _, _, ok := parseIkey(p) - return ok -} - -func (p iKey) assert() { - if p == nil { - panic("nil iKey") - } - if len(p) < 8 { - panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p))) - } -} - -func (p iKey) ok() bool { - if len(p) < 8 { - return false - } - _, _, ok := p.parseNum() - return ok -} - -func (p iKey) ukey() []byte { - p.assert() - return p[:len(p)-8] -} - -func (p iKey) num() uint64 { - p.assert() - return binary.LittleEndian.Uint64(p[len(p)-8:]) -} - -func (p iKey) parseNum() (seq uint64, t vType, ok bool) { - if p == nil { - panic("nil iKey") - } - if len(p) < 8 { - return - } - num := p.num() - seq, t = uint64(num>>8), vType(num&0xff) - if t > tVal { - return 0, 0, false - } - ok = true - return -} - -func (p iKey) String() string { - if len(p) == 0 { - return "" - } - if seq, t, ok := p.parseNum(); ok { - return fmt.Sprintf("%s,%s%d", shorten(string(p.ukey())), t, seq) - } - return "" -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/key_test.go b/kit/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 6856715..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "testing" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, t vType) iKey { - return newIKey([]byte(key), uint64(seq), t) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, vt vType) { - ik := ikey(key, seq, vt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - if rseq, rt, ok := ik.parseNum(); ok { - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - - if rt != vt { - t.Errorf("type does not equal, got %v, want %v", rt, vt) - } - } else { - t.Error("cannot parse seq and type") - } -} - -func TestIKey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, tVal) - testSingleKey(t, "hello", 1, tDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestIKeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, tVal), - shortSep(ikey("foo", 100, tVal), - ikey("foo", 99, tVal))) - 
assertBytes(t, ikey("foo", 100, tVal), - shortSep(ikey("foo", 100, tVal), - ikey("foo", 101, tVal))) - assertBytes(t, ikey("foo", 100, tVal), - shortSep(ikey("foo", 100, tVal), - ikey("foo", 100, tVal))) - assertBytes(t, ikey("foo", 100, tVal), - shortSep(ikey("foo", 100, tVal), - ikey("foo", 100, tDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, tVal), - shortSep(ikey("foo", 100, tVal), - ikey("bar", 99, tVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek), - shortSep(ikey("foo", 100, tVal), - ikey("hello", 200, tVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, tVal), - shortSep(ikey("foo", 100, tVal), - ikey("foobar", 200, tVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, tVal), - shortSep(ikey("foobar", 100, tVal), - ikey("foo", 200, tVal))) -} - -func TestIKeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek), - shortSuccessor(ikey("foo", 100, tVal))) - assertBytes(t, ikey("\xff\xff", 100, tVal), - shortSuccessor(ikey("\xff\xff", 100, tVal))) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/kit/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index 5173c66..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package leveldb - -import ( - "testing" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLeveldb(t *testing.T) { - testutil.RunDefer() - - RegisterFailHandler(Fail) - RunSpecs(t, "Leveldb Suite") - - RegisterTestingT(t) - testutil.RunDefer("teardown") -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/kit/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index 8582837..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := 
range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index 7339457..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. -package memdb - -import ( - "math/rand" - "sync" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = util.ErrNotFound -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return 
i.fill(false, true) -} - -func (i *dbIter) Last() bool { - if i.p == nil { - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.p == nil { - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.p == nil { - return false - } - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.p == nil { - return false - } - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return nil } - -func (i *dbIter) Release() { - if i.p != nil { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. 
-type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) 
- p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. It returns error.ErrNotFound if the -// DB does not contain the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. 
-func (p *DB) Get(key []byte) (value []byte, err error) { - p.mu.RLock() - if node, exact := p.findGE(key, false); exact { - o := p.nodeData[node] + p.nodeData[node+nKey] - value = p.kvData[o : o+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. -func (p *DB) Find(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node, _ := p.findGE(key, false); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// NewIterator returns an iterator of the DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. However, the resultant key/value pairs are not guaranteed -// to be a consistent snapshot of the DB at a particular point in time. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { - return &dbIter{p: p, slice: slice} -} - -// Capacity returns keys/values buffer capacity. 
-func (p *DB) Capacity() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) -} - -// Size returns sum of keys and values length. Note that deleted -// key/value will not be accouted for, but it will still consume -// the buffer, since the buffer is append only. -func (p *DB) Size() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.kvSize -} - -// Free returns keys/values free buffer before need to grow. -func (p *DB) Free() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) - len(p.kvData) -} - -// Len returns the number of entries in the DB. -func (p *DB) Len() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.n -} - -// Reset resets the DB to initial empty state. Allows reuse the buffer. -func (p *DB) Reset() { - p.rnd = rand.New(rand.NewSource(0xdeadbeef)) - p.maxHeight = 1 - p.n = 0 - p.kvSize = 0 - p.kvData = p.kvData[:0] - p.nodeData = p.nodeData[:4+tMaxHeight] - p.nodeData[nKV] = 0 - p.nodeData[nKey] = 0 - p.nodeData[nVal] = 0 - p.nodeData[nHeight] = tMaxHeight - for n := 0; n < tMaxHeight; n++ { - p.nodeData[4+n] = 0 - p.prevNode[n] = 0 - } -} - -// New creates a new initalized in-memory key/value DB. The capacity -// is the initial key/value buffer capacity. The capacity is advisory, -// not enforced. -// -// The returned DB instance is goroutine-safe. -func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index df36d80..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package memdb - -import ( - "testing" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemdb(t *testing.T) { - testutil.RunDefer() - - RegisterFailHandler(Fail) - RunSpecs(t, "Memdb Suite") -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index e2b77ac..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil 
-} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. - rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. 
- rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }) - }) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/opt/options.go b/kit/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index a066e3b..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/filter" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -const ( - DefaultBlockCacheSize = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompressionType = SnappyCompression - DefaultMaxOpenFiles = 1000 - DefaultWriteBuffer = 4 * MiB -) - -type noCache struct{} - -func (noCache) SetCapacity(capacity int) {} -func (noCache) GetNamespace(id uint64) cache.Namespace { return nil } -func (noCache) Purge(fin cache.PurgeFin) {} -func (noCache) Zap(closed bool) {} - -var NoCache cache.Cache = noCache{} - -// Compression is the per-block compression algorithm to use. 
-type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB strict level. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error istead of being dropped. - StrictManifest Strict = 1 << iota - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error istead of being dropped. - StrictJournal - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then an invalid key/value pair will cause an error - // instead of being skipped. - StrictIterator - - // If present then 'sorted table' block checksum will be verified. - StrictBlockChecksum - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournal | StrictJournalChecksum | StrictIterator | StrictBlockChecksum - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCache provides per-block caching for LevelDB. Specify NoCache to - // disable block caching. - // - // By default LevelDB will create LRU-cache with capacity of 8MiB. 
- BlockCache cache.Cache - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. - BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. - BlockSize int - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the per-block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. - ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. - ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. - // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. - // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // MaxOpenFiles defines maximum number of open files to kept around - // (cached). This is not an hard limit, actual open files may exceed - // the defined value. 
- // - // The default value is 1000. - MaxOpenFiles int - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. - WriteBuffer int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCache() cache.Cache { - if o == nil { - return nil - } - return o.BlockCache -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetMaxOpenFiles() int { - if o == nil || o.MaxOpenFiles <= 0 { - return DefaultMaxOpenFiles - } - return o.MaxOpenFiles -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return 
DefaultWriteBuffer - } - return o.WriteBuffer -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. -type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affects already cached block. - // - // The default value is false. - DontFillCache bool - - // Strict overrides global DB strict level. Only StrictIterator and - // StrictBlockChecksum that does have effects here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable. Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. Sync being true means write followed by fsync. - // - // The default value is false. - Sync bool -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/options.go b/kit/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index 6a4cc6e..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" -) - -func (s *session) setOptions(o *opt.Options) { - s.o = &opt.Options{} - if o != nil { - *s.o = *o - } - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - s.o.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - s.o.AltFilters[i] = &iFilter{filter} - } - } - // Block cache. - switch o.GetBlockCache() { - case nil: - s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize) - case opt.NoCache: - s.o.BlockCache = nil - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - s.o.Comparer = s.icmp - // Filter. - if filter := o.GetFilter(); filter != nil { - s.o.Filter = &iFilter{filter} - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/session.go b/kit/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index f1dedc8..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "errors" - "io" - "os" - "sync" - "sync/atomic" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. - stFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stSeq uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 - - stor storage.Storage - storLock util.Releaser - o *opt.Options - icmp *iComparer - tops *tOps - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFile storage.File - - stCptrs [kNumLevels]iKey // compact pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: stor, - storLock: storLock, - } - s.setOptions(o) - s.tops = newTableOps(s, s.o.GetMaxOpenFiles()) - s.setVersion(&version{s: s}) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. 
-func (s *session) close() { - s.tops.close() - if bc := s.o.GetBlockCache(); bc != nil { - bc.Purge(nil) - } - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.manifestFile = nil - s.stVersion = nil -} - -// Release session lock. -func (s *session) release() { - s.storLock.Release() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. -func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. - if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest file missing")} - } - } - }() - - file, err := s.stor.GetManifest() - if err != nil { - return - } - - reader, err := file.Open() - if err != nil { - return - } - defer reader.Close() - strict := s.o.GetStrict(opt.StrictManifest) - jr := journal.NewReader(reader, dropper{s, file}, strict, true) - - staging := s.version_NB().newStaging() - rec := &sessionRecord{} - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return - } - - err = rec.decode(r) - if err == nil { - // save compact pointers - for _, r := range rec.compactionPointers { - s.stCptrs[r.level] = iKey(r.ikey) - } - // commit record to version staging - staging.commit(rec) - } else if strict { - return ErrCorrupted{Type: CorruptedManifest, Err: err} - } else { - s.logf("manifest error: %v (skipped)", err) - } - rec.resetCompactionPointers() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return ErrCorrupted{Type: 
CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")} - case rec.comparer != s.icmp.uName(): - return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")} - case !rec.has(recNextNum): - return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")} - case !rec.has(recJournalNum): - return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing journal file number")} - case !rec.has(recSeq): - return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing seq number")} - } - - s.manifestFile = file - s.setVersion(staging.finish()) - s.setFileNum(rec.nextNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - // spawn new version based on current version - nv := s.version_NB().spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} - -// Pick a compaction based on current state; need external synchronization. 
-func (s *session) pickCompaction() *compaction { - v := s.version_NB() - - var level int - var t0 tFiles - if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCptrs[level] - tables := v.tables[level] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - level = ts.level - t0 = append(t0, ts.table) - } else { - return nil - } - } - - c := &compaction{s: s, v: v, level: level} - if level == 0 { - imin, imax := t0.getRange(s.icmp) - t0 = v.tables[0].getOverlaps(t0[:0], s.icmp, imin.ukey(), imax.ukey(), true) - } - - c.tables[0] = t0 - c.expand() - return c -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { - v := s.version_NB() - - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) - if len(t0) == 0 { - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. - if level > 0 { - limit := uint64(kMaxTableSize) - total := uint64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - c := &compaction{s: s, v: v, level: level} - c.tables[0] = t0 - c.expand() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - level int - tables [2]tFiles - - gp tFiles - gpidx int - seenKey bool - overlappedBytes uint64 - imin, imax iKey - - tPtrs [kNumLevels]int -} - -// Expand compacted tables; need external synchronization. 
-func (c *compaction) expand() { - level := c.level - vt0, vt1 := c.v.tables[level], c.v.tables[level+1] - - t0, t1 := c.tables[0], c.tables[1] - imin, imax := t0.getRange(c.s.icmp) - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. - if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), level == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if level+2 < kNumLevels { - c.gp = c.v.tables[level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.tables[0], c.tables[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. -func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= kMaxGrandParentOverlapBytes -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. 
- if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey iKey) bool { - for ; c.gpidx < len(c.gp); c.gpidx++ { - gp := c.gp[c.gpidx] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.overlappedBytes += gp.size - } - } - c.seenKey = true - - if c.overlappedBytes > kMaxGrandParentOverlapBytes { - // Too much overlap for current output; start new output. - c.overlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { - // Special case for level-0 - icap = len(c.tables[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. - ro := &opt.ReadOptions{ - DontFillCache: true, - } - strict := c.s.o.GetStrict(opt.StrictIterator) - - for i, tables := range c.tables { - if len(tables) == 0 { - continue - } - - // Level-0 is not sorted and may overlaps each other. - if c.level+i == 0 { - for _, t := range tables { - its = append(its, c.s.tops.newIterator(t, nil, ro)) - } - } else { - it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict, true) - its = append(its, it) - } - } - - return iterator.NewMergedIterator(its, c.s.icmp, true) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/session_record.go b/kit/github.com/syndtr/goleveldb/leveldb/session_record.go deleted file mode 100644 index 2721295..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/session_record.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bufio" - "encoding/binary" - "errors" - "io" -) - -var errCorruptManifest = errors.New("leveldb: corrupt manifest") - -type byteReader interface { - io.Reader - io.ByteReader -} - -// These numbers are written to disk and should not be changed. -const ( - recComparer = 1 - recJournalNum = 2 - recNextNum = 3 - recSeq = 4 - recCompactionPointer = 5 - recDeletedTable = 6 - recNewTable = 7 - // 8 was used for large value refs - recPrevJournalNum = 9 -) - -type cpRecord struct { - level int - ikey iKey -} - -type ntRecord struct { - level int - num uint64 - size uint64 - imin iKey - imax iKey -} - -func (r ntRecord) makeFile(s *session) *tFile { - return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) -} - -type dtRecord struct { - level int - num uint64 -} - -type sessionRecord struct { - hasRec int - comparer string - journalNum uint64 - prevJournalNum uint64 - nextNum uint64 - seq uint64 - compactionPointers []cpRecord - addedTables []ntRecord - deletedTables []dtRecord - scratch [binary.MaxVarintLen64]byte - err error -} - -func (p *sessionRecord) has(rec int) bool { - return p.hasRec&(1<= kNumLevels { - p.err = errCorruptManifest - return 0 - } - return int(x) -} - -func (p *sessionRecord) decode(r io.Reader) error { - br, ok := r.(byteReader) - if !ok { - br = bufio.NewReader(r) - } - p.err = nil - for p.err == nil { - rec, err := binary.ReadUvarint(br) - if err != nil { - if err == io.EOF { - err = nil - } - return err - } - switch rec { - case recComparer: - x := p.readBytes(br) - if p.err == nil { - p.setComparer(string(x)) - } - case recJournalNum: - x := p.readUvarint(br) - if p.err == nil { - p.setJournalNum(x) - } - case recPrevJournalNum: - x := p.readUvarint(br) - if p.err == nil { - p.setPrevJournalNum(x) - } - case recNextNum: - x := p.readUvarint(br) - if p.err == nil { - p.setNextNum(x) - } - case recSeq: - x := p.readUvarint(br) - if p.err == nil { - p.setSeq(x) - } - case recCompactionPointer: - level := 
p.readLevel(br) - ikey := p.readBytes(br) - if p.err == nil { - p.addCompactionPointer(level, iKey(ikey)) - } - case recNewTable: - level := p.readLevel(br) - num := p.readUvarint(br) - size := p.readUvarint(br) - imin := p.readBytes(br) - imax := p.readBytes(br) - if p.err == nil { - p.addTable(level, num, size, imin, imax) - } - case recDeletedTable: - level := p.readLevel(br) - num := p.readUvarint(br) - if p.err == nil { - p.deleteTable(level, num) - } - } - } - - return p.err -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/kit/github.com/syndtr/goleveldb/leveldb/session_record_test.go deleted file mode 100644 index 029fabf..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/session_record_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "testing" -) - -func decodeEncode(v *sessionRecord) (res bool, err error) { - b := new(bytes.Buffer) - err = v.encode(b) - if err != nil { - return - } - v2 := new(sessionRecord) - err = v.decode(b) - if err != nil { - return - } - b2 := new(bytes.Buffer) - err = v2.encode(b2) - if err != nil { - return - } - return bytes.Equal(b.Bytes(), b2.Bytes()), nil -} - -func TestSessionRecord_EncodeDecode(t *testing.T) { - big := uint64(1) << 50 - v := new(sessionRecord) - i := uint64(0) - test := func() { - res, err := decodeEncode(v) - if err != nil { - t.Fatalf("error when testing encode/decode sessionRecord: %v", err) - } - if !res { - t.Error("encode/decode test failed at iteration:", i) - } - } - - for ; i < 4; i++ { - test() - v.addTable(3, big+300+i, big+400+i, - newIKey([]byte("foo"), big+500+1, tVal), - newIKey([]byte("zoo"), big+600+1, tDel)) - v.deleteTable(4, big+700+i) - v.addCompactionPointer(int(i), newIKey([]byte("x"), big+900+1, tVal)) - } - - v.setComparer("foo") - 
v.setJournalNum(big + 100) - v.setPrevJournalNum(big + 99) - v.setNextNum(big + 200) - v.setSeq(big + 1000) - test() -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/session_util.go b/kit/github.com/syndtr/goleveldb/leveldb/session_util.go deleted file mode 100644 index db8c878..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/session_util.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sync/atomic" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" -) - -// Logging. - -type dropper struct { - s *session - file storage.File -} - -func (d dropper) Drop(err error) { - if e, ok := err.(journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) - } else { - d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) - } -} - -func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } -func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } - -// File utils. - -func (s *session) getJournalFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeJournal) -} - -func (s *session) getTableFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeTable) -} - -func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { - return s.stor.GetFiles(t) -} - -func (s *session) newTemp() storage.File { - num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 - return s.stor.GetFile(num, storage.TypeTemp) -} - -// Session state. - -// Get current version. 
-func (s *session) version() *version { - s.vmu.Lock() - defer s.vmu.Unlock() - s.stVersion.ref++ - return s.stVersion -} - -// Get current version; no barrier. -func (s *session) version_NB() *version { - return s.stVersion -} - -// Set current version to v. -func (s *session) setVersion(v *version) { - s.vmu.Lock() - v.ref = 1 - if old := s.stVersion; old != nil { - v.ref++ - old.next = v - old.release_NB() - } - s.stVersion = v - s.vmu.Unlock() -} - -// Get current unused file number. -func (s *session) fileNum() uint64 { - return atomic.LoadUint64(&s.stFileNum) -} - -// Get current unused file number to num. -func (s *session) setFileNum(num uint64) { - atomic.StoreUint64(&s.stFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num uint64) { - num += 1 - for { - old, x := s.stFileNum, num - if old > x { - x = old - } - if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() (num uint64) { - return atomic.AddUint64(&s.stFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num uint64) { - for { - old, x := s.stFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) { - break - } - } -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextNum(s.fileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeq) { - r.setSeq(s.stSeq) - } - - for level, ik := range s.stCptrs { - if ik != nil { - r.addCompactionPointer(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been commited, this will update session state; -// need external synchronization. 
-func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum - } - - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum - } - - if r.has(recSeq) { - s.stSeq = r.seq - } - - for _, p := range r.compactionPointers { - s.stCptrs[p.level] = iKey(p.ikey) - } -} - -// Create a new manifest file; need external synchronization. -func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version_NB() - } - if rec == nil { - rec = new(sessionRecord) - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if s.manifestFile != nil { - s.manifestFile.Remove() - } - s.manifestFile = file - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - file.Remove() - s.reuseFileNum(num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetManifest(file) - return -} - -// Flush record to disk. 
-func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - err = s.manifestWriter.Sync() - if err != nil { - return - } - s.recordCommited(rec) - return -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 1e997ef..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reservefs. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var errFileOpen = errors.New("leveldb/storage: file still open") - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil - } - return -} - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte - // Opened file counter; if open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesytem-backed storage implementation with the given -// path. This also hold a file lock, so any subsequent attempt to open the same -// path will fail. -// -// The storage must be closed after use, by calling Close method. 
-func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK")) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - fs := &fileStorage{path: path, flock: flock, logw: logw} - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (util.Releaser, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) -} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) 
- fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) -} - -func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) -} - -func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) -} - -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} - -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return -} - -func (fs *fileStorage) GetManifest() (f File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if fn[7] != '.' 
|| len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) - continue - } - if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) - continue - } - } - path := filepath.Join(fs.path, fn) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return nil, e1 - } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return nil, e1 - } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) - if pend1 { - rem = append(rem, fn) - } - if !pend1 || cerr == nil { - cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) - } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) - if pend1 { - rem = append(rem, fn) - } - } else { - f = f1 - pend = pend1 - } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) - } - } - } - // Don't remove any files if there is no valid CURRENT file. - if f == nil { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist - } - return - } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) - } - } - // Remove obsolete or incomplete pending CURRENT files. 
- for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) - } - } - return -} - -func (fs *fileStorage) SetManifest(f File) (err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile - } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. - if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) - } - if err != nil { - return err - } - return rename(path, filepath.Join(fs.path, "CURRENT")) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) - return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) - } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 - } - return err -} - -type fileWrap struct { - *os.File - f *file -} - -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
- if err := syncDir(fw.f.fs.path); err != nil { - return err - } - } - return nil -} - -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed - } - f.open = false - f.fs.open-- - err := fw.File.Close() - if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) - } - return err -} - -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) - if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile - } - if f.open || newfile2.open { - return errFileOpen - } - return rename(newfile2.path(), f.path()) -} - -func (f *file) Type() FileType { - return f.t -} - -func (f *file) Num() uint64 { - return f.num -} - -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) - if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove 
file with old name, just in case. - if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } - } - return err -} - -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) - case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) - default: - panic("invalid file type") - } -} - -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) -} - -func (f *file) parse(name string) bool { - var num uint64 - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &num, &tail) - if err == nil { - switch tail { - case "log": - f.t = TypeJournal - case "ldb", "sst": - f.t = TypeTable - case "tmp": - f.t = TypeTemp - default: - return false - } - f.num = num - return true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail) - if n == 1 { - f.t = TypeManifest - f.num = num - return true - } - - return false -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index 42940d7..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "os" - "path/filepath" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - _, fname := filepath.Split(newpath) - return os.Rename(oldpath, fname) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 102031b..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - flock.Type = syscall.F_WRLCK - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 92abcbb..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num uint64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) - } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - f := new(file) - if f.parse(name) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } - } - - p1, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - defer os.RemoveAll(path) - - p2, err := OpenFile(path) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d0a604b..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - how := syscall.LOCK_UN - if lock { - how = syscall.LOCK_EX - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 50c3c45..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index 863d1c5..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "bytes" - "os" - "sync" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 3 - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Release() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - manifest *memFilePtr -} - -// NewMemStorage returns a new memory-backed storage implementation. -func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (util.Releaser, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) GetFile(num uint64, t FileType) File { - return &memFilePtr{ms: ms, num: num, t: t} -} - -func (ms *memStorage) GetFiles(t FileType) ([]File, error) { - ms.mu.Lock() - var ff []File - for x, _ := range ms.files { - num, mt := x>>typeShift, FileType(x)&TypeAll - if mt&t == 0 { - continue - } - ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) - } - ms.mu.Unlock() - return ff, nil -} - -func (ms *memStorage) GetManifest() (File, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.manifest == nil { - return nil, os.ErrNotExist - } - return ms.manifest, nil -} - -func (ms *memStorage) SetManifest(f File) error { - fm, ok := f.(*memFilePtr) - if !ok || fm.t != TypeManifest { - return ErrInvalidFile - } - ms.mu.Lock() - ms.manifest = fm - ms.mu.Unlock() - return nil -} - -func (*memStorage) Close() error { return nil } - -type memReader struct { - *bytes.Reader - m *memFile -} - -func (mr *memReader) Close() error { - return mr.m.Close() -} - -type memFile struct { - bytes.Buffer - ms *memStorage - open 
bool -} - -func (*memFile) Sync() error { return nil } -func (m *memFile) Close() error { - m.ms.mu.Lock() - m.open = false - m.ms.mu.Unlock() - return nil -} - -type memFilePtr struct { - ms *memStorage - num uint64 - t FileType -} - -func (p *memFilePtr) x() uint64 { - return p.Num()< -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "bytes" - "testing" -) - -func TestMemStorage(t *testing.T) { - m := NewMemStorage() - - l, err := m.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = m.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = m.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } - - f := m.GetFile(1, TypeTable) - if f.Num() != 1 && f.Type() != TypeTable { - t.Fatal("invalid file number and type") - } - w, _ := f.Create() - w.Write([]byte("abc")) - w.Close() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 { - t.Fatal("invalid GetFiles len") - } - buf := new(bytes.Buffer) - r, err := f.Open() - if err != nil { - t.Fatal("Open: got error: ", err) - } - buf.ReadFrom(r) - r.Close() - if got := buf.String(); got != "abc" { - t.Fatalf("Read: invalid value, want=abc got=%s", got) - } - if _, err := f.Open(); err != nil { - t.Fatal("Open: got error: ", err) - } - if _, err := m.GetFile(1, TypeTable).Open(); err == nil { - t.Fatal("expecting error") - } - f.Remove() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 { - t.Fatal("invalid GetFiles len", len(ff)) - } - if _, err := f.Open(); err == nil { - t.Fatal("expecting error") - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/kit/github.com/syndtr/goleveldb/leveldb/storage/storage.go deleted file mode 100644 index a57baa1..0000000 --- 
a/kit/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package storage provides storage abstraction for LevelDB. -package storage - -import ( - "errors" - "fmt" - "io" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type FileType uint32 - -const ( - TypeManifest FileType = 1 << iota - TypeJournal - TypeTable - TypeTemp - - TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp -) - -func (t FileType) String() string { - switch t { - case TypeManifest: - return "manifest" - case TypeJournal: - return "journal" - case TypeTable: - return "table" - case TypeTemp: - return "temp" - } - return fmt.Sprintf("", t) -} - -var ( - ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") - ErrLocked = errors.New("leveldb/storage: already locked") - ErrClosed = errors.New("leveldb/storage: closed") -) - -// Syncer is the interface that wraps basic Sync method. -type Syncer interface { - // Sync commits the current contents of the file to stable storage. - Sync() error -} - -// Reader is the interface that groups the basic Read, Seek, ReadAt and Close -// methods. -type Reader interface { - io.ReadSeeker - io.ReaderAt - io.Closer -} - -// Writer is the interface that groups the basic Write, Sync and Close -// methods. -type Writer interface { - io.WriteCloser - Syncer -} - -// File is the file. A file instance must be goroutine-safe. -type File interface { - // Open opens the file for read. Returns os.ErrNotExist error - // if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open() (r Reader, err error) - - // Create creates the file for writting. Truncate the file if - // already exist. - // Returns ErrClosed if the underlying storage is closed. 
- Create() (w Writer, err error) - - // Replace replaces file with newfile. - // Returns ErrClosed if the underlying storage is closed. - Replace(newfile File) error - - // Type returns the file type - Type() FileType - - // Num returns the file number. - Num() uint64 - - // Remove removes the file. - // Returns ErrClosed if the underlying storage is closed. - Remove() error -} - -// Storage is the storage. A storage instance must be goroutine-safe. -type Storage interface { - // Lock locks the storage. Any subsequent attempt to call Lock will fail - // until the last lock released. - // After use the caller should call the Release method. - Lock() (l util.Releaser, err error) - - // Log logs a string. This is used for logging. An implementation - // may write to a file, stdout or simply do nothing. - Log(str string) - - // GetFile returns a file for the given number and type. GetFile will never - // returns nil, even if the underlying storage is closed. - GetFile(num uint64, t FileType) File - - // GetFiles returns a slice of files that match the given file types. - // The file types may be OR'ed together. - GetFiles(t FileType) ([]File, error) - - // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest - // file does not exist. - GetManifest() (File, error) - - // SetManifest sets the given file as manifest file. The given file should - // be a manifest file type or error will be returned. - SetManifest(f File) error - - // Close closes the storage. It is valid to call Close multiple times. - // Other methods should not be called after the storage has been closed. - Close() error -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/storage_test.go b/kit/github.com/syndtr/goleveldb/leveldb/storage_test.go deleted file mode 100644 index 1c5c426..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/storage_test.go +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENE file. - -package leveldb - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 4 - -var ( - tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") - tsErrFileOpen = errors.New("leveldb.testStorage: file still open") -) - -var ( - tsFSEnv = os.Getenv("GOLEVELDB_USEFS") - tsKeepFS = tsFSEnv == "2" - tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" - tsMU = &sync.Mutex{} - tsNum = 0 -) - -type tsLock struct { - ts *testStorage - r util.Releaser -} - -func (l tsLock) Release() { - l.r.Release() - l.ts.t.Log("I: storage lock released") -} - -type tsReader struct { - tf tsFile - storage.Reader -} - -func (tr tsReader) Read(b []byte) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - n, err = tr.Reader.Read(b) - if err != nil && err != io.EOF { - ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) - } - return -} - -func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - n, err = tr.Reader.ReadAt(b, off) - if err != nil && err != io.EOF { - ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) - } - return -} - -func (tr tsReader) Close() (err error) { - err = tr.Reader.Close() - tr.tf.close("reader", err) - return -} - -type tsWriter struct { - tf tsFile - storage.Writer -} - -func (tw tsWriter) Write(b []byte) (n int, err error) { - ts := tw.tf.ts - ts.mu.Lock() - defer ts.mu.Unlock() - if ts.emuWriteErr&tw.tf.Type() != 0 { - return 0, errors.New("leveldb.testStorage: emulated write error") - } - n, err = tw.Writer.Write(b) - if err != nil { - ts.t.Errorf("E: 
write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) - } - return -} - -func (tw tsWriter) Sync() (err error) { - ts := tw.tf.ts - ts.mu.Lock() - defer ts.mu.Unlock() - for ts.emuDelaySync&tw.tf.Type() != 0 { - ts.cond.Wait() - } - if ts.emuSyncErr&tw.tf.Type() != 0 { - return errors.New("leveldb.testStorage: emulated sync error") - } - err = tw.Writer.Sync() - if err != nil { - ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) - } - return -} - -func (tw tsWriter) Close() (err error) { - err = tw.Writer.Close() - tw.tf.close("reader", err) - return -} - -type tsFile struct { - ts *testStorage - storage.File -} - -func (tf tsFile) x() uint64 { - return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll - ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) - } - } - ts.mu.Unlock() -} - -func newTestStorage(t *testing.T) *testStorage { - var stor storage.Storage - var closeFn func() error - if tsFS { - for { - tsMU.Lock() - num := tsNum - tsNum++ - tsMU.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); err != nil { - stor, err = storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot create storage: %v", err) - } - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s ----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - 
} - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - ts.cond.L = &ts.mu - return ts -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table.go b/kit/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index ee340eb..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sort" - "sync/atomic" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/table" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - file storage.File - seekLeft int32 - size uint64 - imin, imax iKey -} - -// Returns true if given key is after largest key of this table. -func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. 
-func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { - f := &tFile{ - file: file, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -// tFiles hold multiple tFile. -type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.file.Num() < b.file.Num() - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. 
-func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. -func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIKey(umin, kMaxSeq, tSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// If overlapped is true then the search will be expanded to tables that -// overlaps with each other. -func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - x := len(dst) - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if overlapped { - // For overlapped files, check if the newly added file has - // expanded the range. If so, restart search. 
- if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:x] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - dst = dst[:x] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. -func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. -func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, iKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, iKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, iKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. 
-type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - cache cache.Cache - cacheNS cache.Namespace - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - file := t.s.getTableFile(t.s.allocFileNum()) - fw, err := file.Create() - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - file: file, - w: fw, - tw: table.NewWriter(fw, t.s.o), - }, nil -} - -// Builds table from src iterator. -func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return f, n, err - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache object, which should -// be released after use. -func (t *tOps) open(f *tFile) (c cache.Object, err error) { - num := f.file.Num() - c, ok := t.cacheNS.Get(num, func() (ok bool, value interface{}, charge int, fin cache.SetFin) { - var r storage.Reader - r, err = f.file.Open() - if err != nil { - return - } - - o := t.s.o - - var cacheNS cache.Namespace - if bc := o.GetBlockCache(); bc != nil { - cacheNS = bc.GetNamespace(num) - } - - ok = true - value = table.NewReader(r, int64(f.size), cacheNS, t.bpool, o) - charge = 1 - fin = func() { - r.Close() - } - return - }) - if !ok && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. 
-func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { - c, err := t.open(f) - if err != nil { - return nil, nil, err - } - defer c.Release() - return c.Value().(*table.Reader).Find(key, ro) -} - -// Returns approximate offset of the given key. -func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { - c, err := t.open(f) - if err != nil { - return - } - _offset, err := c.Value().(*table.Reader).OffsetOf(key) - offset = uint64(_offset) - c.Release() - return -} - -// Creates an iterator from the given table. -func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - c, err := t.open(f) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := c.Value().(*table.Reader).NewIterator(slice, ro) - iter.SetReleaser(c) - return iter -} - -// Removes table from persistent storage. It waits until -// no one use the the table. -func (t *tOps) remove(f *tFile) { - num := f.file.Num() - t.cacheNS.Delete(num, func(exist bool) { - if err := f.file.Remove(); err != nil { - t.s.logf("table@remove removing @%d %q", num, err) - } else { - t.s.logf("table@remove removed @%d", num) - } - if bc := t.s.o.GetBlockCache(); bc != nil { - bc.GetNamespace(num).Zap(false) - } - }) -} - -// Closes the table ops instance. It will close all tables, -// regadless still used or not. -func (t *tOps) close() { - t.cache.Zap(true) -} - -// Creates new initialized table ops instance. -func newTableOps(s *session, cacheCap int) *tOps { - c := cache.NewLRUCache(cacheCap) - return &tOps{ - s: s, - cache: c, - cacheNS: c.GetNamespace(0), - bpool: util.NewBufferPool(s.o.GetBlockSize() + 5), - } -} - -// tWriter wraps the table writer. It keep track of file descriptor -// and added key range. -type tWriter struct { - t *tOps - - file storage.File - w storage.Writer - tw *table.Writer - - first, last []byte -} - -// Append key/value pair to the table. 
-func (w *tWriter) append(key, value []byte) error { - if w.first == nil { - w.first = append([]byte{}, key...) - } - w.last = append(w.last[:0], key...) - return w.tw.Append(key, value) -} - -// Returns true if the table is empty. -func (w *tWriter) empty() bool { - return w.first == nil -} - -// Finalizes the table and returns table file. -func (w *tWriter) finish() (f *tFile, err error) { - err = w.tw.Close() - if err != nil { - return - } - err = w.w.Sync() - if err != nil { - w.w.Close() - return - } - w.w.Close() - f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) - return -} - -// Drops the table. -func (w *tWriter) drop() { - w.w.Close() - w.file.Remove() - w.t.s.reuseFileNum(w.file.Num()) - w.w = nil - w.file = nil - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/kit/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 2b15106..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func (b *block) TestNewIterator(slice *util.Range) iterator.Iterator { - return b.newIterator(slice, false, nil) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *block { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &block{ - cmp: comparer.DefaultComparer, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. 
- testutil.KeyValueTesting(nil, br, kv.Clone()) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. - br := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := br.newIterator(r, false, nil) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table/reader.go b/kit/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 5b383a2..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,882 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "sort" - "strings" - - "github.com/gocircuit/escher/kit/code.google.com/p/snappy-go/snappy" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = util.ErrNotFound - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - cmp comparer.BasicComparer - data []byte - restartsLen int - restartsOffset int - // Whether checksum is verified and valid. - checksum bool -} - -func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) { - n := b.restartsOffset - data := b.data - cmp := b.cmp - - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):])) - offset += 1 // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(data[offset:]) // key length - _, n2 := binary.Uvarint(data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(data[n+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - n := b.restartsOffset - data := b.data - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = errors.New("leveldb/table: Reader: BlockEntry: invalid block (block entries offset not aligned)") - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = errors.New("leveldb/table: Reader: invalid block (block entries corrupted)") - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter { - bi := &blockIter{ - block: b, - cache: cache, - // Valid key should never be nil. 
- key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: Reader: invalid slice range")) - } - } - return bi -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - block *block - cache, releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.block.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if 
i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(err) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(errors.New("leveldb/table: Reader: Next: invalid block (block entries offset not aligned)")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(err) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. - i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. 
- n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri -= 1 - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(err) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. 
- if offset >= i.offset { - if offset != i.offset { - i.sErr(errors.New("leveldb/table: Reader: Prev: invalid block (block entries offset not aligned)")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir > dirReleased { - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.cache != nil { - i.cache.Release() - i.cache = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir > dirReleased { - i.releaser = releaser - } -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - filter filter.Filter - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return b.filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -type indexIter struct { - blockIter - tableReader *Reader - slice *util.Range - // Options - checksum bool - fillCache bool -} - -func (i *indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid table (bad data block handle)")) - } - var slice *util.Range - 
if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tableReader.getDataIter(dataBH, slice, i.checksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - reader io.ReaderAt - cache cache.Namespace - err error - bpool *util.BufferPool - // Options - cmp comparer.Comparer - filter filter.Filter - checksum bool - strictIter bool - - dataEnd int64 - indexBlock *block - filterBlock *filterBlock -} - -func verifyChecksum(data []byte) bool { - n := len(data) - 4 - checksum0 := binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - return checksum0 == checksum1 -} - -func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - if checksum || r.checksum { - if !verifyChecksum(data) { - return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)") - } - } - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - return nil, err - } - tmp := data - data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length]) - r.bpool.Put(tmp) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length]) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) { - data, err := r.readRawBlock(bh, checksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - cmp: r.cmp, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - checksum: checksum || r.checksum, - } - return b, nil -} - -func (r *Reader) 
readFilterBlock(bh blockHandle, filter filter.Filter) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, errors.New("leveldb/table: Reader: invalid filter block (too short)") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)") - } - b := &filterBlock{ - filter: filter, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -type releaseBlock struct { - r *Reader - b *block -} - -func (r releaseBlock) Release() { - if r.b.data != nil { - r.r.bpool.Put(r.b.data) - r.b.data = nil - } -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator { - if r.cache != nil { - // Get/set block cache. - var err error - cache, ok := r.cache.Get(dataBH.offset, func() (ok bool, value interface{}, charge int, fin cache.SetFin) { - if !fillCache { - return - } - var dataBlock *block - dataBlock, err = r.readBlock(dataBH, checksum) - if err == nil { - ok = true - value = dataBlock - charge = int(dataBH.length) - fin = func() { - r.bpool.Put(dataBlock.data) - dataBlock.data = nil - } - } - return - }) - if err != nil { - return iterator.NewEmptyIterator(err) - } - if ok { - dataBlock := cache.Value().(*block) - if !dataBlock.checksum && (r.checksum || checksum) { - if !verifyChecksum(dataBlock.data) { - return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")) - } - dataBlock.checksum = true - } - iter := dataBlock.newIterator(slice, false, cache) - return iter - } - } - dataBlock, err := r.readBlock(dataBH, checksum) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := dataBlock.newIterator(slice, false, releaseBlock{r, dataBlock}) - return iter -} - -// NewIterator creates an 
iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// The returned iterator is not goroutine-safe and should be released -// when not used. -// -// Also read Iterator documentation of the leveldb/iterator package. - -func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - index := &indexIter{ - blockIter: *r.indexBlock.newIterator(slice, true, nil), - tableReader: r, - slice: slice, - checksum: ro.GetStrict(opt.StrictBlockChecksum), - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false) -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. 
-func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err error) { - if r.err != nil { - err = r.err - return - } - - index := r.indexBlock.newIterator(nil, true, nil) - defer index.Release() - if !index.Seek(key) { - err = index.Error() - if err == nil { - err = ErrNotFound - } - return - } - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)") - return - } - if r.filterBlock != nil && !r.filterBlock.contains(dataBH.offset, key) { - err = ErrNotFound - return - } - data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache()) - defer data.Release() - if !data.Seek(key) { - err = data.Error() - if err == nil { - err = ErrNotFound - } - return - } - // Don't use block buffer, no need to copy the buffer. - rkey = data.Key() - // Use block buffer, and since the buffer will be recycled, the buffer - // need to be copied. - value = append([]byte{}, data.Value()...) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.Find(key, ro) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. 
-func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - if r.err != nil { - err = r.err - return - } - - index := r.indexBlock.newIterator(nil, true, nil) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// NewReader creates a new initialized table reader for the file. -// The cache and bpool is optional and can be nil. -// -// The returned table reader instance is goroutine-safe. -func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader { - if bpool == nil { - bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen) - } - r := &Reader{ - reader: f, - cache: cache, - bpool: bpool, - cmp: o.GetComparer(), - checksum: o.GetStrict(opt.StrictBlockChecksum), - strictIter: o.GetStrict(opt.StrictIterator), - } - if f == nil { - r.err = errors.New("leveldb/table: Reader: nil file") - return r - } - if size < footerLen { - r.err = errors.New("leveldb/table: Reader: invalid table (file size is too small)") - return r - } - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], size-footerLen); err != nil && err != io.EOF { - r.err = fmt.Errorf("leveldb/table: Reader: invalid table (could not read footer): %v", err) - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = errors.New("leveldb/table: Reader: invalid table (bad magic number)") - return r - } - // Decode the metaindex block handle. - metaBH, n := decodeBlockHandle(footer[:]) - if n == 0 { - r.err = errors.New("leveldb/table: Reader: invalid table (bad metaindex block handle)") - return r - } - // Decode the index block handle. 
- indexBH, n := decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)") - return r - } - // Read index block. - r.indexBlock, r.err = r.readBlock(indexBH, true) - if r.err != nil { - return r - } - // Read metaindex block. - metaBlock, err := r.readBlock(metaBH, true) - if err != nil { - r.err = err - return r - } - // Set data end. - r.dataEnd = int64(metaBH.offset) - metaIter := metaBlock.newIterator(nil, false, nil) - for metaIter.Next() { - key := string(metaIter.Key()) - if !strings.HasPrefix(key, "filter.") { - continue - } - fn := key[7:] - var filter filter.Filter - if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { - filter = f0 - } else { - for _, f0 := range o.GetAltFilters() { - if f0.Name() == fn { - filter = f0 - break - } - } - } - if filter != nil { - filterBH, n := decodeBlockHandle(metaIter.Value()) - if n == 0 { - continue - } - // Update data end. - r.dataEnd = int64(filterBH.offset) - filterBlock, err := r.readFilterBlock(filterBH, filter) - if err != nil { - continue - } - r.filterBlock = filterBlock - break - } - } - metaIter.Release() - return r -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table/table.go b/kit/github.com/syndtr/goleveldb/leveldb/table/table.go deleted file mode 100644 index c0ac70d..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table/table.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package table allows read and write sorted key/value. -package table - -import ( - "encoding/binary" -) - -/* -Table: - -Table is consist of one or more data blocks, an optional filter block -a metaindex block, an index block and a table footer. Metaindex block -is a special block used to keep parameters of the table, such as filter -block name and its block handle. 
Index block is a special block used to -keep record of data blocks offset and length, index block use one as -restart interval. The key used by index block are the last key of preceding -block, shorter separator of adjacent blocks or shorter successor of the -last key of the last block. Filter block is an optional block contains -sequence of filter data generated by a filter generator. - -Table data structure: - + optional - / - +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ - | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | - +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ - - Each block followed by a 5-bytes trailer contains compression type and checksum. - -Table block trailer: - - +---------------------------+-------------------+ - | compression type (1-byte) | checksum (4-byte) | - +---------------------------+-------------------+ - - The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression - type also included in the checksum. - -Table footer: - - +------------------- 40-bytes -------------------+ - / \ - +------------------------+--------------------+------+-----------------+ - | metaindex block handle / index block handle / ---- | magic (8-bytes) | - +------------------------+--------------------+------+-----------------+ - - The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Block: - -Block is consist of one or more key/value entries and a block trailer. -Block entry shares key prefix with its preceding key until a restart -point reached. A block should contains at least one restart point. -First restart point are always zero. 
- -Block data structure: - - + restart point + restart point (depends on restart interval) - / / - +---------------+---------------+---------------+---------------+---------+ - | block entry 1 | block entry 2 | ... | block entry n | trailer | - +---------------+---------------+---------------+---------------+---------+ - -Key/value entry: - - +---- key len ----+ - / \ - +-------+---------+-----------+---------+--------------------+--------------+----------------+ - | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | - +-----------------+---------------------+--------------------+--------------+----------------+ - - Block entry shares key prefix with its preceding key: - Conditions: - restart_interval=2 - entry one : key=deck,value=v1 - entry two : key=dock,value=v2 - entry three: key=duck,value=v3 - The entries will be encoded as follow: - - + restart point (offset=0) + restart point (offset=16) - / / - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - \ / \ / \ / - +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ - - The block trailer will contains two restart points: - - +------------+-----------+--------+ - | 0 | 16 | 2 | - +------------+-----------+---+----+ - \ / \ - +-- restart points --+ + restart points length - -Block trailer: - - +-- 4-bytes --+ - / \ - +-----------------+-----------------+-----------------+------------------------------+ - | restart point 1 | .... | restart point n | restart points len (4-bytes) | - +-----------------+-----------------+-----------------+------------------------------+ - - -NOTE: All fixed-length integer are little-endian. 
-*/ - -/* -Filter block: - -Filter block consist of one or more filter data and a filter block trailer. -The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. - -Filter block data structure: - - + offset 1 + offset 2 + offset n + trailer offset - / / / / - +---------------+---------------+---------------+---------+ - | filter data 1 | ... | filter data n | trailer | - +---------------+---------------+---------------+---------+ - -Filter block trailer: - - +- 4-bytes -+ - / \ - +---------------+---------------+---------------+-------------------------+------------------+ - | offset 1 | .... | offset n | filter offset (4-bytes) | base Lg (1-byte) | - +-------------- +---------------+---------------+-------------------------+------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -const ( - blockTrailerLen = 5 - footerLen = 48 - - magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" - - // The block type gives the per-block compression format. - // These constants are part of the file format and should not be changed. 
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/kit/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index 40ff036..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package table - -import ( - "testing" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunDefer() - - RegisterFailHandler(Fail) - RunSpecs(t, "Table Suite") -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/kit/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 2b89b73..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. 
- tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. 
- tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, db, *kv) - } - } - - testutil.AllKeyValueTesting(nil, Build) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - Expect(r.indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/kit/github.com/syndtr/goleveldb/leveldb/table/writer.go b/kit/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index 58c0e13..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/gocircuit/escher/kit/code.google.com/p/snappy-go/snappy" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. 
- w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. - - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. 
- scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. - if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - var compressed []byte - compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes()) - if err != nil { - return - } - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. 
- w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. -func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. 
- if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not goroutine-safe. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. 
- w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index acc6af5..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - 
t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(util.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - 
t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestDeletedKey(key) - t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index 30226f5..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - 
t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := BytesSeparator(key0, key1) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := 
t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() 
- t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index fff4cb1..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) 
- kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - 
return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { - ShuffledIndex(rnd, kv.Len(), 1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. 
%d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", 
"v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - if rnd == nil { - rnd = NewRand() - } - if maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - 1) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + 1 - for j += 1; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index c4fea19..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo" - . "github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) { - if rnd == nil { - rnd = NewRand() - } - - if db, ok := p.(Find); ok { - It("Should find all keys with Find", func() { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) - }) - - It("Should return error if the key is not present", func() { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(util.ErrNotFound)) - }) - } - - if db, ok := p.(Get); ok { - It("Should only find exact key with Get", func() { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. 
- if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(util.ErrNotFound)) - } - }) - }) - } - - if db, ok := p.(NewIterator); ok { - TestIter := func(r *util.Range, _kv KeyValue) { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) - done <- true - }, 3.0) - - RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) - } - }) - - RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. 
%d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) - } -} - -func AllKeyValueTesting(rnd *rand.Rand, body func(kv KeyValue) DB) { - Test := func(kv *KeyValue) func() { - return func() { - db := body(*kv) - KeyValueTesting(rnd, db, *kv) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 17e37be..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,585 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRead != 0: - add(modeRead) - case 
m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { - panic("overflow") - } - return num<> typeCount, storage.FileType(x) & storage.TypeAll -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - r util.Releaser -} - -func (l storageLock) Release() { - l.r.Release() - l.s.logI("storage lock released") -} - -type reader struct { - f *file - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.Read(p) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.ReadAt(p, off) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) -} - -type writer struct { - f *file - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = w.f.s.emulateError(ModeWrite, w.f.Type()) - if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) - n, err = w.Writer.Write(p) - } - w.f.s.count(ModeWrite, w.f.Type(), n) - if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) - } - return -} - -func (w *writer) Sync() (err error) { - 
err = w.f.s.emulateError(ModeSync, w.f.Type()) - if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) - err = w.Writer.Sync() - } - w.f.s.count(ModeSync, w.f.Type(), 0) - if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) - } - return -} - -func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - } - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - 
f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return -} - -type Storage struct { - storage.Storage - closeFn func() error - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" 
- line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - s.lb.WriteByte('\n') -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) -} - -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - r = storageLock{s, r} - } - return -} - -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) - if err != nil { - s.logI("get files failed, err=%v", err) - return - } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) - return -} - -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) - } - return - } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil -} - -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - 
ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) - if err != nil { - s.logI("set manifest failed, err=%v", err) - } else { - s.logI("set manifest, num=%d", f_.Num()) - } - return err -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) - } - return out -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close func error, err=%v", err1) - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - 
x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - return s -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 39bc9d2..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/gocircuit/escher/kit/github.com/onsi/ginkgo/config" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) - delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) 
- if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/kit/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index 157b2e4..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/gocircuit/escher/kit/github.com/onsi/gomega" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util.go b/kit/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index b53d6ea..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "fmt" - "sort" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 4 { - return str - } - return str[:1] + ".." + str[len(str)-1:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type files []storage.File - -func (p files) Len() int { - return len(p) -} - -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() -} - -func (p files) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func (p files) sort() { - sort.Sort(p) -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/kit/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de242..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -// This a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. 
-type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. 
- if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocs n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. 
As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. 
-func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. -func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. 
-func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index b04ff01..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package util - -import ( - "fmt" - "sync" - "sync/atomic" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. -type BufferPool struct { - pool [4]sync.Pool - size [3]uint32 - sizeMiss [3]uint32 - baseline0 int - baseline1 int - baseline2 int - - get uint32 - put uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - switch { - case n <= p.baseline0: - return 0 - case n <= p.baseline1: - return 1 - case n <= p.baseline2: - return 2 - default: - return 3 - } -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - atomic.AddUint32(&p.get, 1) - - if poolNum := p.poolNum(n); poolNum == 0 { - // Fast path. - if b, ok := p.pool[0].Get().([]byte); ok { - switch { - case cap(b) > n: - atomic.AddUint32(&p.less, 1) - return b[:n] - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - panic("not reached") - } - } else { - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - if b, ok := p.pool[poolNum].Get().([]byte); ok { - switch { - case cap(b) > n: - atomic.AddUint32(&p.less, 1) - return b[:n] - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - p.pool[poolNum].Put(b) - } - } - } else { - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// 
Put adds given buffer to the pool. -func (p *BufferPool) Put(b []byte) { - atomic.AddUint32(&p.put, 1) - p.pool[p.poolNum(cap(b))].Put(b) -} - -func (p *BufferPool) String() string { - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v G·%d P·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.get, p.put, p.less, p.equal, p.greater, p.miss) -} - -// NewBufferPool creates a new initialized 'buffer pool'. -func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - return &BufferPool{ - baseline0: baseline, - baseline1: baseline * 2, - baseline2: baseline * 4, - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_pool_legacy.go b/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_pool_legacy.go deleted file mode 100644 index 368248e..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_pool_legacy.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -import ( - "fmt" - "sync/atomic" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. -type BufferPool struct { - pool [4]chan []byte - size [3]uint32 - sizeMiss [3]uint32 - baseline0 int - baseline1 int - baseline2 int - - get uint32 - put uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - switch { - case n <= p.baseline0: - return 0 - case n <= p.baseline1: - return 1 - case n <= p.baseline2: - return 2 - default: - return 3 - } -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. 
- select { - case b := <-pool: - switch { - case cap(b) > n: - atomic.AddUint32(&p.less, 1) - return b[:n] - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - panic("not reached") - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - atomic.AddUint32(&p.less, 1) - return b[:n] - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. -func (p *BufferPool) Put(b []byte) { - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) String() string { - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v G·%d P·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.get, p.put, p.less, p.equal, p.greater, p.miss) -} - -// NewBufferPool creates a new initialized 'buffer pool'. 
-func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline1: baseline * 2, - baseline2: baseline * 4, - } - for i, cap := range []int{6, 6, 3, 1} { - p.pool[i] = make(chan []byte, cap) - } - return p -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d9673..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. -func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. 
-func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. -func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - 
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "" { - t.Errorf("expected ; got %q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, 
testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5; i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := 
bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. - var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. -func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. -func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. 
-func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/kit/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d6..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/hash.go b/kit/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 5490366..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/binary" -) - -// Hash return hash of the given data. 
-func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - var m uint32 = 0xc6a4a793 - var r uint32 = 24 - h := seed ^ (uint32(len(data)) * m) - - buf := bytes.NewBuffer(data) - for buf.Len() >= 4 { - var w uint32 - binary.Read(buf, binary.LittleEndian, &w) - h += w - h *= m - h ^= (h >> 16) - } - - rest := buf.Bytes() - switch len(rest) { - default: - panic("not reached") - case 3: - h += uint32(rest[2]) << 16 - fallthrough - case 2: - h += uint32(rest[1]) << 8 - fallthrough - case 1: - h += uint32(rest[0]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/pool.go b/kit/github.com/syndtr/goleveldb/leveldb/util/pool.go deleted file mode 100644 index 1f7fdd4..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/pool.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/kit/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go deleted file mode 100644 index 27b8d03..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/range.go b/kit/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index da05831..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, include in the range. - Start []byte - - // Limit of the key range, not include in the range. - Limit []byte -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/util/util.go b/kit/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index 229c7d4..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrNotFound = errors.New("leveldb: not found") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. Release should always success - // and can be called multipe times without causing error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. 
-type ReleaseSetter interface { - // SetReleaser associates the given releaser to the resources. The - // releaser will be called once coresponding resources released. - // Calling SetReleaser with nil will clear the releaser. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. -func (r *BasicReleaser) SetReleaser(releaser Releaser) { - r.releaser = releaser -} diff --git a/kit/github.com/syndtr/goleveldb/leveldb/version.go b/kit/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index d465ea1..0000000 --- a/kit/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "errors" - "sync/atomic" - "unsafe" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/util" -) - -var levelMaxSize [kNumLevels]float64 - -func init() { - // Precompute max size of each level - for level := range levelMaxSize { - res := float64(10 * 1048576) - for n := level; n > 1; n-- { - res *= 10 - } - levelMaxSize[level] = res - } -} - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - tables [kNumLevels]tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. 
These fields - // are initialized by computeCompaction() - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - ref int - next *version -} - -func (v *version) release_NB() { - v.ref-- - if v.ref > 0 { - return - } - if v.ref < 0 { - panic("negative version ref") - } - - tables := make(map[uint64]bool) - for _, tt := range v.next.tables { - for _, t := range tt { - num := t.file.Num() - tables[num] = true - } - } - - for _, tt := range v.tables { - for _, t := range tt { - num := t.file.Num() - if _, ok := tables[num]; !ok { - v.s.tops.remove(t) - } - } - } - - v.next.release_NB() - v.next = nil -} - -func (v *version) release() { - v.s.vmu.Lock() - v.release_NB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Walk tables level-by-level. - for level, tables := range v.tables { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(ikey iKey, ro *opt.ReadOptions) (value []byte, tcomp bool, err error) { - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - l0found bool - l0seq uint64 - l0vt vType - l0val []byte - ) - - err = ErrNotFound - - // Since entries never hope across level, finding key/value - // in smaller level make later levels irrelevant. 
- v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if !tseek { - if tset == nil { - tset = &tSet{level, t} - } else if tset.table.consumeSeek() <= 0 { - tseek = true - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - } - - ikey__, val_, err_ := v.s.tops.find(t, ikey, ro) - switch err_ { - case nil: - case ErrNotFound: - return true - default: - err = err_ - return false - } - - ikey_ := iKey(ikey__) - if seq, vt, ok := ikey_.parseNum(); ok { - if v.s.icmp.uCompare(ukey, ikey_.ukey()) != 0 { - return true - } - - if level == 0 { - if seq >= l0seq { - l0found = true - l0seq = seq - l0vt = vt - l0val = val_ - } - } else { - switch vt { - case tVal: - value = val_ - err = nil - case tDel: - default: - panic("leveldb: invalid internal key type") - } - return false - } - } else { - err = errors.New("leveldb: internal key corrupted") - return false - } - - return true - }, func(level int) bool { - if l0found { - switch l0vt { - case tVal: - value = l0val - err = nil - case tDel: - default: - panic("leveldb: invalid internal key type") - } - return false - } - - return true - }) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - - strict := v.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue - } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict, true) - its = append(its, it) - } - - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v} -} - -// Spawn a new version based on this version. 
-func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - return len(v.tables[level]) -} - -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table. - var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { - return 0, err - } - n += nn - } - } - } - - return -} - -func (v *version) pickLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - for ; level < kMaxMemCompactLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > kMaxGrandParentOverlapBytes { - break - } - } - } - - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 - - for level, tables := range v.tables { - var score float64 - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compactions. 
- // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). - score = float64(len(tables)) / kL0_CompactionTrigger - } else { - score = float64(tables.size()) / levelMaxSize[level] - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - } - - v.cLevel = bestLevel - v.cScore = bestScore -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type versionStaging struct { - base *version - tables [kNumLevels]struct { - added map[uint64]ntRecord - deleted map[uint64]struct{} - } -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) - } - tm.deleted[r.num] = struct{}{} - } - - if tm.added != nil { - delete(tm.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]ntRecord) - } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. - nv := &version{s: p.base.s} - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue - } - if _, ok := tm.added[t.file.Num()]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range tm.added { - nt = append(nt, r.makeFile(p.base.s)) - } - - // Sort tables. 
- if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - nv.tables[level] = nt - } - - // Compute compaction score for new version. - nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.release_NB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/kit/github.com/syndtr/goleveldb/manualtest/filelock/main.go b/kit/github.com/syndtr/goleveldb/manualtest/filelock/main.go deleted file mode 100644 index 648eb50..0000000 --- a/kit/github.com/syndtr/goleveldb/manualtest/filelock/main.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "os" - "os/exec" - "path/filepath" - - "github.com/gocircuit/escher/kit/github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - filename string - child bool -) - -func init() { - flag.StringVar(&filename, "filename", filepath.Join(os.TempDir(), "goleveldb_filelock_test"), "Filename used for testing") - flag.BoolVar(&child, "child", false, "This is the child") -} - -func runChild() error { - var args []string - args = append(args, os.Args[1:]...) - args = append(args, "-child") - cmd := exec.Command(os.Args[0], args...) 
- var out bytes.Buffer - cmd.Stdout = &out - err := cmd.Run() - r := bufio.NewReader(&out) - for { - line, _, e1 := r.ReadLine() - if e1 != nil { - break - } - fmt.Println("[Child]", string(line)) - } - return err -} - -func main() { - flag.Parse() - - fmt.Printf("Using path: %s\n", filename) - if child { - fmt.Println("Child flag set.") - } - - stor, err := storage.OpenFile(filename) - if err != nil { - fmt.Printf("Could not open storage: %s", err) - os.Exit(10) - } - - if !child { - fmt.Println("Executing child -- first test (expecting error)") - err := runChild() - if err == nil { - fmt.Println("Expecting error from child") - } else if err.Error() != "exit status 10" { - fmt.Println("Got unexpected error from child:", err) - } else { - fmt.Printf("Got error from child: %s (expected)\n", err) - } - } - - err = stor.Close() - if err != nil { - fmt.Printf("Error when closing storage: %s", err) - os.Exit(11) - } - - if !child { - fmt.Println("Executing child -- second test") - err := runChild() - if err != nil { - fmt.Println("Got unexpected error from child:", err) - } - } - - os.RemoveAll(filename) -} diff --git a/kit/io/sovereign.go b/kit/io/sovereign.go index d9c0a2c..7cc2408 100644 --- a/kit/io/sovereign.go +++ b/kit/io/sovereign.go @@ -102,7 +102,7 @@ func (x *nopWriteCloser) Close() error { return nil } -// RunOnCloseReader returns an io.ReadCloser which +// RunOnCloseReader returns an io.ReadCloser which // executes run once, on the first call to Close. func RunOnCloseReader(x io.Reader, run CloseFunc) io.ReadCloser { return &runOnCloseReader{run: run, Reader: x} @@ -124,7 +124,7 @@ func (x *runOnCloseReader) Close() (err error) { return } -// RunOnCloseWrite returns an io.WriteCloser which +// RunOnCloseWrite returns an io.WriteCloser which // executes run once, on the first call to Close. 
func RunOnCloseWriter(x io.Writer, run CloseFunc) io.WriteCloser { return &runOnCloseWriter{run: run, Writer: x} diff --git a/kit/plumb/client.go b/kit/plumb/client.go index c24493a..251c0f9 100644 --- a/kit/plumb/client.go +++ b/kit/plumb/client.go @@ -9,7 +9,7 @@ package plumb import "sync" type Client struct { - req chan chan<- interface{} + req chan chan<- interface{} recognize func(interface{}) sync.Mutex } diff --git a/misc/img/main.svg b/misc/img/main.svg index c24c6ec..4c77164 100644 --- a/misc/img/main.svg +++ b/misc/img/main.svg @@ -1,4 +1,231 @@ - - - - + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/scripts/build_handbook.sh b/scripts/build_handbook.sh new file mode 100755 index 0000000..594af02 --- /dev/null +++ b/scripts/build_handbook.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# Builds the Escher handbook. +# NOTE +# * Requires the `escher` command available on the PATH. +# * Requires the `inkscape` command available on the PATH. +# * Requires the AWK script `svg_hide_group.awk`, +# which should already be in the same directory as this script. + +# Exit immediately on each error and unset variable; +# see: https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/ +#set -Eeuo pipefail +set -Eeu + +script_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")") +repo_root="$(cd $script_dir; cd ..; pwd)" +# NOTE We do not use this path, +# even though it would make the script position independent, +# because it would break (or worse: run the wrong code) +# when working on a fork of the repository. +#src_dir="$GOPATH/src/github.com/hoijui/escher/src/" +# This way of defning src_dir ensures that we can use relative paths, +# while the script may still be called from anywhere, +# as long as the sources are to be found +# under the same relative path within the escher repo. 
+escher_src_dir="$repo_root/src" +svg_hide="$script_dir/svg_hide_group.awk" +src_dir="$escher_src_dir/handbook" +export ESCHER="$escher_src_dir" + +rel_out_dir="${1:-}" +if [ "$rel_out_dir" = "" ] +then + >&2 echo "Please supply a directory path to build the handbook in." + exit 1 +fi +out_dir=$(mkdir -p "$rel_out_dir"; cd "$rel_out_dir"; pwd) +if [ "$out_dir" = "$repo_root" ] +then + >&2 echo "Please supply an output directory different then the escher repo root." + exit 1 +fi +if [ "$out_dir" = "$src_dir" ] +then + >&2 echo "Please supply an output directory different then the hanbook source directory." + exit 1 +fi + +echo "Building the handbook in '$out_dir'; press Ctrl+C to abort" +echo "Waiting 3 seconds ..." +sleep 3 + +echo +echo "Removing previous build artifacts ..." +rm -Rf "$out_dir/img" +rm -Rf "$out_dir/css" +rm -Rf "$out_dir/pdf" +rm -f "$out_dir/"*.html + +echo +echo "Copying assets from sources to output directory ..." +echo -e "\tcss ..." +cp -r "$src_dir/css" "$out_dir/" +echo -e "\timg ..." +cp -r "$src_dir/img" "$out_dir/" +echo -e "\tpdf ..." +cp -r "$src_dir/pdf" "$out_dir/" +rm -f "$out_dir/css/font/.gitignore" + +echo +echo "Generating different views of a \"packed\" SVG (using AWK) ..." +svg_in="$src_dir/img/circuit.svg" + +svg_out="$out_dir/img/circuit-parts-generated.svg" +echo -e "\t\"$svg_in\" --> \"$svg_out\"" +cat "$svg_in" | awk \ + -v label_regex=labels-instances -v do_show=0 -f "$svg_hide" \ + > "$svg_out" + +svg_out="$out_dir/img/circuit-instances-generated.svg" +echo -e "\t\"$svg_in\" --> \"$svg_out\"" +cat "$svg_in" | awk \ + -v label_regex=labels-parts -v do_show=0 -f "$svg_hide" \ + > "$svg_out" + +svg_out="$out_dir/img/circuit-raw-generated.svg" +echo -e "\t\"$svg_in\" --> \"$svg_out\"" +cat "$svg_in" | awk \ + -v label_regex='labels-.*' -v do_show=0 -f "$svg_hide" \ + > "$svg_out" + +echo +echo "Generate widely-compatible versions of our SVG images (using inkscape) ..." 
+# This makes the generated versions be +# not just Inkscape compatible, +# but display correctly everywhere. +for svg_in in "$out_dir/img/"*-generated.svg +do + svg_out=$(echo "$svg_in" | sed -e 's|-generated\.svg$|-plain-generated.svg|') + echo -e "\t\"$svg_in\" --> \"$svg_out\"" + inkscape --without-gui "$svg_in" --export-text-to-path --export-plain-svg "$svg_out" + rm "$svg_in" +done + +echo +echo "Convert SVGs to PNGs (using inkscape) ..." +for svg in "$out_dir/img/"*.svg +do + png=$(echo "$svg" | sed -e 's|\(-plain\)\?\(-generated\)\?\.svg$|.png|') + echo -e "\t\"$svg\" --> \"$png\"" + inkscape --without-gui "$svg" --export-png "$png" > /dev/null +done + +echo +echo "Building the handbook (using escher) ..." +escher "*handbook.main" + +echo +echo "done." diff --git a/scripts/svg_hide_group.awk b/scripts/svg_hide_group.awk new file mode 100644 index 0000000..f4eef6c --- /dev/null +++ b/scripts/svg_hide_group.awk @@ -0,0 +1,88 @@ +# Hide or show a an SVG group (tag name `g`) +# Example call: +# cat in.svg | awk \ +# -v label_regex=mySvgGroupsLabel \ +# -v do_show=0 \ +# -f svg_hide_group.awk \ +# > "out.svg" +# +# Example SVG diff: +# +#+ style="display:none"> + +BEGIN { + in_group_tag=0 + id=-1 + inkscape_label=-1 + in_chosen_element=0 + modified_style=0 + if (length(label_regex) == 0) { + print("Please set the objects inkscape:label (regex) to look for with '-v label_regex=\"my-label\"'") > "/dev/stderr" + exit(52) + } + if (length(do_show) == 0) { + print("Please set action to take (0=hide, 1=show) with '-v do_show=1'") > "/dev/stderr" + exit(53) + } + if (do_show) { + display_style_replace="inline" + } else { + display_style_replace="none" + } +} + +match($0, /[ \t]+/ { + if (in_group_tag) { + in_group_tag=0 + if (in_chosen_element && !modified_style) { + indent=$0 + sub(/[^ \t].*/, "", indent) + sub(/>/, "\n" indent "style=\"display:" display_style_replace "\">", $0) + } + } + in_chosen_element=0 +} + +{ + print($0) +} diff --git a/scripts/tests.sh 
b/scripts/tests.sh new file mode 100755 index 0000000..5d218c7 --- /dev/null +++ b/scripts/tests.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Runs all the go and Escher (unit-)tests. +# NOTE +# * Requires the `escher` command available on the PATH. + +# Exit immediately on each error and unset variable; +# see: https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/ +#set -Eeuo pipefail +set -Eeu + +script_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")") +repo_root="$(cd $script_dir; cd ..; pwd)" +# NOTE We do not use this path, +# even though it would make the script position independent, +# because it would break (or worse: run the wrong code) +# when working on a fork of the repository. +#src_dir="$GOPATH/src/github.com/hoijui/escher/src/" +# This way of defning src_dir ensures that we can use relative paths, +# while the script may still be called from anywhere, +# as long as the sources are to be found +# under the same relative path within the escher repo. +src_dir="$repo_root/src" + +which escher > /dev/null +if [ $? -ne 0 ] +then + >&2 echo "Error: Could not find 'escher' in PATH" + exit 1 +fi + +echo +echo "Running Go(lang) tests ..." +cd "$repo_root" +for go_test in $(find -name "*_test.go") +do + test_dir=$(dirname "$go_test") + cd "$test_dir" + + echo + echo "GO TESTS $go_test ..." + go test + cd "$repo_root" +done + +echo +echo "Running Escher tests ..." +ESCHER=$src_dir escher "*test.All" + +echo +echo "done." diff --git a/scripts/tutorials.sh b/scripts/tutorials.sh new file mode 100755 index 0000000..58e9c76 --- /dev/null +++ b/scripts/tutorials.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Runs all the Escher tutorials. +# NOTE +# * Requires the `escher` command available on the PATH. 
+ +# Exit immediately on each error and unset variable; +# see: https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/ +#set -Eeuo pipefail +set -Eeu + +script_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")") +repo_root="$(cd $script_dir; cd ..; pwd)" +# NOTE We do not use this path, +# even though it would make the script position independent, +# because it would break (or worse: run the wrong code) +# when working on a fork of the repository. +#src_dir="$GOPATH/src/github.com/hoijui/escher/src/" +# This way of defning src_dir ensures that we can use relative paths, +# while the script may still be called from anywhere, +# as long as the sources are to be found +# under the same relative path within the escher repo. +src_dir="$repo_root/src" +tutorials_dir="$src_dir/tutorial" + +which escher > /dev/null +if [ $? -ne 0 ] +then + >&2 echo "Error: Could not find 'escher' in PATH" + exit 1 +fi + +cd "$repo_root" + +if [ "${1:-}" = "" ] +then + find "$tutorials_dir" -regex '.*/[A-Z][^/]*.escher' > /dev/null + if [ $? -ne 0 ] + then + >&2 echo "Error: No tutorials found in '$(pwd)/$tutorials_dir'." + exit 2 + fi + + tutorial_circuits=$(find "$tutorials_dir" -regex '.*/[A-Z][^/]*.escher' \ + | xargs basename --multiple --suffix '.escher') +else + tutorial_circuits="$1" +fi +export ESCHER="$src_dir" + +for circuit in $tutorial_circuits +do + echo + echo + echo "################################################################################" + echo "### Running Escher tutorial $circuit ..." + echo "--------------------------------------------------------------------------------" + src_file="${ESCHER}/tutorial/${circuit}.escher" + main_address="tutorial.${circuit}Main" + meant_to_fail=$(cat "$src_file" | grep -q -e 'MEANT_TO_FAIL' && echo "true" || echo "false") + ## run each tutorial for at most 2 seconds + #timeout --foreground --kill-after=2 --signal=SIGINT 3s \ + if $meant_to_fail + then + ! 
escher "*$main_address" + else + escher "*$main_address" + fi + echo + echo "################################################################################" +done + diff --git a/see/circuit.go b/see/circuit.go index c3e4167..ebbe3e3 100644 --- a/see/circuit.go +++ b/see/circuit.go @@ -7,14 +7,11 @@ package see import ( - // "log" - // "fmt" - - . "github.com/gocircuit/escher/a" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/a" + cir "github.com/hoijui/escher/circuit" ) -func SeeCircuit(src *Src) (v Value) { +func SeeCircuit(src *a.Src) (v cir.Value) { defer func() { if r := recover(); r != nil { v = nil @@ -31,13 +28,13 @@ func SeeCircuit(src *Src) (v Value) { return } -func SeeChamber(src *Src) (v Value) { +func SeeChamber(src *a.Src) (v cir.Value) { defer func() { if r := recover(); r != nil { v = nil } }() - u := New() + u := cir.New() t := src.Copy() Space(t) var j int diff --git a/see/comment.go b/see/comment.go index 8f42877..9a525bc 100644 --- a/see/comment.go +++ b/see/comment.go @@ -7,17 +7,17 @@ package see import ( - . "github.com/gocircuit/escher/a" + "github.com/hoijui/escher/a" ) -func SpaceNoNewline(src *Src) { - if len(Whitespace(src)) > 0 { +func SpaceNoNewline(src *a.Src) { + if len(a.Whitespace(src)) > 0 { return } panic("whitespace") } -func Space(src *Src) (newLine bool) { +func Space(src *a.Src) (newLine bool) { for endOfLine(src) { newLine = true } @@ -27,7 +27,7 @@ func Space(src *Src) (newLine bool) { return } -func endOfLine(src *Src) bool { - Whitespace(src) - return len(src.Consume(IsCommaOrSemicolonOrNewline)) > 0 +func endOfLine(src *a.Src) bool { + a.Whitespace(src) + return len(src.Consume(a.IsCommaOrSemicolonOrNewline)) > 0 } diff --git a/see/match.go b/see/match.go index 882141e..efaff7e 100644 --- a/see/match.go +++ b/see/match.go @@ -7,24 +7,22 @@ package see import ( - // "fmt" - - . "github.com/gocircuit/escher/a" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/a" + cir "github.com/hoijui/escher/circuit" ) type Carry struct { - Name - Value + cir.Name + cir.Value } -func SeeLink(src *Src, nsugar int) (x []Vector, carry []*Carry) { +func SeeLink(src *a.Src, nsugar int) (x []cir.Vector, carry []*Carry) { defer func() { if r := recover(); r != nil { x = nil } }() - x, carry = make([]Vector, 2), make([]*Carry, 2) + x, carry = make([]cir.Vector, 2), make([]*Carry, 2) t := src.Copy() Space(t) // @@ -34,15 +32,15 @@ func SeeLink(src *Src, nsugar int) (x []Vector, carry []*Carry) { } if g != nil { carry[0] = &Carry{nsugar, g} - x[0] = Vector{nsugar, DefaultValve} + x[0] = cir.Vector{nsugar, cir.DefaultValve} nsugar++ } else { - x[0] = Vector{p, v} + x[0] = cir.Vector{p, v} } // - Whitespace(t) + a.Whitespace(t) t.Match("=") - Whitespace(t) + a.Whitespace(t) // g, p, v, ok = seeEndpoint(t) if !ok { @@ -50,10 +48,10 @@ func SeeLink(src *Src, nsugar int) (x []Vector, carry []*Carry) { } if g != nil { carry[1] = &Carry{nsugar, g} - x[1] = Vector{nsugar, DefaultValve} + x[1] = cir.Vector{nsugar, cir.DefaultValve} nsugar++ } else { - x[1] = Vector{p, v} + x[1] = cir.Vector{p, v} } // if !Space(t) { // require newline at end @@ -63,7 +61,7 @@ func SeeLink(src *Src, nsugar int) (x []Vector, carry []*Carry) { return } -func seeEndpoint(src *Src) (m Value, p, v Name, ok bool) { +func seeEndpoint(src *a.Src) (m cir.Value, p, v cir.Name, ok bool) { if p, v, ok = seeNameEndpoint(src); ok { // valve (or empty string) return } @@ -71,7 +69,7 @@ func seeEndpoint(src *Src) (m Value, p, v Name, ok bool) { return } -func seeValueEndpoint(src *Src) (m Value, ok bool) { +func seeValueEndpoint(src *a.Src) (m cir.Value, ok bool) { defer func() { if r := recover(); r != nil { ok = false @@ -83,7 +81,7 @@ func seeValueEndpoint(src *Src) (m Value, ok bool) { return m, true } -func seeNameEndpoint(src *Src) (gate, valve Name, ok bool) { +func seeNameEndpoint(src *a.Src) (gate, valve 
cir.Name, ok bool) { defer func() { if r := recover(); r != nil { ok = false @@ -91,7 +89,7 @@ func seeNameEndpoint(src *Src) (gate, valve Name, ok bool) { }() t := src.Copy() gate = SeeValue(t) - t.Match(string(ValveSelector)) + t.Match(string(a.ValveSelector)) valve = SeeValue(t) src.Become(t) ok = true diff --git a/see/meaning.go b/see/meaning.go index d4a12c1..9ffdd74 100644 --- a/see/meaning.go +++ b/see/meaning.go @@ -9,15 +9,14 @@ package see import ( "bytes" "fmt" - // "log" "strconv" "strings" - . "github.com/gocircuit/escher/a" - . "github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/a" + cir "github.com/hoijui/escher/circuit" ) -func SeeValueOrNil(src *Src) (x Value) { +func SeeValueOrNil(src *a.Src) (x cir.Value) { defer func() { if r := recover(); r != nil { x = nil @@ -26,7 +25,7 @@ func SeeValueOrNil(src *Src) (x Value) { return SeeValue(src) } -func SeeValue(src *Src) (x Value) { +func SeeValue(src *a.Src) (x cir.Value) { if x = SeeCircuit(src); x != nil { return } @@ -54,12 +53,12 @@ func SeeValue(src *Src) (x Value) { panic(0) } -func SeeName(src *Src) Name { - return src.Consume(IsIdentifier) +func SeeName(src *a.Src) cir.Name { + return src.Consume(a.IsIdentifier) } // SeeVerb ... 
-func SeeVerb(src *Src) interface{} { +func SeeVerb(src *a.Src) interface{} { t := src.Copy() verb := "" switch { @@ -70,24 +69,24 @@ func SeeVerb(src *Src) interface{} { default: return nil } - delimit := t.Consume(IsIdentifierOrRefineSymbol) - xx := strings.Split(delimit, RefineSymbolString) + delimit := t.Consume(a.IsIdentifierOrRefineSymbol) + xx := strings.Split(delimit, a.RefineSymbolString) if len(xx) == 1 && xx[0] == "" { xx = nil } src.Become(t) - var nn []Name + var nn []cir.Name for _, x := range xx { nn = append(nn, x) } - return Circuit(NewVerbAddress(verb, nn...)) + return cir.Circuit(cir.NewVerbAddress(verb, nn...)) } // Int … -func SeeInt(src *Src) interface{} { +func SeeInt(src *a.Src) interface{} { t := src.Copy() - l := Literal(t) - if l == "" { + l := a.Literal(t) + if l == a.NullLiteral { return nil } r := bytes.NewBufferString(l) @@ -100,10 +99,10 @@ func SeeInt(src *Src) interface{} { } // Float … -func SeeFloat(src *Src) interface{} { +func SeeFloat(src *a.Src) interface{} { t := src.Copy() - l := Literal(t) - if l == "" { + l := a.Literal(t) + if l == a.NullLiteral { return nil } r := bytes.NewBufferString(l) @@ -116,10 +115,10 @@ func SeeFloat(src *Src) interface{} { } // Complex … -func SeeComplex(src *Src) interface{} { +func SeeComplex(src *a.Src) interface{} { t := src.Copy() - l := Literal(t) - if l == "" { + l := a.Literal(t) + if l == a.NullLiteral { return nil } r := bytes.NewBufferString(l) @@ -132,7 +131,7 @@ func SeeComplex(src *Src) interface{} { } // SeeBackquoteString … -func SeeBackquoteString(src *Src) interface{} { +func SeeBackquoteString(src *a.Src) interface{} { t := src.Copy() quoted, ok := DelimitBackquoteString(t) if !ok { @@ -143,13 +142,13 @@ func SeeBackquoteString(src *Src) interface{} { return str } -func DelimitBackquoteString(src *Src) (string, bool) { +func DelimitBackquoteString(src *a.Src) (string, bool) { var m int // number of bytes accepted into the quoted portion buf := src.Buffer() // first backquote 
r, n, err := buf.ReadRune() if err != nil || r != '`' { - return "", false + return a.NullLiteral, false } m += n // @@ -158,7 +157,7 @@ func DelimitBackquoteString(src *Src) (string, bool) { r, n, err = buf.ReadRune() if err != nil { if q != 1 { // reached end without finding closing backquote - return "", false + return a.NullLiteral, false } return src.SkipString(src.Len() - buf.Len()), true } @@ -186,7 +185,7 @@ func DelimitBackquoteString(src *Src) (string, bool) { } // SeeDoubleQuoteString … -func SeeDoubleQuoteString(src *Src) interface{} { +func SeeDoubleQuoteString(src *a.Src) interface{} { t := src.Copy() quoted, ok := DelimitDoubleQuoteString(t) if !ok { @@ -201,13 +200,13 @@ func SeeDoubleQuoteString(src *Src) interface{} { return str } -func DelimitDoubleQuoteString(src *Src) (string, bool) { +func DelimitDoubleQuoteString(src *a.Src) (string, bool) { var m int // number of bytes accepted into the quoted portion buf := src.Buffer() // first quote r, n, err := buf.ReadRune() if err != nil || r != '"' { - return "", false + return a.NullLiteral, false } m += n // @@ -215,7 +214,7 @@ func DelimitDoubleQuoteString(src *Src) (string, bool) { for { r, n, err = buf.ReadRune() if err != nil { - return "", false // reached end of string without closing quote + return a.NullLiteral, false // reached end of string without closing quote } if backslash { backslash = false diff --git a/see/parse.go b/see/parse.go index 67d5832..446ce19 100644 --- a/see/parse.go +++ b/see/parse.go @@ -9,18 +9,18 @@ package see import ( "log" - . "github.com/gocircuit/escher/a" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/a" + cir "github.com/hoijui/escher/circuit" ) -func ParseVerb(src string) (verb Verb) { +func ParseVerb(src string) (verb cir.Verb) { defer func() { if r := recover(); r != nil { - verb = Verb{} + verb = cir.Verb{} } }() - t := NewSrcString(src) - verb = Verb(SeeVerb(t).(Circuit)) + t := a.NewSrcString(src) + verb = cir.Verb(SeeVerb(t).(cir.Circuit)) if t.Len() != 0 { log.Printf("Non-address characters at end of %q", src) panic(1) @@ -28,14 +28,14 @@ func ParseVerb(src string) (verb Verb) { return verb } -func Parse(src string) (Name, Value) { - return SeePeer(NewSrcString(src)) +func Parse(src string) (cir.Name, cir.Value) { + return SeePeer(a.NewSrcString(src)) } -func ParseCircuit(src string) Circuit { +func ParseCircuit(src string) cir.Circuit { n, v := Parse(src) if _, ok := n.(Nameless); !ok { panic("not a circuit") } - return v.(Circuit) + return v.(cir.Circuit) } diff --git a/see/parse_test.go b/see/parse_test.go index c5d6112..95e0f7f 100644 --- a/see/parse_test.go +++ b/see/parse_test.go @@ -10,7 +10,7 @@ import ( "fmt" "testing" - . "github.com/gocircuit/escher/a" + . "github.com/hoijui/escher/a" ) var testValue = []string{ diff --git a/see/peer.go b/see/peer.go index ec76f58..53c2f76 100644 --- a/see/peer.go +++ b/see/peer.go @@ -7,55 +7,53 @@ package see import ( - // "log" - - . "github.com/gocircuit/escher/a" - . 
"github.com/gocircuit/escher/circuit" + "github.com/hoijui/escher/a" + cir "github.com/hoijui/escher/circuit" ) -func SeePeer(src *Src) (n Name, m Value) { +func SeePeer(src *a.Src) (n cir.Name, m cir.Value) { if n, m = seeNameGate(src); n != nil { return n, m } return seeNamelessGate(src) } -func seeNameGate(src *Src) (n Name, m Value) { +func seeNameGate(src *a.Src) (n cir.Name, m cir.Value) { defer func() { if r := recover(); r != nil { n, m = nil, nil } }() t := src.Copy() - Whitespace(t) + a.Whitespace(t) left := SeeValue(t) - if len(Whitespace(t)) == 0 { + if len(a.Whitespace(t)) == 0 { panic("no whitespace after name") } right := SeeValue(t) if !Space(t) { // require newline at end return nil, nil } - if right == "" { + if right == a.NullLiteral { panic("no gate value") } src.Become(t) return left, right } -func seeNamelessGate(src *Src) (n Name, m Value) { +func seeNamelessGate(src *a.Src) (n cir.Name, m cir.Value) { defer func() { if r := recover(); r != nil { n, m = nil, nil } }() t := src.Copy() - Whitespace(t) + a.Whitespace(t) value := SeeValue(t) if !Space(t) { // require newline at end return nil, nil } - if value == "" { + if value == a.NullLiteral { panic("nameless empty-string value implicit") } src.Become(t) diff --git a/src/README b/src/README deleted file mode 100644 index 8286c10..0000000 --- a/src/README +++ /dev/null @@ -1 +0,0 @@ -This directory contains runtime faculties for Escher, implemented in Escher. 
diff --git a/src/basic/alt.escher b/src/basic/TestAlternate.escher similarity index 90% rename from src/basic/alt.escher rename to src/basic/TestAlternate.escher index a8fa284..a1b3243 100644 --- a/src/basic/alt.escher +++ b/src/basic/TestAlternate.escher @@ -3,7 +3,7 @@ It helps future understanding of past knowledge to save this notice, so peers of other times and backgrounds can see history clearly.` -Func { +testAlternateFunc { in *e.Alternate out *e.Alternate star *e.Star @@ -21,9 +21,9 @@ Func { star:Spark = 1 } -TestFunc { +TestAlternate { *e.Ignore = `Test over multiple values, each with simulated processing delay` - func *Func + func *testAlternateFunc func:In = 1 func:Begin = func:End func:Out = match:Got diff --git a/src/basic/fork.escher b/src/basic/TestFork.escher similarity index 100% rename from src/basic/fork.escher rename to src/basic/TestFork.escher diff --git a/src/basic/faculty.escher b/src/basic/faculty.escher new file mode 100644 index 0000000..2e29a1e --- /dev/null +++ b/src/basic/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + Tests for basic functions of Escher, implemented in Escher. + */` +} + diff --git a/src/circuit/circuit.escher b/src/circuit/ProcessIgnoreIO.escher similarity index 99% rename from src/circuit/circuit.escher rename to src/circuit/ProcessIgnoreIO.escher index d17ea47..c58af3b 100644 --- a/src/circuit/circuit.escher +++ b/src/circuit/ProcessIgnoreIO.escher @@ -20,3 +20,4 @@ ProcessIgnoreIO { yio:Stdout = *io.Clunk yio:Stderr = *io.Clunk } + diff --git a/src/circuit/faculty.escher b/src/circuit/faculty.escher new file mode 100644 index 0000000..35f5f7d --- /dev/null +++ b/src/circuit/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + TODO Can't see a rationale for this faculties chosen name. 
+ */` +} + diff --git a/src/e/system.escher b/src/e/QuickMaterialize.escher similarity index 100% rename from src/e/system.escher rename to src/e/QuickMaterialize.escher diff --git a/src/e/faculty.escher b/src/e/faculty.escher new file mode 100644 index 0000000..35f5f7d --- /dev/null +++ b/src/e/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + TODO Can't see a rationale for this faculties chosen name. + */` +} + diff --git a/src/faculty.escher b/src/faculty.escher new file mode 100644 index 0000000..41c1705 --- /dev/null +++ b/src/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + Root of the runtime faculties for Escher, implemented in Escher. + */` +} + diff --git a/src/handbook/.gitignore b/src/handbook/.gitignore index 2d19fc7..9604d8e 100644 --- a/src/handbook/.gitignore +++ b/src/handbook/.gitignore @@ -1 +1,3 @@ *.html +/img/*.png +/img/*-generated.* diff --git a/src/handbook/README.md b/src/handbook/README.md new file mode 100644 index 0000000..eeddbaf --- /dev/null +++ b/src/handbook/README.md @@ -0,0 +1,4 @@ +# Escher - Handbook + +To build this handbook, +run [$ESCHER/scripts/build_handbook.sh](../../scripts/build_handbook.sh). diff --git a/src/handbook/basis-escher.escher b/src/handbook/basis-escher.escher index fba6ade..5340eb8 100644 --- a/src/handbook/basis-escher.escher +++ b/src/handbook/basis-escher.escher @@ -1,9 +1,9 @@ BuildBasisEscherPage { - wf *io.WriteFile - wf:Content = *BasisEscherPage - wf:Name = "basis-escher.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *BasisEscherPage + wf:Name = "basis-escher.html" + : = wf:Ready } BasisEscherPage { @@ -23,57 +23,57 @@ BasisEscherPage {

Index reflex

Every running Escher circuit program has been materialized relative to an index. -The index reflex, escher.Index, is a noun reflex that emits the index relative to which +The index reflex, e.Index, is a noun reflex that emits the index relative to which the current circuit has been materialized.

The following program, for instance, will print out the index used to materialize the invoking circuit program: -

+
 {
-	*Show = *escher.Index
+	*e.Show = *e.Index
 }
 

Materialize reflex

-

The materialize reflex, named escher.Materialize, +

The materialize reflex, named e.Materialize, materializes a program circuit relative to an index of faculties.

The reflex requires that two valves, :View and :Residue, be connected. Values sent to :View must be circuits containing gates named Index and Program. -The value of the Program gate must be a circuit program (or any +The value of the Program gate must be a circuit program (or any gate value allowed within a circuit program). Whereas the value of the Index gate should hold the index, relative to which the program will be materialized. -

When a value is received at :View, the materialize reflex will materialize the +

When a value is received at :View, the materialize reflex will materialize the program relative to the given index and will return the residue to the valve :Residue.

Consider the following example program: -

+
 {
-	m *escher.Materialize
+	m *e.Materialize
 	f *e.Fork
 
 	m:View = f:
 	f:Program = {
-		*Show = "Hello from the child circuit program."
+		*e.Show = "Hello from the child circuit program."
 	}
-	f:Index = *escher.Index
-	m:Residue = *Show
+	f:Index = *e.Index
+	m:Residue = *e.Show
 }
 

This program will materialize the child program -

+
 {
-	*Show = "Hello from the child circuit program."
+	*e.Show = "Hello from the child circuit program."
 }
 

using the same index that was used to materialize the parent program, as acquired from -the *escher.Index reflex in the parent program. +the *e.Index reflex in the parent program. ` } diff --git a/src/handbook/basis-flow.escher b/src/handbook/basis-flow.escher index 16ee842..2143954 100644 --- a/src/handbook/basis-flow.escher +++ b/src/handbook/basis-flow.escher @@ -1,9 +1,9 @@ BuildBasisFlowPage { - wf *io.WriteFile - wf:Content = *BasisFlowPage - wf:Name = "basis-flow.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *BasisFlowPage + wf:Name = "basis-flow.html" + : = wf:Ready } BasisFlowPage { @@ -26,54 +26,61 @@ implementations to realize the huge variety of other possible designs.

Star reflex

-

The star is the simplest flow reflex. It accepts any number of +

(The implementation can be found in +faculty/basic/star.go.) + +

The star is the simplest flow reflex. It accepts any number of connected valves and ignores their specific names.

When a value is received on any one of its connected valves, the star reflex forwards that value to all other connected valves. -

Every forwarded value is emitted (i.e. sent) to its respective -valve on a dedicated goroutine. Therefore the star reflex never -blocks. +

Every forwarded value is emitted (i.e. sent) to its respective +valve on a dedicated Go routine. Therefore, the star reflex never +blocks.

An example usage: -

+
 {
-	star *Star
+	star *e.Star
 	one 1
 
 	star:X = one:
-	star:Y = *Show
-	star:Z = *Show
-	star:W = *Show
+	star:Y = *e.Show
+	star:Z = *e.Show
+	star:W = *e.Show
 }
 

When this circuit is materialized, the constant 1 will be emitted from gate one to valve X of gate star. The star gate will then forward the value to each of the valves Y, Z and W in parallel. Consequently -it will be printed on the standard output three times by the receiving *Show reflexes. +it will be printed on the standard output three times by the receiving *e.Show reflexes.

Fork reflex

+

(The implementation can be found in +be/union.go.) +

In our experience, the fork is the most commonly used synchronization primitive in Escher. -It requires that the distinguished empty-string valve be connected, as well as one or more +It requires that the distinguished empty-string valve be connected, as well as one or more freely-named (string or integer) other valves. -

Fork can be described as two entirely independent reflexes, let us call -them merge and split, embodied in one. +

Fork can be described as two entirely independent reflexes. +Let us call them merge and split, embodied in one.

Split direction

-

Whenever a value W is received on the empty string valve of a fork, -the reflex will process it using the split logic. The received value must be of type -circuit. For every valve whose name N is not the empty string, fork will -send the value of the gate named N from circuit W to that valve. +

Whenever a value W (must be of type Circuit) +is received on the empty string valve of a fork, +the reflex will process it using the split logic. +For every valve whose name N is not the empty string, +fork will send the value of the gate named N from circuit W to that valve.

Take for instance this program: -

+
 {
 	f *e.Fork
 	f: = {
@@ -81,14 +88,14 @@ send the value of the gate named N from circuit W to that valve.
 		y "World"
 		z "Foo"
 	}
-	f:x = *Show
-	f:y = *Show
+	f:x = *e.Show
+	f:y = *e.Show
 }
 
-

The values "Hello" and "World" will be sent -to and printed by the connected *Show reflexes. Whereas the value "Foo" -will be ignored. +

The values "Hello" and "World" will be sent +to and printed by the connected *e.Show reflexes. +Whereas the value "Foo" will be ignored.

Merge direction

@@ -97,16 +104,16 @@ this condition is met, it will merge all such values as the gates of a single ci gate names follow respective valve names, and will send this circuit out to its empty string valve.

Note that in the merge direction fork reflexes act as powerful synchronization primitives. -They effectively wait, blocking any other receptions on the non-empty-string valves, until -one value is available on each such valve. Subsequently these values are packed into a -single circuit and sent out. +They effectively wait, blocking any other receptions on the non-empty-string valves, until +one value is available on each such valve. +Subsequently, these values are packed into a single circuit and sent out.

Consider the following program, for instance: -

+
 {
 	f *e.Fork
-	f: = *Show
+	f: = *e.Show
 	f:x = "New"
 	f:y = "York"
 }
@@ -115,14 +122,14 @@ single circuit and sent out.
 

Fork will wait until "New" and "York" are received on valves x and y, respectively. Then the value -

+
 {
 	x "New"
 	y "York"
 }
 
-

will be sent to and printed by the *Show reflex. +

will be sent to and printed by the *e.Show reflex. ` } diff --git a/src/handbook/basis-os.escher b/src/handbook/basis-os.escher index 1e9913c..854eb4c 100644 --- a/src/handbook/basis-os.escher +++ b/src/handbook/basis-os.escher @@ -1,9 +1,9 @@ BuildBasisOSPage { - wf *io.WriteFile - wf:Content = *BasisOSPage - wf:Name = "basis-os.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *BasisOSPage + wf:Name = "basis-os.html" + : = wf:Ready } BasisOSPage { @@ -21,7 +21,7 @@ BasisOSPage {

The os faculty contains various reflexes for interacting with the POSIX environment within which an Escher program executes. It contains a few simple reflexes for accessing things like command-line arguments, environment variables, standard file -descriptors, process execution and the like. +descriptors, process execution and the like.

Most reflexes in os are implemented in less than 10 lines of code and in that sense their implementation is their best documentation. Here we detail @@ -39,7 +39,7 @@ a circuit value, containing the exit code of the process, is sent out to the An example of the command circuit value is as follows: -

+
 {
 	Env {
 		"PATH=/abc:/bin"
@@ -53,7 +53,7 @@ a circuit value, containing the exit code of the process, is sent out to the The returned IO circuit value is of the following form:
 
-
+
 {
 	Stdin (io.WriteCloser)
 	Stdout (io.ReadCloser)
@@ -66,7 +66,7 @@ a circuit value, containing the exit code of the process, is sent out to the The exit circuit is of the form
 
-
+
 {
 	Exit (int)
 }
@@ -75,7 +75,7 @@ a circuit value, containing the exit code of the process, is sent out to the The following example demonstrates invoking the /bin/ls command
 and forwarding its standard output and error to those of the Escher program itself.
 
-
+
 {
 	proc *os.Process
 	proc:Command = {
@@ -86,21 +86,21 @@ and forwarding its standard output and error to those of the Escher program itse
 	yio *e.Fork
 	proc:IO = yio:
 
-	yio:Stdin = *Ignore
+	yio:Stdin = *e.Ignore
 	yio:Stdout = *os.Stdout
 	yio:Stderr = *os.Stderr
 
-	yexit *e.Fork
-	proc:Exit = yexit:
-	
+	yExit *e.Fork
+	proc:Exit = yExit:
+
 	exit *os.Exit
-	yexit:Exit = exit:
+	yExit:Exit = exit:
 }
 

The standard file descriptors of the child process must always be handled. In this example, standard output and error are forwarded while standard input is -“ignored”. The reflex *Ignore is a “smart” reflex which +“ignored”. The reflex *e.Ignore is a “smart” reflex which ignores primitive values (integers, floats, etc.), whereas it closes io.Closer objects and it drains io.Reader objects. diff --git a/src/handbook/basis.escher b/src/handbook/basis.escher index b2d2456..ec467de 100644 --- a/src/handbook/basis.escher +++ b/src/handbook/basis.escher @@ -1,9 +1,9 @@ BuildBasisPage { - wf *io.WriteFile - wf:Content = *BasisPage - wf:Name = "basis.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *BasisPage + wf:Name = "basis.html" + : = wf:Ready } BasisPage { @@ -18,15 +18,22 @@ BasisPage {

Reflex basis and faculties

-

There are two kinds of reflexes in Escher: +

A faculty is a set of reflexes, similar to a package in Java, or a namespace in C++. + +

There are two kinds of reflexes in Escher:

    -
  • Those that are implemented in the underlying technology, the Go language, and linked into the runtime, which we call -basis reflexes. -
  • And those that are compositions of other reflexes, described by program circuits, which we call -derivative reflexes. +
  • Basis reflexes + are implemented in the underlying technology — + the Go language — and linked into the runtime +
  • Derivative reflexes + are compositions of other reflexes, + described by program circuits
-

Basis reflexes determine the basic arithmetic and data manipulation +

We will now have a look at the equivalent of std in C++, +the Escher basis faculty. + +

Basis reflexes determine the basic arithmetic and data manipulation operations that Escher programs can ultimately perform, as well as external technologies that Escher programs might have access to. @@ -40,22 +47,26 @@ and use cases might inform choices that we cannot predict from scratch.

Information flow

We find that most Escher programs benefit from -a few basic reflexes that control information flow. -We have included a few in the default runtime and they are described in the following -sections. These gates can be viewed as Escher's “synchronization” facilities. +a few basic reflexes that control information flow. +We have included a few in the default runtime, +and they are described in a separate page. +These gates can be viewed as Escher's “synchronization” facilities.

External technologies

-

Basis reflexes are also Escher's way of interacting with external technologies, +

Basis reflexes are also Escher's way of interacting with external technologies such as input/output devices. The POSIX systems is a canonical example of an -external technology and Escher has a dedicated os faculty for it. +external technology, and Escher has a dedicated +os +faculty for it.

Escher within Escher

-

The most powerful feature of Escher is its recursive nature: Circuit programs -can create program circuits and materialize them into other circuit programs. -This programming pattern is enabled by the escher faculty, -which among other things offers reflexes that materialize program circuits. +

The most powerful feature of Escher is its recursive nature: +Circuit programs can create program circuits and materialize them into other circuit programs. +This programming pattern is enabled by the +escher +faculty, which among other things offers reflexes that materialize program circuits. ` } diff --git a/src/handbook/cloud.escher b/src/handbook/cloud.escher index a7ed6f3..e419b95 100644 --- a/src/handbook/cloud.escher +++ b/src/handbook/cloud.escher @@ -1,9 +1,9 @@ BuildCloudPage { - wf *io.WriteFile - wf:Content = *CloudPage - wf:Name = "cloud.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *CloudPage + wf:Name = "cloud.html" + : = wf:Ready } CloudPage { @@ -26,52 +26,58 @@ CloudPage {

Paradigm for building clouds with Circuit and Escher

-

View a short slide deck with the key points in this article. +

+View a short slide deck +(or download it) +with the key points in this article.

This article is a design document that describes a framework for building and maintaining cloud applications comprised of large numbers of interconnected services in a manner that is intuitive and understandable to users.

We propose a syntactic abstraction, called Escher circuits, for representing the state of the cloud. -The abstraction enables modular compositing of large circuits from smaller components, facilitating -manual descriptions of cloud topologies. Further, it supports a circuit “difference” calculation +The abstraction enables modular composition of large circuits from smaller components, facilitating +manual descriptions of cloud topologies. Further, it supports a circuit “difference” calculation to facilitate the representation of incremental system changes.

The result is a system that provides a 3-step workflow for the Operations Engineer, which is captured in the following command-line operations: -

+
 cloud sense > current_state.circuit
 cloud diff desired_state.circuit current_state.circuit > diff.circuit
 cloud materialize diff.circuit
 

All .circuit files involved in the control of the cloud are simple text files -(that use Escher syntax) and as such all changes to the cloud +that use Escher syntax. Therefore, all changes to the cloud integrate cleanly with versioning systems like Git, -resulting in full hitorical accountability of cloud state changes. +which when used gives us full historical accountability of cloud state changes.

Framework

-

Every well-defined system requires a clear specification of the objects -at play, their possible interrelations in any moment in time, as well as the allowable operations -that can be performed to its components. +

Every well-defined system requires a clear specification of the objects at play, +their possible interrelations in any moment in time, +as well as the allowable operations that can be performed to its components. -

The systems of interest here, which model cloud applications in the datacenter, have +

The systems of interest here, which model cloud applications in the data-center, have three types of components: hosts, services and links. We will treat these objects cleanly in -a formal manner, but it should be clear that they will end up corresponding to well-known +a formal manner, but it should be clear that they will end up corresponding to well-known, real technologies utilized in specific manners.

Our hosts will correspond to physical machines (or virtual machines, as the case might be). Our services will correspond to Docker containers, whose images are configured in a standard manner to expect a number of named incoming or outgoing TCP connections. -And each of our links will correspond to a pair of lightweiht DNS servers, one at each endpoint host, +And each of our links will correspond to a pair of lightweight DNS servers, one at each endpoint host, configured to point the respective Docker TCP connections at each other. -

The exact details of the correspondence between hosts, services and links, and machines, Docker -containers and DNS servers, respectively, will be fleshed out in a later section. For now, suffice it to say -that this correspondence will be made simple and natural through the use of the gocircuit.org -tool for dynamic cloud orchestration (although with an appropriate driver a similar result can be accomplished -with any cloud provider like Google Compute Engine +

The exact details of the correspondence between hosts, services and links on the one hand, +and machines, Docker containers and DNS servers on the other, +will be fleshed out in a later section. +For now, suffice it to say that this correspondence will be made simple and natural +through the use of the gocircuit.org tool +for dynamic cloud orchestration (although with an appropriate driver, +a similar result can be accomplished with any cloud provider like +Google Compute Engine or Amazon EC2, for instance).

Getting back to the abstract system framework, the allowed relationships between hosts, services and links @@ -79,29 +85,32 @@ are described in a few simple postulates:

  • Every host in the system is identified by a unique string identifier -
  • Every service “resides” on one host and every such service has a string identifier, unique only across the services residing -on the same host. -
  • Every service has a “type” denoted by a string (which will correspond to the Docker image name of its container). -
  • Every service can have zero or more named “valves” (where a valve will correspond to a TCP connection, client or server) -under the requirement that valve names are unique within one service. -
  • Every link “connects” one service-valve pair to another, so that no such pair is connected more than once. +
  • Every service “resides” on one host and every such service has a string identifier, + unique only across the services residing on the same host. +
  • Every service has a “type” denoted by a string + (which will correspond to the Docker image name of its container). +
  • Every service can have zero or more named “valves” + (where a valve will correspond to a TCP connection, client or server) + under the requirement that valve names are unique within one service. +
  • Every link “connects” one service-valve pair to another, + so that no such pair is connected more than once.
-

Relationships between the components of a system can be represented visually using the same +

Relationships between the components of a system can be represented visually using the same symbolism employed by Escher for representing nested circuits: {{.Gate.Diagram}} -

In the illustration above there are two hosts named host1 and host2. -Two services, named cache and server, reside on host1. -One service, named database, resides on host2. Service cache +

In the illustration above, there are two hosts named host1 and host2. +Two services — named cache and server — reside on host1. +One service — named database — resides on host2. Service cache is of type MemCache, service server is of type Http and service database is of type Redis. There are two links in the system: one connecting the service-valve pair (server, x) to (cache, y), and one connecting (cache, z) to (database, w). (Disregard the labels p and q for now.) -

Thus far we have addressed the properties describing the state of a system in a singular moment in time. +

Thus far, we have addressed the properties describing the state of a system in a singular moment in time. System state can change over time, or “dynamically”, according to the following additional postulates:

    @@ -112,11 +121,14 @@ System state can change over time, or “dynamically”, according to the follow

    In particular, hosts, services and links can appear independently of each other. -

    Some of these dynamic events (of emergence or disappearance) will be caused by external -factors (for instance a host might die due to bad hardware) and others will be caused by operations -that we perform with the system (for instance, we might start a service). No matter what the cause -for an event is, the important thing is that these are the only changes of state that can happen to -the system. +

    Some of these dynamic events (of emergence or disappearance) +will be caused by external factors +(for instance a host might die due to bad hardware) +and others will be caused by operations that we perform with the system +(for instance, we might start a service). +No matter what the cause for an event is, +the important thing is that these are the only changes of state +that can happen to the system.

    The resulting UI to the engineer

    @@ -125,20 +137,20 @@ Some of the changes to the cloud will be caused by external factors, for instanc failures in the hosting hardware. Other changes will be caused by commands initiated by the user. -

    Since user-initiated changes and external changes are mutually asynchronous, we +

    Since user-initiated changes and external changes are mutually asynchronous, we propose the following simple workflow for the user's point-of-view or point-of-control, as the case might be:

    1. Connect to the “cloud” and retrieve a consistent representation of the “current” cloud state. -
    2. Compute the difference between a representation of the “desired” state of the cloud and the -retrieved “current” state. -
    3. Send a minimal stream of “commands” to the cloud, aimed at modifying its state from -“current” to “desired”. +
    4. Compute the difference between a representation of the “desired” state of the cloud + and the retrieved “current” state. +
    5. Send a minimal stream of “commands” to the cloud, + aimed at modifying its state from “current” to “desired”.

    In the remainder of this document, we describe the design of a command-line tool -cloud which embodies the above three operations as— +cloud, which embodies the above three operations as:

    1. cloud sense > current.circuit @@ -154,14 +166,14 @@ other backends, such as Amazon EC2 or Google Compute Engine, be used.

      The symbolic visual representation of system state, exemplified above, can very well be used as a formal representation, much like architectural blueprints are used as formal representations -of building design. However, this visual representation while natural for people is not easy to use +of building design. However, this visual representation, while natural for people, is not easy to use by machines.

      As we explain in the section on Escher syntax, this visual representation has an equivalent syntactic (i.e. written) form, which is well-suited for machine manipulations. In particular, the syntactic representation of the diagram above would be as follows: -

      +
       {
       	host1 {
       		cache MemCache
      @@ -177,21 +189,21 @@ In particular, the syntactic representation of the diagram above would be as fol
       }
       
      -

      In other words, every system state can be represented in the form of an Escher circuit. This gives -us a two-fold benefit. +

      In other words, every system state can be represented in the form of an Escher circuit. +This gives us a two-fold benefit.

      On the one hand, Escher circuits can be manipulated programmatically (both from Go and from Escher) simply as data structures. This allows flexible programmatic investigation of system state through familiar technologies.

      On the other hand, Escher's programming and materialization mechanism -allows for such circuits to be built out modularly from smaller component circuits. In other words, large -datacenter topologies can be composed out of smaller standard components, whereby even the components -circuits can span multiple machines and themselves be non-trivial subsystems. +allows for such circuits to be built out in a modular way from smaller component circuits. +In other words, large data-center topologies can be composed out of smaller standard components, +whereby even the components circuits can span multiple machines and themselves be non-trivial subsystems.

      For instance, our example system state could be generated out of smaller components in the following manner. Let the following circuit be an index (i.e. a library), consisting of two circuits designs: -

      +
       Index {
       	HttpHost {
       		cache MemCache
      @@ -206,9 +218,9 @@ Index {
       }
       
      -

      Then, if we materialize the program +

      Then, if we materialize the program relative to Index, -

      +
       {
       	host1 HttpHost
       	host2 DbHost
      @@ -216,23 +228,29 @@ Index {
       }
       
      -relative to Index, the resulting residue will be precisely the system state circuit that +the resulting residue will be precisely the system state circuit that we started with, i.e. the one illustrated in the drawing above.

      Dual representation

      -

      We call the circuit representation of system state, described thus far, a “primal” representation or simply a primal. -Every primal has an equivalent “dual” representation. Transitioning from primal to dual and vice-versa is a matter of -a simple transformation, as we shall see. +

      We call the circuit representation of system state, described thus far, +a “primal” representation or simply a primal. +Every primal has an equivalent “dual” representation. +Transitioning from primal to dual and vice-versa is a matter of +a simple transformation, as we shall see. -

      The dual representation of system state is useful to us, as it is more convenient to carry out certain manipulations -within this representation. In particular, it will be easier to compute the difference between two states in the dual. -As well as it will be easier to “materialize” a dual system state description into an actual running datacenter topology. +

      The dual representation of system state is useful to us, +as it is more convenient to carry out certain manipulations within this representation. +In particular, it will be easier to compute the difference between two states in the dual. +As well as it will be easier to “materialize” a dual system state description +into an actual running data-center topology. -

      The dual representation of a system state primal consists of two lists: a list of services and a list of links. +

      The dual representation of a system state primal consists of two lists: +a list of services and a list of links. -

      The list of services simply enumerates all services found in the primal, each specified by its “full path” -in the primal, accompanied by its type. For our running example, the list of services would be +

      The list of services simply enumerates all services found in the primal, +each specified by its “full path” in the primal, accompanied by its type. +For our running example, the list of services would be

       (host1.cache, MemCache)
      @@ -240,7 +258,7 @@ in the primal, accompanied by its type. For our running example, the list of ser
       (host2.database, Redis)
       
      -

      The list of links enumerates all service-to-service links present in the primal representation as pairs +

      The list of links enumerates all service-to-service links present in the primal representation as pairs of endpoints, wherein each endpoint (a service-valve pair) is also specified by its “full path” in the primal. In our example, that list would be: @@ -252,11 +270,11 @@ In our example, that list would be:

      It is not hard to see how the primal can be derived from the dual by reversing this process.

      Furthermore, it is self-evident that one can compute the “difference” between two systems, when -this makes sense, by simply computing the difference of their corresponding dual representations elementwise. +this makes sense, by simply computing the difference of their corresponding dual representations element-wise.

      Sensing and materializing

      -

      Sensing and materializing are the two operations that convert between the abstract circuit +

      Sensing and materializing are the two operations that convert between the abstract circuit representation of a cloud topology and the actual physical topology that executes on the cloud.

      Sensing is the operation of “reading” the current state of the cloud and representing it in the @@ -269,8 +287,8 @@ into an actual physical network of services running in the cloud. work. The subsequent conversions from dual to primal, a mere data structure transformation, was explained in the previous section. -

      The specific API for manipulating the cloud can be any: -Google Compute Engine, Amazon EC2, Circuit, and +

      The specific API for manipulating the cloud can be any: +Google Compute Engine, Amazon EC2, Circuit, and so forth. Our following explanations will be based on the Circuit as its simple API provides exactly the minimum necessary for such manipulations. @@ -278,33 +296,40 @@ exactly the minimum necessary for such manipulations.

      We have chosen to use executable Docker containers as embodiment for services. -

      Each service communicates with the outside—with other services—through a set +

      Each service communicates with the outside — with other services — through a set of zero or more named valves. A valve corresponds to a TCP client connection, a TCP server connection or both. -

      Service container images must be prepared in a standardized manner so that after the -execution of a container, our framework can (i) obtain the TCP server address corresponding -to each valve (if there is one), as well as (ii) supply the remote TCP server address if the -valve also corresponds to a TCP client connection. +

      Service container images must be prepared in a standardized manner, so that after the +execution of a container, our framework can +

        +
      • (i) obtain the TCP server address corresponding to each valve + (if there is one), as well as +
      • (ii) supply the remote TCP server address + if the valve also corresponds to a TCP client connection. +
      -

      There are various ways to prepare Docker containers to accomplish this and we do not +

      There are various ways to prepare Docker containers to accomplish this, and we do not mandate a specific one. Here, we merely suggest one way of doing it without going into unnecessary technical detail. -

      To accomplish (i), one can utilize the Docker -port mapping mechanism. In particular, the software inside the container can be hardwired to -listen to specific port numbers which, in lexicographic order, correspond to the valve names -of the service. Once the container is executed, the effective TCP server addresses—those visible to -other containers in the cloud network—can be automatically obtained using the docker port -command. They will be utilized by our system to “link” containers (i.e. service valves) in a manner described later. - -

      To accomplish (ii), we propose configuring each Docker service container to use a DNS +

      To accomplish (i), +one can utilize the +Docker port mapping mechanism. In particular, the software inside the container can be hardwired to +listen to specific port numbers, which — in lexicographic order — correspond to the valve names +of the service. Once the container is executed, the effective TCP server addresses — those visible to +other containers in the cloud network — can be automatically obtained using the docker port +command. They will be utilized by our system to “link” containers +(i.e. service valves) in a manner described later. + +

      To accomplish (ii), +we propose configuring each Docker service container to use a DNS server whose address is passed on it upon execution, using any one of the various mechanisms available for passing arguments to containers upon execution, provided by Docker itself. Subsequently, the software executing inside the Docker container should simply be hardwired to obtain the IP address for any given valve name by simply looking up that valve name (perhaps prefixed by a standard domain name) through the DNS system. Our framework, described later, -which executes the Docker containers will arrange for a light-weight dedicated DNS server +which executes the Docker containers will arrange for a light-weight, dedicated DNS server for each container, whose sole job would be to resolve these queries appropriately.

      Materializing a dual form to the cloud

      @@ -329,131 +354,158 @@ And the list of links is:
      1. Obtain a list of available and unused hosts in the cloud. -

        The Circuit API -presents all of its resources uniformly as a file system, where root level directories correspond to available hosts. -Unused hosts are precisely those root level directories that have no children (i.e. no services or other Circuit elements -are running on them). Such a list can be obtained through the API or through the command line using -circuit ls /.... Let us assume, for instance, that the list of available and unused hosts is -

        -/X65cc3c8e31817756
        -/Xe4abe0c286b0e0bc
        -/X9738a5e29e51338e
        -
        - -
      2. Group the elements of the list of services (from the dual) by host and assign a unique (available and unused) -Circuit host to each of the hosts from dual. For instance: -
        -(/X65cc3c8e31817756, host1)
        -(/Xe4abe0c286b0e0bc, host2)
        -
        - -
      3. Execute every service in the dual, as follows. Take, for instance, the service -
        -(host1.cache, MemCache)
        -
        - -
          -
        • Create a dedicated light-weight DNS server for this service, on the Circuit host assigned to this service in the previous step. -Using the Circuit, we spawn a DNS element and choose its name to follow this convention: -
          -/X65cc3c8e31817756/host1/cache/dns
          -
          -

          This is accomplished using the Circuit circuit mkdns command. The details of this are omitted for brevity. -Initially the DNS server will have no resource records, i.e. it will not resolve any lookups. Appropriate records will be added -to it later, when we materialize the list of links from the dual form. -

        • Execute the service's Docker container on that same host using a similar naming convention: -
          -/X65cc3c8e31817756/host1/cache/service
          -
          -

          This is accomplished using the Circuit's circuit mkdkr command, and recall that the service type, -MemCache in this case, is used as the name of the Docker image to be used. Furthermore, the IP address of the DNS server created in the previous step is passed to the Docker container on execution. -

        - -
      4. For each link in the list of links, add DNS resource records to the appropriate DNS servers. -Take for instance the link: -
        -(host1.cache:z, host2.database:w)
        -
        - -
          -
        • First, we inquire into the TCP server address for host1.cache:z, if one is available. -To do so, we access the Docker container -
          -/X65cc3c8e31817756/host1/cache/service
          -
          -and we query the TCP server address for valve named z, using the Docker port exporting -provisions set in place as described earlier. - -
        • Next, we access the Circuit DNS element -
          -/Xe4abe0c286b0e0bc/host2/database/dns
          -
          -and set the resource record for the domain name w to that TCP server address obtained in the previous step. -In addition to setting a DNS A record for the name w, we also set a -DNS TXT record for the same record with the value of host1.cache:z. -This TXT record will later facilitate recovering the dual form for this link directly from the DNS server itself. - -
        • Finally, we repeat the same process with the roles of host1.cache:z -and host2.database:w reversed. - -
        +

        The Circuit API + presents all of its resources uniformly as a *nix style file system, + where root level directories correspond to available hosts. + Unused hosts are precisely those root level directories, + that have no children + (i.e. no services or other Circuit elements are running on them). + Such a list can be obtained through the API, + or through the command line using circuit ls /.... + Let us assume, for instance, that the list of available and unused hosts is +

        +	/X65cc3c8e31817756
        +	/Xe4abe0c286b0e0bc
        +	/X9738a5e29e51338e
        +	
        + +
      5. Group the elements of the list of services (from the dual) by host, + and assign a unique (available and unused) Circuit host to each of the hosts from dual. + For instance: +
        +	(/X65cc3c8e31817756, host1)
        +	(/Xe4abe0c286b0e0bc, host2)
        +	
        + +
      6. Execute every service in the dual as follows. + Take, for instance, the service +
        +	(host1.cache, MemCache)
        +	
        + +
          +
        • Create a dedicated, light-weight DNS server for this service, + on the Circuit host assigned to this service in the previous step. + Using the Circuit, we spawn a DNS element and choose its name to follow this convention: +
          +		/X65cc3c8e31817756/host1/cache/dns
          +		
          +

          This is accomplished using the Circuit command circuit mkdns. + The details of this are omitted for brevity. + Initially, the DNS server will have no resource records, + i.e. it will not resolve any lookups. + Appropriate records will be added to it later, + when we materialize the list of links from the dual form. +

        • Execute the service's Docker container on that same host, + using a similar naming convention: +
          +		/X65cc3c8e31817756/host1/cache/service
          +		
          +

          This is accomplished using the Circuit command circuit mkdkr. + Recall that the service type — MemCache in this case — + is the name of the Docker image to be used. + Furthermore, the IP address of the DNS server created in the previous step + is passed to the Docker container on execution. +

        + +
      7. For each link in the list of links, + add DNS resource records to the appropriate DNS servers. + Take for instance the link: +
        +	(host1.cache:z, host2.database:w)
        +	
        + +
          +
        • First, we inquire into the TCP server address for host1.cache:z, + if one is available. + To do so, we access the Docker container +
          +		/X65cc3c8e31817756/host1/cache/service
          +		
          + and we query the TCP server address for the valve named z, + using the Docker port exporting provisions set in place as described earlier. + +
        • Next, we access the Circuit DNS element +
          +		/Xe4abe0c286b0e0bc/host2/database/dns
          +		
          + and set the resource record for the domain name w + to that TCP server address obtained in the previous step. + In addition to setting a DNS A record for the name w, + we also set a DNS TXT record for the same record with the value of host1.cache:z. + This TXT record will later facilitate recovering the dual form + for this link directly from the DNS server itself. + +
        • Finally, we repeat the same process with the roles + of host1.cache:z + and host2.database:w reversed. + +

      Sensing the cloud state into a dual form

      -

      Reading the current state of the cloud is fairly straightforward. -After listing the contents of the Circuit, using circuit ls /..., -there will only be paths ending in /service and paths -ending in /dns. We are going to read the list of services -from the former ones, and then the list of links from the latter one. +

      Reading the current state of the cloud is fairly straightforward. +After listing the contents of the Circuit using circuit ls /..., +there will only be paths ending in /service, +and paths ending in /dns. +We are going to read the list of services from the former ones, +and then the list of links from the latter one.

      To read the list of services, we consider each path ending in /service. For instance, the path

       /X65cc3c8e31817756/host1/cache/service
       
      -will correspond to a service named host1.cache (simply drop the first and last path elements -and replace slashes with dots). -Then we query the configuration of the underlying Docker container, using the -circuit peek command. This gives us the Docker image name of the -container—which is the service type—and thus the service entry has been recovered. +will correspond to a service named host1.cache +(simply drop the first and last path elements, and replace slashes with dots). +Then we query the configuration of the underlying Docker container, +using the circuit peek command. +This gives us the Docker image name of the container — which is the service type — +and thus the service entry has been recovered.

      To read the list of links, we consider in turn each path ending in /dns -unless it has already been considered. For instance— +unless it has already been considered. +For instance:

       /X65cc3c8e31817756/host1/cache/dns
       
      -

      This path will be a link endpoint with a prefix host1.cache:, as follows -from the manner in which we materialized links in the previous section. +

      This path will be a link endpoint with a prefix host1.cache:, +as follows from the manner in which we materialized links in the previous section.

      We then list the DNS resource records at this path, using circuit peek, -and in the case of this example we will see resource records for the domain names -y and z. In other words, the names correspond to valve -names of the service. And so each name gives us one endpoint in a link. In this case— +and in the case of this example, +we will see resource records for the domain names y and z. +In other words, the names correspond to valve names of the service. +And so each name gives us one endpoint in a link. +In this case:

       (host1.cache:y, …)
       (host1.cache:z, …)
       
      -

      To recover the other endpoint in each of the links, it suffices to look at the DNS TXT -record accompanying each of the names, y and z. -These TXT records will contain, as per the materialization process, the other endpoint -of the respective link, thus allowing is to recover the whole links— +

      To recover the other endpoint in each of the links, +it suffices to look at the DNS TXT record accompanying each of the names, +y and z. +These TXT records will contain, as per the materialization process, +the other endpoint of the respective link, +thus allowing us to recover the whole links:

       (host1.cache:y, host1.server:x)
       (host1.cache:z, host2.database:w)
       
      -

      Before we add these links to the list of links, we also verify that the opposing service -is still alive. Otherwise by convention we treat the link as not present. +

      Before we add these links to the list of links, +we also verify that the opposing service is still alive. +Otherwise — by convention — we treat the link as not present. For instance, if we want to verify that the endpoint host2.database is alive, -we simply consider the Circuit path list, obtained with circuit ls /..., and -look for the glob pattern /*/host2/database/service. +we simply consider the Circuit path list, obtained with circuit ls /..., +and look for the glob pattern /*/host2/database/service. ` } diff --git a/src/handbook/cmd.escher b/src/handbook/cmd.escher index 8920e9d..30916d5 100644 --- a/src/handbook/cmd.escher +++ b/src/handbook/cmd.escher @@ -1,9 +1,9 @@ BuildCommandPage { - wf *io.WriteFile - wf:Content = *CommandPage - wf:Name = "cmd.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *CommandPage + wf:Name = "cmd.html" + : = wf:Ready } CommandPage { @@ -21,7 +21,7 @@ CommandPage {

      The Escher binary is intended to be a general-purpose execution environment for Escher programs. It is invoked with two parameters: -

      +
       escher -src SourceDirectory MainDirective
       
      @@ -33,26 +33,30 @@ The following steps are performed upon invocation:
    2. The supplied source directory is traversed recursively:
      • Files with extension .escher are parsed as Escher source files. - Circuits found therein are placed in the startup index, such that their location in the index - is the same as the path of their source file relative to the source directory, followed by their circuit names. - For instance, the source file a/b/c.escher, containing: -
        -		CircuitName { … }
        -	
        - Will be registered under the index address a.b.c.CircuitName. + Circuits found therein are placed in the startup index, such that their location in the index + is the same as the path of their source file relative to the source directory, followed by their circuit names. + For instance, the source file a/b/c.escher, containing: +
        +	CircuitName { … }
        +
        + Will be registered under the index directive a.b.c.CircuitName.
      • Files with other extensions will be converted into SourceFile reflex materializers, - located in the index at the path of their source file relative to the source directory. SourceFile - reflexes, when materialized, return an io.ReadCloser for the content of the source file. + located in the index at the path of their source file relative to the source directory. SourceFile + reflexes, when materialized, return an io.ReadCloser for the content of the source file. +
    3. Finally, the main directive is materialized.
    +

    The following command, for instance, will generate the contents of this handbook and place it in the current working directory where it is invoked: -

    -escher -src github.com/gocircuit/escher/src *handbook.main
    +
    +
    +escher -src "$GOPATH/src/github.com/hoijui/escher/src/" "*handbook.main"
     
    +

    The -src can be omitted in favor of setting the ESCHER environment variable. ` diff --git a/src/handbook/css/main.css b/src/handbook/css/main.css index aff1125..64003fc 100644 --- a/src/handbook/css/main.css +++ b/src/handbook/css/main.css @@ -1,14 +1,61 @@ -*, body, li, ul, table, td, ul, ol, dt, dd { +body { /*background: rgb(240,255,255);*/ - background: rgb(245,255,255); + background: rgb(245, 255, 255); /*color: rgba(95,95,95, 0.8);*/ - color: rgba(30,30,30, 0.7); + color: rgba(30, 30, 30, 0.7); font-family: 'helvetica', sans-serif; font-weight: 400; line-height: 1.4em; /*letter-spacing: -0.05em;*/ } +table.dataRows { + /* This makes highlighted table rows be a single area, instead of being separated column-fields. */ + border-collapse: collapse; + width: 100%; +} + +table.dataRows th { + padding-top: 12px; + padding-bottom: 12px; + text-align: left; + background-color: rgb(150, 200, 255); +} + +table.dataRows td, table.dataRows th { + border-right: 1px dashed lightgray; + padding: 8px; +} + +table.dataRows td:last-child, #indexTable th:last-child { + border-right: none; +} + +table.dataRows tr:last-child td { + border-bottom: none; +} + +table.dataRows tr:nth-child(even) { + background-color: rgb(235, 255, 255); +} + +table.dataRows tr:nth-child(odd) { + background-color: rgb(215, 255, 255); +} + +/* Highlights link targets: The tag with the id set to the part of the URL after the '#'. 
*/ +table.dataRows tr:target { + animation: flashTableRow 2s linear; +} + +@keyframes flashTableRow { + 50% { background: rgb(255, 255, 50) } +} + +.paddingBetweenRows td { + padding: 0 15px 0 15px; +} + body, div { padding: 0; margin: 0; @@ -64,7 +111,7 @@ pre { div.page { margin: auto auto; padding: 4em; - //width: 750px; + /*width: 750px;*/ } div.header { diff --git a/src/handbook/debug.escher b/src/handbook/debug.escher index 187d689..c03938f 100644 --- a/src/handbook/debug.escher +++ b/src/handbook/debug.escher @@ -1,9 +1,9 @@ BuildDebugPage { - wf *io.WriteFile - wf:Content = *DebugPage - wf:Name = "debug.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *DebugPage + wf:Name = "debug.html" + : = wf:Ready } DebugPage { @@ -18,121 +18,140 @@ DebugPage {

    Debugging and panics

    -

    Similarly to other languages like Go, Escher has two mechanisms for -debugging programs: panic traces and user-controlled code instrumentation. +

    Similarly to other languages, like Go, +Escher has two mechanisms for debugging programs. -

    Panic traces

    +

    1. Panic traces

    There are two ways in which a running Escher program can panic: -

      +
      1. While a reflex is processing an incoming event in a Cognize method, or
      2. During materialization of a program circuit which is invalid.
      3. -
    +

    In both cases, two types of “traces” will be printed out automatically before the process exits. -One of these traces is the standard Go stack trace. This is useful to pin-point the location in the -Go implementation of a reflex where the panic occurs, in the event of panics occuring in Cognize -methods. The Go stack trace, however, will not reflect the materialization path that lead to the -creation of the problematic reflex. This is reflected by the second type of trace, which we demonstrate -by example. - -

    Consider the following toy Escher program: - -

    -{
    -	*Show = "Parent circuit"
    -
    -	m *escher.QuickMaterialize
    -	m:Residue = *Ignore
    -	m:Index = *escher.Index
    +One of these traces is the standard Go stack trace.
    +This is useful to pin-point the location in the Go implementation of a reflex where the panic occurs,
    +in the event of panics occurring in Cognize methods.
    +The Go stack trace, however, will not reflect the materialization path
    +that lead to the creation of the problematic reflex.
    +This is reflected by the second type of trace,
    +which we demonstrate by example.
    +
    +

    Consider +the following toy Escher program: + +

    +Debug {
    +	*e.Show = "Parent circuit"
    +
    +	m *e.QuickMaterialize
    +	m:Residue = *e.Ignore
    +	m:Index = *e.Index
     	m:Program = {
    -		*escher.Breakpoint = 1
    +		*e.Breakpoint = 1
     	}
     }
     
    -

    This program will first materialize the inner program, which in turn will send the constant 1 -to the breakpoint reflex, causing it to panic. In other words, an outer circuit materializes an inner circuit -and subsequnetly a panic occurs in the inner circuit. The goal of the Escher trace is to reflect that. +

    This program will first materialize the inner program, +which in turn will send the constant 1 to the breakpoint reflex, +causing it to panic. +In other words, an outer circuit materializes an inner circuit, +and subsequently a panic occurs in the inner circuit. +The goal of the Escher trace is to reflect that. + +

    When run with: + +

    +src_dir="$GOPATH/src/github.com/hoijui/escher/src/"
    +escher -src "$src_dir" "*tutorial.Debug"
    +
    + The following Escher trace will be printed:
     BASIS(:)
    -DIRECTIVE(:) *escher.Breakpoint/*escher.Breakpoint
    +DIRECTIVE(:) *e.Breakpoint/*e.Breakpoint
     CIRCUIT() {
    -        0 *escher.Breakpoint
    -        1 1
    -        0: = 1:
    +		0 *e.Breakpoint
    +		1 1
    +		0: = 1:
     }
     MATERIALIZE() {
    -        0 *escher.Breakpoint
    -        1 1
    -        0: = 1:
    +		0 *e.Breakpoint
    +		1 1
    +		0: = 1:
     }
     BASIS(:Residue :View)
    -DIRECTIVE(:Residue :View) *escher.Materialize/*escher.Materialize
    +DIRECTIVE(:Residue :View) *e.Materialize/*e.Materialize
     CIRCUIT(:Index :Program :Residue) {
    -        x *escher.Materialize
    -        y *e.Fork
    -        :Residue = x:Residue
    -        :Index = y:Index
    -        :Program = y:Program
    -        x:View = y:
    +		x *e.Materialize
    +		y *e.Fork
    +		:Residue = x:Residue
    +		:Index = y:Index
    +		:Program = y:Program
    +		x:View = y:
     }
    -DIRECTIVE(:Index :Program :Residue) *escher.QuickMaterialize/*escher.QuickMaterialize
    +DIRECTIVE(:Index :Program :Residue) *e.QuickMaterialize/*e.QuickMaterialize
     CIRCUIT() {
    -        m *escher.QuickMaterialize
    -        0 *Show
    -        1 "Parent circuit"
    -        2 *Ignore
    -        3 *escher.Index
    -        4 {
    -                0 *escher.Breakpoint
    -                1 1
    -                0: = 1:
    -        }
    -        0: = 1:
    -        m:Program = 4:
    -        m:Residue = 2:
    -        m:Index = 3:
    +		m *e.QuickMaterialize
    +		0 *e.Show
    +		1 "Parent circuit"
    +		2 *e.Ignore
    +		3 *e.Index
    +		4 {
    +				0 *e.Breakpoint
    +				1 1
    +				0: = 1:
    +		}
    +		0: = 1:
    +		m:Program = 4:
    +		m:Residue = 2:
    +		m:Index = 3:
     }
     DIRECTIVE() *tutorial.Debug/*tutorial.Debug
     MATERIALIZE() *tutorial.Debug
     MAIN()
     
    -

    Escher traces consist of frames, indicated by capital letters. Frames correspond to -reflexes (basis or derivative) or directives. They are listed in most-specific to least-specific -order: The first frame corresponds to the problematic reflex, whereas the last one corresponds -to the main circuit being materialized. +

    Escher traces consist of frames, indicated by capital letters. +Frames correspond to reflexes (basis or derivative) or directives. +They are listed in most-specific to least-specific order: +The first frame corresponds to the problematic reflex, +whereas the last one corresponds to the main circuit being materialized. -

    Since every frame corresponds to a reflex that is materialized, a list of valves connected -to this reflex is given in brackets next to the frame name. Following the brackets is a frame -argument whose meaning depends on the type of frame: +

    Since every frame corresponds to a reflex that is materialized, +a list of valves connected to this reflex is given in brackets next to the frame name. +Following the brackets is a frame argument whose meaning depends on the type of frame:

    • MAIN marks the start of the Escher runtime. -
    • MATERIALIZE frames mark the beginning of materialization. The argument of such frames -describe the program that is being materialized. +
    • MATERIALIZE frames mark the beginning of materialization. + The argument of such frames describe the program that is being materialized.
    • DIRECTIVE frames indicate that a directive gate value is being resolved. -The argument equals the source of the directive, followed by the computed fully-qualified -source of directive (in case the directive uses local addressing). -
    • CIRCUIT frames indicate that a program circuit is being materialized. Their -argument equals the circuit source. + The argument equals the source of the directive, + followed by the computed, fully-qualified source of directive + (in case the directive uses local addressing). +
    • CIRCUIT frames indicate that a program circuit is being materialized. + Their argument equals the circuit source.
    • BASIS frames indicate that a basis reflex is being materialized.
    • NOUN frames indicate that a noun reflex is being materialized.
    -

    Instrumentation reflexes

    +

    2. Instrumentation reflexes

    -

    In many lanuages the simplest instrumentation technique is the insertion of “printf” -statements. Escher has its own analog. Given a link in a circuit program, the idea is to print out -the values that flow through that link without otherwise affecting the execution of the program. +

    In many languages, the simplest instrumentation technique +is the insertion of “printf” statements. +Escher has its own analog. +Given a link in a circuit program, +the idea is to print out the values that flow through that link +without otherwise affecting the execution of the program. -

    This is accomplished with the use of a *Show reflex, which simply lets values -pass through it while printing them on standard error together with the name of the valve they -were received on. +

    This is accomplished with the use of a *e.Show reflex, +which simply lets values pass through it while printing them on standard error +together with the name of the valve they were received on.

    Suppose the following program is to be debugged: @@ -144,14 +163,14 @@ were received on. }

    -

    We could then add a debug *Show reflex to “eavesdrop” on the link +

    We could then add a debug *e.Show reflex to “eavesdrop” on the link from source to sink, like so:

     {
     	source *Source
     	sink *Sink
    -	eve *Show
    +	eve *e.Show
     	source: = eve:Source
     	eve:Sink = sink:
     }
    diff --git a/src/handbook/faculty.escher b/src/handbook/faculty.escher
    new file mode 100644
    index 0000000..fdb5783
    --- /dev/null
    +++ b/src/handbook/faculty.escher
    @@ -0,0 +1,9 @@
    +
    +faculty {
    +	doc `/**
    +		This package contains the sources for the Escher language user guide.
    +		You may build it by running
    +		$ESCHER/scripts/build_handbook.sh.
    +		*/`
    +}
    +
    diff --git a/src/handbook/figures.escher b/src/handbook/figures.escher
    index 521c170..ab1a31b 100644
    --- a/src/handbook/figures.escher
    +++ b/src/handbook/figures.escher
    @@ -10,7 +10,7 @@ FigureTelescope {
     	fig2 *FigurePngSvg
     	fig2:Image = "telescope"
     	fig2:Width = "600px"
    -	fig2:Caption = `This is a symbolic representation 
    +	fig2:Caption = `This is a symbolic representation
     	of two circuit designs, named App and Database.
     	The illustration omits valve names.`
     	fig2: = :
    @@ -20,8 +20,8 @@ FigureExpanded {
     	fig3 *FigurePngSvg
     	fig3:Image = "expanded"
     	fig3:Width = "500px"
    -	fig3:Caption = `The inversion of colors—from white-on-black in the enclosing circuit, to
    -	black-on-white in the substituted circuits—is a visual symbolism indicating that 
    +	fig3:Caption = `The inversion of colors — from white-on-black in the enclosing circuit, to
    +	black-on-white in the substituted circuits — is a visual symbolism indicating that
     	the substitutions are separate data-structures from the enclosing one. Specifically,
     	the visual links that cross circuits are not explicitly represented in the data structure.
     	This can be accomplished by flattening (see below).`
    diff --git a/src/handbook/glossary.escher b/src/handbook/glossary.escher
    new file mode 100644
    index 0000000..b8e5af7
    --- /dev/null
    +++ b/src/handbook/glossary.escher
    @@ -0,0 +1,476 @@
    +
    +BuildGlossaryPage {
    +		wf *io.WriteFile
    +		wf:Content = *GlossaryPage
    +		wf:Name = "glossary.html"
    +		: = wf:Ready
    +}
    +
    +GlossaryPage {
    +	h *Html
    +	h: = :
    +	h:Title = "Escher - Explanation of important words"
    +	h:Body = t:
    +
    +	t *text.QuickForm
    +	t:Data = {}
    +	t:Form = `
    +
    +

    Explanation of important words

    + +

    Gate statements begin on a new line with a gate name identifier, space, and a gate value expression. + There are six value types that can be expressed: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    WordAlternative WordsMeaningSyntaxExampleExplanation
    Address--- + rootFaculty.parentFac.childFac.theGate
    + TODO or is it "topMostGate.lowerGate.lowestGate" ?
    + I think it is the first one, + but what is the second one? or is it an address too?
    + I guess it is practically simply never used/required. +
    + The fully qualified path to a gate, relative to a certain Index. + An address is represented by series circuits. +
    Circuit-meaningsyntax + A simple circuit called Nand, + with 2 gates and 4 links: +
    +Nand {
    +	and *binary.And
    +	not *binary.Not
    +
    +	and:X = :X
    +	and:Y = :Y
    +	and:XAndY = not:Z
    +	not:NotZ = :
    +}
    +			
    +
    + A circuit is the central unit of declaration in Escher. + It specifies gates and links + immediately contained within that (class of) circuit + (no gates or links outside or inside its own gates). +
    Default Valve + gateX: (vector on gate gateX with the default valve)
    + : (vector on the super gate with the default valve) +
    + The valve denoted by the empty string "".
    + You may think of it as the default input/output of a circuit.
    + It has no special logic within Escher, other than not requiring to be named in code. +
    Directive-meaning- + materialize: + *fully.qualified.Name
    + recall: + @fully.qualified.Name +
    + A combination of a verb and an address.
    + It means:
    + "Do 'verb' with 'address'." +
    Facultypackage?meaning-- + Escher's word for a namespace; a group of Escher source files in a single directory, + or rather, the circuits therein. +
    Flow---- + XXX (A synonym for Link?)
    + TODO Should probably be renamed to Link
    + See the go code of the circuit struct at + circuit/circuit.go. +
    GateMembrane?
    Brane?
    -syntax + A circuit with 7 gates: +
    +alpha {
    +	directive1 *fully.qualified.Name
    +	directive2 @fully.qualified.Name
    +	integral   123
    +	floating   3.14
    +	complex    (1-3i)
    +	quoted     "abcd\n\tefgh"
    +	backQuoted {{ .Gate.BackQuoted }}
    +}
    +			
    +
    + An instantiation of a circuit inside another circuit.
    + Gates are the nodes of an Escher circuit, if interpreted as a graph.
    + They are connected to each other by creating links between their valves. +
    Index-meaning- + This exemplifies part of a typical index: +
    +{
    +	e {
    +		Alt (be.Materializer)
    +		Alternate (be.Materializer)
    +		Breakpoint (be.Materializer)
    +		Fork (be.Materializer)
    +		Grow (be.Materializer)
    +		Help (be.Materializer)
    +		Ignore (be.Materializer)
    +		...
    +	}
    +	element {
    +		Docker (be.Materializer)
    +		Process (be.Materializer)
    +	}
    +	...
    +}
    +			
    +
    + An Escher index is a tree circuit, + which we interpret as a list containing the addresses of all gates. +
    Map-meaning- +
    +ImplicitIntMap {
    +	*fully.qualified.Name
    +	123
    +	3.14
    +}
    +
    +ExplicitIntMap {
    +	3   *fully.qualified.Name
    +	6   @fully.qualified.Name
    +	1   123
    +}
    +
    +StringMap {
    +	directive  *fully.qualified.Name
    +	integral   123
    +	floating   3.14
    +}
    +
    +MixedMap {
    +	directive  *fully.qualified.Name
    +	1          123
    +	2          3.14
    +}
    +			
    +
    + A circuit with the limitation that it has no links.
    + While we might think of a general circuit more as a set of instructions plus data, + a map is rather purely data.
    + It maps keys of type int or string to arbitrary values, + quite like maps in other programming languages. +
    materialize--- + *fully.qualified.Name
    + or
    +
    +*{
    +	fully
    +	qualified
    +	Name
    +}
    +			
    +
    TODO
    Name---- + The name part of a gate. + Each gate is comprised of a name and a value. + A name can be any string without spaces, + but in practice you probably want to limit it more, + say to a common definition of a variable name as found in many other languages, + for example using the regex: [a-zA-Z0-9_]+ +
    Programrunnable?, executable circuit?meaning- + See any of the *Main circuits in the + Escher tutorials + Programs are circuits that describe executable systems.
    recall--- + @fully.qualified.Name
    + or
    +
    +@{
    +	fully
    +	qualified
    +	Name
    +}
    +			
    +
    TODO
    Seriesmeaningsyntax + implicit series: +
    +alpha {
    +	*fully.qualified.Name
    +	@fully.qualified.Name
    +	123
    +	3.14
    +	(1-3i)
    +	"abcd\n\tefgh"
    +	{{ .Gate.BackQuoted }}
    +	{
    +		A 1
    +		B "C"
    +	}
    +}
    +			
    + + which is equivalent to this explicit series: + +
    +alpha {
    +	0 *fully.qualified.Name
    +	1 @fully.qualified.Name
    +	2 123
    +	3 3.14
    +	4 (1-3i)
    +	5 "abcd\n\tefgh"
    +	6 {{ .Gate.BackQuoted }}
    +	7 {
    +		A 1
    +		B "C"
    +	}
    +}
    +			
    +
    + A series is a map with the additional restrictions that:
    +
      +
    • it can only have int names
    • +
    • the names have to form a consecutive series, starting from 0
    • +
    + They are an analogue to arrays in other languages. +
    Super Gatesuper-membrane
    super-brane
    -- + :valveNo3 (vector with valve valveNo3 on the super gate)
    + : (vector with default valve on the super gate) +
    + The empty-string named gate is called the super gate.
    + While one cannot assign a value to it through syntax, it is possible to connect links to it. + +

    When materializing, + the links connected to the super gate are exposed to the higher-level/enclosing/“super” circuit. +

    Tree-meaning- +
    +Tree {
    +	Trunk {
    +		Branches {
    +			"Johnny"
    +			"Katie"
    +		}
    +	}
    +	Root {
    +		Tentacles {
    +			"Grandpa"
    +			"Grandma"
    +		}
    +	}
    +}
    +			
    +
    + A recursive structure of maps, + where maps can contain other maps. +
    Value--- +
    +SomeCircuit {
    +	directive1 *fully.qualified.Name
    +	directive2 @fully.qualified.Name
    +	integral   123
    +	floating   3.14
    +	complex	(1-3i)
    +	quoted	 "abcd\n\tefgh"
    +	backQuoted {{ .Gate.BackQuoted }}
    +}
    +			
    +
    + The value part of a gate. + Each gate is comprised of a name and a value.
    + FIXME One part of the documentation says the value can be any Go value, while another part states + that it can be one of Integer, Float, complex-number, string, directive or circuit. Both can't be true. +
    ValveI/O hole/connector--- + An input and/or output "connector" between the inside and the outside of a gate. + It has a unique name within the circuit it is declared in. + It can be connected to at most one other valve (of the same gate or another) + using a link. +
    Vectorvalve-ID?-- + gateX:valveNo3
    + :valveNo3
    + gateX:
    + : +
    + A qualified valve, consisting of a gate-name + and one of its valves names, separated with a ":".
    + If the gate is being omitted, the vector refers to the super gate.
    + If the valve is being omitted, the vector refers to the default valve. +
    Verbinstruction?-- + *
    or
    @ +
    + Can be either "*" (materialize) + or "@" (recall), + and is the first part of a directive. +
    + +{{.Gate.Ticker}} + +` +} diff --git a/src/handbook/html.escher b/src/handbook/html.escher index 2ae13c5..85e463f 100644 --- a/src/handbook/html.escher +++ b/src/handbook/html.escher @@ -21,15 +21,15 @@ Html { d:Body = :Body d:Footer = `

    ` d:Header = `
    - Escher A language for connecting technologies using pure metaphors + Escher A language for connecting technologies using pure metaphors
    ` : = t: diff --git a/src/handbook/img/NAND.png b/src/handbook/img/NAND.png deleted file mode 100644 index 9815306..0000000 Binary files a/src/handbook/img/NAND.png and /dev/null differ diff --git a/src/handbook/img/circuit.svg b/src/handbook/img/circuit.svg new file mode 100644 index 0000000..bd03ac2 --- /dev/null +++ b/src/handbook/img/circuit.svg @@ -0,0 +1,850 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Circuit Valves + (= Membranes) + Gates + Links external external internal + + :valve3 + gate2:valve1 + gate3:valve2 + : + gate3 gate2 gate1 gate2:valve1 = gate3:valve2 + "" gate2:valve3 = :valve3 + : = gate1:valve1 + + + diff --git a/src/handbook/img/cloud.png b/src/handbook/img/cloud.png deleted file mode 100644 index 140ee0b..0000000 Binary files a/src/handbook/img/cloud.png and /dev/null differ diff --git a/src/handbook/img/expanded.png b/src/handbook/img/expanded.png deleted file mode 100644 index 5da6806..0000000 Binary files a/src/handbook/img/expanded.png and /dev/null differ diff --git a/src/handbook/img/flattened.png b/src/handbook/img/flattened.png deleted file mode 100644 index 0479670..0000000 Binary files a/src/handbook/img/flattened.png and /dev/null differ diff --git a/src/handbook/img/reflex.jpg b/src/handbook/img/reflex.jpg deleted file mode 100644 index 2a42107..0000000 Binary files a/src/handbook/img/reflex.jpg and /dev/null differ diff --git a/src/handbook/img/reflex.svg b/src/handbook/img/reflex.svg new file mode 100644 index 0000000..4ae3502 --- /dev/null +++ b/src/handbook/img/reflex.svg @@ -0,0 +1,159 @@ + + + + + + + +Created by potrace 1.16, written by Peter Selinger 2001-2019 + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/handbook/img/scanprint.png b/src/handbook/img/scanprint.png deleted file mode 
100644 index ce2aff8..0000000 Binary files a/src/handbook/img/scanprint.png and /dev/null differ diff --git a/src/handbook/img/telescope.png b/src/handbook/img/telescope.png deleted file mode 100644 index ee9f55d..0000000 Binary files a/src/handbook/img/telescope.png and /dev/null differ diff --git a/src/handbook/img/tkr.jpg b/src/handbook/img/tkr.jpg deleted file mode 100644 index 8026f30..0000000 Binary files a/src/handbook/img/tkr.jpg and /dev/null differ diff --git a/src/handbook/img/tkr.svg b/src/handbook/img/tkr.svg new file mode 100644 index 0000000..e65e439 --- /dev/null +++ b/src/handbook/img/tkr.svg @@ -0,0 +1,204 @@ + + + + + +Created by potrace 1.16, written by Peter Selinger 2001-2019 + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/handbook/index.escher b/src/handbook/index.escher index 59ba631..3c2cc6d 100644 --- a/src/handbook/index.escher +++ b/src/handbook/index.escher @@ -1,59 +1,70 @@ BuildIndexPage { - wf *io.WriteFile - wf:Content = *IndexPage - wf:Name = "index.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *IndexPage + wf:Name = "index.html" + : = wf:Ready } IndexPage { - h *Html - h: = : - h:Title = "The Escher language" - h:Body = ` -

    Escher language

    - -

    Web pages and links form a graph. Datacenter computers and cables form a graph. -Application processes and connections form a graph. Facebook users and -friendships form a graph. Facebook groups and memberships form a graph. -Neurons and synapses form a graph. Threads and shared data structures form a graph. + h *Html + h: = : + h:Title = "The Escher language" + h:Body = ` +

    Escher programming language

    + +

    Motivation

    + +

    Web pages and links form a graph. +Data-center computers and cables form a graph. +Application processes and connections form a graph. +Facebook users and friendships form a graph. +Facebook groups and memberships form a graph. +Neurons and synapses form a graph. +Threads and shared data structures form a graph. Processes and sockets form a graph.

    Not only are all of the above situations visually described by graphs, but also their essential behavior (as best as we understand it) is the same in all cases: -Independent processing units, pairwise-interlinked by sequential channels—both channels -and processors emerging and disappearing asynchronously. +They are comprised of independent processing units, +which are pairwise-interlinked by sequential channels, +while both channels and processors are emerging and disappearing asynchronously.

    Three decades ago, before the above examples were within practical reach, -a British gentleman—named Tony -Hoare—had noticed that this essential behavior was exhibited by virtually -all identifiable interacting physical (as well as man-made abstract) entities: People interacting with people, -people interacting with vending machines, components of vending -machines interacting with each other, animals interacting with animals, -cells interacting with cells, proteins interacting with proteins, and so on. - -

    He called this high-level behavioral model of the world (or discernable subsystems thereof) +a British gentleman — named Tony +Hoare — had noticed that this essential behavior was exhibited by virtually +all identifiable interacting physical (as well as man-made abstract) entities: +People interacting with people, +people interacting with vending machines, +components of vending machines interacting with each other, +animals interacting with animals, +cells interacting with cells, +proteins interacting with proteins, +and so on. + +

    He called this high-level behavioral model of the world (or discernible subsystems thereof) Communicating Sequential Processes. Hoare's model is nothing more and nothing less than a minimal abstraction of how -we see and understand (and subsequently will to control) the world from an -observer—i.e. third person—point of view. +we see and understand (and subsequently will to control) the world +from an observer — i.e. third person — point of view. -

    I prefer to call such systems circuits both for brevity and for the fact that +

    I prefer to call such systems circuits, both for brevity and for the fact that electrical circuits were probably the first man-made manifestation of communicating sequential processes that was rich, flexible and not present in untouched nature.

    Today's connected Internet services and devices are no different than electrical components on a circuit: They are independent processing units communicating via sequential streams of data, as opposed to sequential streams of changes in electrical voltage. The difference -between circuits analog and digital is entirely linguistic: It is the difference between a +between analog and digital circuits is entirely linguistic: It is the difference between a floating-point number (the voltage) and a data structure (a digital message).

    If it is indeed the case that most things that we program or that we program about are circuits at the end of the day, then it is only appropriate to complement Hoare's -model of everything with an appropriate programming language. This is the goal of Escher. +model of everything with an appropriate programming language. +This is the goal of Escher.

    Sources

    -

    Find the source repository for Escher on GitHub. +

    Find the source repository for Escher on GitHub. Follow us on Twitter @escherio.

    Documentation

    @@ -99,5 +110,5 @@ Follow us on Twitter @escherio.

    - ` + ` } diff --git a/src/handbook/install.escher b/src/handbook/install.escher index 1604958..30f84c0 100644 --- a/src/handbook/install.escher +++ b/src/handbook/install.escher @@ -1,41 +1,44 @@ BuildInstallPage { - wf *io.WriteFile - wf:Content = *InstallPage - wf:Name = "install.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *InstallPage + wf:Name = "install.html" + : = wf:Ready } InstallPage { - h *Html - h: = : - h:Title = "Building and installing Escher" - h:Body = t: + h *Html + h: = : + h:Title = "Building and installing Escher" + h:Body = t: - t *text.QuickForm - t:Data = {} - t:Form = ` -

    Bulding and installing Escher

    + t *text.QuickForm + t:Data = {} + t:Form = ` +

    Building and installing Escher

    Escher requires the Go language toolchain to be installed first. Fetching, building and installing Escher can then be accomplished with one command: -

    -	% go get github.com/gocircuit/escher/escher
    +
    +go get github.com/hoijui/escher/escher
     
    -

    To check that installation succeeded, run - -

    -	% ESCHER=github.com/gocircuit/escher/src escher test.All
    -	+ Test basic.TestFork (ok)
    -	+ Test basic.TestFunc (ok)
    -	+ Test text.TestForm (ok)
    -	+ Test yield.TestValues (ok)
    -	…
    +

    To check whether the installation succeeded, run: +

    +ESCHER=$GOPATH/src/github.com/hoijui/escher/src escher "*test.All"
     
    -

    Note that the environment ESCHER must point to the src +NOTE The environment variable ESCHER must point to the src subdirectory of the main Escher repo on your host. - ` + +

    You should see output similar to this: +

    ++ Test *basic.TestFork (ok)
    ++ Test *basic.TestAlternate (ok)
    ++ Test *text.TestForm (ok)
    ++ Test *yield.TestFlows (ok)
    ++ Test *yield.TestValues (ok)
    +
    + ` } diff --git a/src/handbook/main.escher b/src/handbook/main.escher index a67e1ab..6236ad5 100644 --- a/src/handbook/main.escher +++ b/src/handbook/main.escher @@ -10,6 +10,7 @@ main { x:6 = *BuildMeaningPage x:7 = *BuildReflexPage x:8 = *BuildProgramPage + x:glossary = *BuildGlossaryPage x:9 = *BuildBasisPage x:10 = *BuildBasisFlowPage diff --git a/src/handbook/materialize.escher b/src/handbook/materialize.escher index c16231a..36afe19 100644 --- a/src/handbook/materialize.escher +++ b/src/handbook/materialize.escher @@ -1,8 +1,8 @@ BuildMaterializePage { - file *io.WriteFile - file:Content = *MaterializePage - file:Name = "m.html" - : = file:Ready + file *io.WriteFile + file:Content = *MaterializePage + file:Name = "m.html" + : = file:Ready } MaterializePage { @@ -35,5 +35,5 @@ MaterializePage {

    - ` + ` } diff --git a/src/handbook/meaning.escher b/src/handbook/meaning.escher index 6f403d8..df3c500 100644 --- a/src/handbook/meaning.escher +++ b/src/handbook/meaning.escher @@ -1,9 +1,9 @@ BuildMeaningPage { - wf *io.WriteFile - wf:Content = *MeaningPage - wf:Name = "meaning.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *MeaningPage + wf:Name = "meaning.html" + : = wf:Ready } MeaningPage { @@ -15,7 +15,7 @@ MeaningPage { f *e.Fork f:Ticker = tkr: - tkr *FigureJpg + tkr *FigurePngSvg tkr:Image = "tkr" tkr:Width = "500px" tkr:Caption = `` @@ -31,9 +31,10 @@ When thinking and programming in Escher, circuits will have a meaning dependent on context and/or content. A circuit interpretation will usually utilize a subset of the representational freedoms of a circuit, and will have an intuitive graphical representation. -

    Here we introduce four basic circuit interpretations—or specializations, if you will—that +

    Here we introduce four basic circuit interpretations — or specializations, if you will — that will also serve us as a vocabulary when discussing Escher in following chapters. +

    Series

    Circuit gate names, recall, can be integers or strings. If a circuit has no links and the gate @@ -42,7 +43,7 @@ Series are equivalent to arrays and/or slices in other programming languages, li Gate names are slice indices; gate values are slice element values.

    For instance, the circuit -

    +
     Singer {
     	0 "Dolly"
     	1 "Rebecca"
    @@ -50,7 +51,7 @@ Singer {
     }
     
    is analogous (in meaning) to the Go slice: -
    +
     var Singer = []interface{}{
     	"Dolly",
     	"Rebecca",
    @@ -62,7 +63,7 @@ var Singer = []interface{}{
     the syntax section, we have dedicated a shorthand
     syntax for series circuits that omits the gate names:
     
    -
    +
     Singer {
     	"Dolly"
     	"Rebecca"
    @@ -70,18 +71,47 @@ Singer {
     }
     
    -

    Indices

    -

    More generally than series, when a circuit has no links we call it an index and -we view it as a map from integers and/or strings to anything else. In this respect an index -is akin in purpose to structures, dictionaries, hash tables and maps in other languages. +

    Maps

    + +

    +More generally than series, +when a circuit has no valves/links, +we call it a map (previously index), +and we view it as a mapping from integers and/or strings to anything else. +In this respect, a map is akin in purpose to structures, dictionaries, hash tables and maps in other languages. +

    + +
    +ImplicitIntMap {
    +	*fully.qualified.Name
    +	123
    +	3.14
    +}
     
    -

    The gate values of index circuits are analogously called children and they can -be of primitive types (integers, floats, etc.) as well as recursively they can be other circuits -or indices. +ExplicitIntMap { + 3 *fully.qualified.Name + 6 @fully.qualified.Name + 1 123 +} + +StringMap { + directive *fully.qualified.Name + integral 123 + floating 3.14 +} +

    -
    -Tree {
    +
    +

    Trees

    + +

    +The gate values of map circuits are analogously called children, +and they can be of primitive types (integers, floats, etc.) +as well as recursively, they can be other circuits or maps. + +

    +TreeCircuit {
     	Trunk {
     		Branches {
     			"Johnny"
    @@ -96,16 +126,31 @@ Tree {
     	}
     }
     
    +

    + +

    +Such recursive structures of maps, or just trees for short, +serve the same purpose as file-systems, namespaces, trees and others: +To organize their internal and leaf values in a hierarchical manner, +so that each node (internal or leaf) is identifiable by a unique path. +

    -

    Such recursive structures of indices, or just indices for short, serve the same purpose as -file-systems, namespaces, trees and others: To organize their internal and leaf values in a -hierarchical manner, so that each node (internal or leaf) is identifiable by a unique -path which we shall call address of a value relative to a given index. -

    For instance, the address of "Grandma" relative to the index -Tree would be +

    Indices

    + +

    +An Escher index is basically a tree circuit, +which we interpret as an absolute (and in future releases also relative) +tree containing all circuits/gates. +This also associates a unique path to each gate, +which we shall call address of a value relative to a given index. +

    -
    +

    +For instance, the address of "Grandma" relative to the index +Tree would be + +

     {
     	Root
     	Tentacles
    @@ -113,6 +158,8 @@ path which we shall call address of a value relative to a given index.
     }
    (Note that addresses are represented by series circuits.) +

    +

    Directives

    @@ -121,7 +168,7 @@ A directive is a pair of a string-valued verb and a target address. Directives are represented as a single circuit, wherein the empty-string gate holds the verb, while the number gates hold the components of the address. For instance, -
    +
     {
     	"" "*"
     	0 Root
    @@ -133,15 +180,15 @@ while the number gates hold the components of the address. For instance,
     

    This circuit holds the verb value "*" and the address whose components are Root, Tentacles and 1, in that order. There are only two types of verbs, signified by the verb values "*" -and "@", whose meaning is explained in later sections. -We call these verbs materialize and recall, respectively, while their +and "@", whose meaning is explained in later sections. +We call these verbs materialize and recall, respectively, while their single-characters values, "*" and "@", are a design choice of expediency.

    Due to the ubiquitous use of directives in circuit programs, directives can be written using the dedicated syntactic sugar: -

    +
     *Root.Tentacles.1
     
    @@ -154,7 +201,7 @@ Here we describe their circuit structure.

    The gates of program circuits ultimately represent independently executing services, which are interconnected according to the link pattern of the circuit. -

    Gate values designate the processing logic—i.e. they codify the service type—while +

    Gate values designate the processing logic — i.e. they codify the service type — while gate names are used solely as identifiers, needed in the description of the circuit links.

    Gate values can be of any kind: integer, float, complex, string @@ -164,18 +211,18 @@ written using the abbreviated syntax described earlier.

    Circuit links are allowed only between gate names, defined within the circuit or the empty-string gate name. -

    The empty-string gate name represents an implicit -“enclosing” or “parent” circuit. In particular, program -circuits are not allowed to define a gate with the empty-string -name. +

    The empty-string gate name represents an implicit +“enclosing” or “parent” circuit we call super gate. +In particular, program circuits are not allowed to use a vector +with the empty-string name. -

    Links whose endpoints are connected to the same +

    Links whose endpoints are connected to the same gate name are allowed, as long as they connect into different valve names.

    Here is an example of a valid program circuit: -

    +
     {
     	tkr *time.Ticker
     	sum *math.Sum
    @@ -185,7 +232,7 @@ valve names.
     	tkr: = sum:Sum
     
     	sum:X = :Phase
    -	sum:Y = *Show
    +	sum:Y = *e.Show
     }
     
    diff --git a/src/handbook/program.escher b/src/handbook/program.escher index bf15295..4677a1a 100644 --- a/src/handbook/program.escher +++ b/src/handbook/program.escher @@ -1,9 +1,9 @@ BuildProgramPage { - wf *io.WriteFile - wf:Content = *ProgramPage - wf:Name = "program.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *ProgramPage + wf:Name = "program.html" + : = wf:Ready } ProgramFigures { @@ -21,26 +21,26 @@ ProgramFigures { fig1:Right = `
     	main {
    -		scanln os.Scanln
    +		scanLn os.Scanln
     		println os.Println
    -		scanln:Read = println:Write
    +		scanLn:Read = println:Write
     	}
     	
    ` fig2 *FigurePngSvg fig2:Image = "telescope" - fig2:Width = "600px" + fig2:Width = "800px" fig2:Caption = `` fig3 *FigurePngSvg fig3:Image = "expanded" - fig3:Width = "500px" + fig3:Width = "1000px" fig3:Caption = `` fig4 *FigurePngSvg fig4:Image = "flattened" - fig4:Width = "500px" + fig4:Width = "1000px" fig4:Caption = `` } @@ -71,7 +71,7 @@ residual value.

    A program circuit describes a system of interconnected reflexes. -

    Each circuit gate corresponds to a reflex. The gate value +

    Each circuit gate corresponds to a reflex. The gate value describes the type of reflex that is to be materialized. While the gate name is merely an identifier, unique to the program circuit, used mainly to enable the specification of the reflex-to-reflex links. @@ -86,7 +86,7 @@ Links are allowed either between two gates defined within the circuit, or between a defined gate and the super gate.

    Links connected to the super gate are endpoints whose eventual connection -to a reflex is defered to the enclosing circuit. +to a reflex is deferred to the enclosing circuit.

    Circuit programs that have no links to the super gate are called closed circuits, as they describe complete materializable systems on their own. @@ -97,20 +97,23 @@ can only be materialized as reflexes within higher-level enclosing circuits.

    Gate value interpretation

    Circuit programs recognize the following types of gate values: -

      -
    • Integer, float, complex, string, or a non-directive circuit, or
    • +
        +
      1. Integer
      2. +
      3. Float
      4. +
      5. Complex
      6. +
      7. String
      8. +
      9. Circuit (non-directive)
      10. Directive circuit -
    +

    Noun reflexes

    -

    Gate values in the first group (integer, float, complex, string or a non-directive circuit) -will result in the materialization of a “noun” reflex, whose noun value is the -gate value. +

    Gate value types 1 to 5 will result in the materialization of a “noun” reflex, +whose noun value is the gate value. -

    A noun reflex is a generic built-in reflex type which, after materialization, +

    A noun reflex is a generic, built-in reflex type, which — after materialization — emits its corresponding gate value to each one of its connected valves. -If no valves are connected, the noun reflex leaves the gate value as its +If no valves are connected, the noun reflex leaves the gate value as its residue. Otherwise, it leaves no residue.

    Expanding directives

    @@ -118,24 +121,24 @@ residue. Otherwise, it leaves no residue.

    When the gate value is a directive, materialization proceeds as follows: -

      -
    • First, the runtime looks up the “target” value, which resides in the index -at the address specified in the directive.
    • -
    • Second, -
        -
      • If the directive verb is @, the gate is materialized as a noun gate -emitting the target value.
      • -
      • If the directive verb is *, the target value is substituted as the gate value, -and the materialization process described in this section is repeated now with the target -value as the gate value. -
      +
        +
      1. The runtime looks up the “target” value, which resides in the index + at the address specified in the directive.
      2. +
      3. +
          +
        • If the directive verb is @, the gate is materialized as a noun gate + emitting the target value.
        • +
        • If the directive verb is *, the target value is substituted as the gate value, + and the materialization process described in this section is repeated now with the target + value as the gate value. +
      4. -
    +

    Circuit residue

    As pointed out in the section on reflexes, every reflex -can leave a residue value as a result of being materialized, or the Go value nil +can leave a residue value as a result of being materialized, or the Go value nil, which indicates leaving no residue.

    Circuit programs are no different than reflexes (in fact they describe higher-order reflexes themselves) @@ -143,7 +146,7 @@ in that they leave a residue value as well.

    The residue of materializing a circuit program is the same circuit, wherein each gate value is replaced by the residue of materializing that gate. -Gate corresponding to reflexes that leave no residue are not present +Gates corresponding to reflexes that leave no residue are not present in the residue circuit.

    If no gates leave any residue, the circuit program itself leaves no residue. @@ -152,31 +155,31 @@ in the residue circuit.

    Consider, for instance, the following index: -

    +
     {
     	Database {
     		cache Cache
    -		left Shard
    -		right Shard
    +		shard1 Shard
    +		shard2 Shard
     		link Link
     
     		cache:Web = :Web
    -		left:Cache = cache:Left
    -		right:Cache = cache:Right
    -		left:Backup = link:Left
    -		right:Backup = link:Right
    +		shard1:Cache = cache:Left
    +		shard2:Cache = cache:Right
    +		shard1:Backup = link:Left
    +		shard2:Backup = link:Right
     		link: = :Backup
     	}
     	App {
     		web Web
    -		left Database
    -		right Database
    -		backup Backup
    -
    -		left:Web = web:Left
    -		right:Web = web:Right
    -		left:Backup = backup:Left
    -		right:Backup = backup:Right
    +		db1 Database
    +		db2 Database
    +		bkp Backup
    +
    +		db1:Web = web:Left
    +		db2:Web = web:Right
    +		db1:Backup = bkp:Left
    +		db2:Backup = bkp:Right
     	}
     	Web …
     	Cache …
    @@ -193,10 +196,10 @@ Whereas assume that Web, Cache, Shard and
     {{.Gate.Telescope}}
     
     If we materialize the program circuit App with respect to the index given above
    -(i.e. directive addresses will resolve with respect to that index), 
    +(i.e. directive addresses will resolve with respect to that index),
     we are going to get the following residue:
     
    -
    +
     {
     	web WebResidue
     	bkp BackupResidue
    @@ -220,21 +223,21 @@ we are going to get the following residue:
     		link LinkResidue
     
     		cache:Web = :Web
    -		left:Cache = cache:Left
    -		right:Cache = cache:Right
    -		left:Backup = link:Left
    -		right:Backup = link:Right
    +		shard1:Cache = cache:Left
    +		shard2:Cache = cache:Right
    +		shard1:Backup = link:Left
    +		shard2:Backup = link:Right
     		link: = :Backup
     	}
    -	left:Web = web:Left
    -	right:Web = web:Right
    -	left:Backup = backup:Left
    -	right:Backup = backup:Right
    +	db1:Web = web:Left
    +	db2:Web = web:Right
    +	db1:Backup = bkp:Left
    +	db2:Backup = bkp:Right
     }
     

    Where WebResidue, BackupResidue, Shard1Residue, etc. are -merely placeholders here for whatever the actual residue values of the respetive reflexes are. +merely placeholders here for whatever the actual residue values of the respective reflexes are. Visually the program residue could be represented as: {{.Gate.Expanded}} @@ -249,20 +252,19 @@ It is merely an illustration of the executed reflexes and their runtime connecti

    Three ways to invoke materialization

    -

    One can materialize (i.e. execute) a program circuit given an index from three different places: -from Go, from another program circuit (i.e. from Escher) and from the POSIX shell. +

    One can materialize (i.e. execute) a program circuit given an index from three different places. -

    Materializing from Go

    +

    Materializing from Go

    Package be provides the materialization method: -

    +
     func MaterializeSystem(program interface{}, index, barrier Circuit) (residue interface{})
     
    -

    Argument program contains the program circuit, of Go type Circuit, +

    Argument program contains the program circuit — of Go type Circuit — that is to be materialized. Incidentally, the value of program can be any value -recognized as a gate value in a circuit program as described earlier. Often one will pass a directive +recognized as a gate value in a circuit program as described earlier. Often, one will pass a directive circuit as program.

    Argument index holds the materialization index, relative to which @@ -270,21 +272,24 @@ directive addresses are interpreted.

    The last argument, barrier, is to be set to nil. -

    The function returns the residue of the materialization process. +

    The function returns the residue of the materialization process. -

    Materializing from Escher

    +

    Materializing from within Escher

    -

    One can recursively materialize circuits programs from within other -circuit programs. This is accomplished using the built-in reflex escher.Materialize +

    One can recursively materialize circuits programs from within other +circuit programs. This is accomplished using the built-in reflex e.Materialize which is described in the materialization faculty section. -

    Materializing from POSIX

    +

    Materializing from the command-line

    The Escher executable, which is explained in detail in the runtime section, -will materialize a directive from the command-line: +will materialize a directive from the command-line. +Given our index (project source root) is "/src/app/", +and in one of the "*.escher" files in that directory we have a gate named "Main", +we can materialize it from the command-line like this: -

    -% escher -src /src/app *app.Main
    +
    +escher -src /src/ "*app.Main"
     
    ` diff --git a/src/handbook/reflex.escher b/src/handbook/reflex.escher index ee683cd..4c17da7 100644 --- a/src/handbook/reflex.escher +++ b/src/handbook/reflex.escher @@ -1,9 +1,9 @@ BuildReflexPage { - wf *io.WriteFile - wf:Content = *ReflexPage - wf:Name = "reflex.html" - : = wf:Ready + wf *io.WriteFile + wf:Content = *ReflexPage + wf:Name = "reflex.html" + : = wf:Ready } ReflexPage { @@ -15,7 +15,7 @@ ReflexPage { f *e.Fork f:Reflex = rfx: - rfx *FigureJpg + rfx *FigurePngSvg rfx:Image = "reflex" rfx:Width = "300px" rfx:Caption = `` @@ -26,19 +26,19 @@ ReflexPage {

    Implementing reflexes

    -

    A key motivation for the design of Escher is the idea that +

    A key motivation for the design of Escher is the idea that software programs should be assembled as the interconnection of independently-executing computational devices of special-purpose logic. -In other words, computer programs—small or large—should be -no different in their essential structure than cloud applications, +In other words, computer programs — small or large — should be +no different in their essential structure than cloud applications, which are no more and no less than an interconnection of independently running special-purpose services.

    We call these “computational devices” reflexes. -Reflexes can be implemented in the language underlying Escher +Reflexes can be implemented in the language underlying Escher (the Go language) or they can be composed out of other reflexes, using circuit programs from within Escher. -Here we describe how to implement relfexes in Go and link them +Here we describe how to implement reflexes in Go and link them into the Escher runtime.

    Reflexes and the runtime

    @@ -58,7 +58,7 @@ is guided by circuit programs, described in the next

    Every reflex is embodied by a user-defined Go receiver type. -

    +
     type Receiver struct {
     	…
     }
    @@ -68,11 +68,11 @@ type Receiver struct {
     
     

    The spark

    -

    When a reflex is materialized, the Escher runtime creates a new instance of the underlying +

    When a reflex is materialized, the Escher runtime creates a new instance of the underlying Go receiver type and invokes a designated initialization method, called Spark. All receivers must implement that method. -

    +
     func (r *Receiver) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value {
     	…
     }
    @@ -82,7 +82,7 @@ func (r *Receiver) Spark(eye *Eye, matter Circuit, aux ...interface{}) Value {
     
     

    The first argument eye is an object with a singleton public method: -

    +
     func (eye *Eye) Show(valve Name, value interface{})
     
    @@ -103,38 +103,38 @@ is printed out by the Escher tool when an Escher program panics.

    From a programmatic standpoint, only one of the gates of circuit matter is of interest to reflex programmers. The gate called View lists -the names of all valves connected to this reflex by the parent system which is +the names of all valves connected to this reflex by the parent system which is materializing this reflex. The View gate has a circuit value, whose gate names correspond to the names of the valves connected to the reflex being materialized.

    For instance, the names of the connected valves can be printed with this code: -

    +
     	view := matter.CircuitAt("View")
     	for _, valve := range view.SortedNames() {
    -		fmt.Printf("valve name = %v\n", vavle)
    +		fmt.Printf("valve name = %v\n", valve)
     	}
     

    Auxiliary input

    -

    The last argument aux contains user-supplied auxiliary +

    The last argument aux contains user-supplied auxiliary information that can inform the Spark method to specialize -this reflex one way or another. The auxiliary information is specified -by the user when linking the reflex to the runtime, which is explained -furhter below. +this reflex one way or another. The auxiliary information is specified +by the user when linking the reflex to the runtime, which is explained +further below.

    Return residue

    -

    The Spark method can return a value called the +

    The Spark method can return a value called the residue (of materializing this reflex). The residue value can be int, float64, complex128, string, Circuit or Materializer. The latter is a Go type that can materialize reflexes (it is essentially a factory object for reflexes), described in the linking section below. -

    The residue will be made available through the Escher +

    The residue will be made available through the Escher programming environment for further manipulations. @@ -146,12 +146,12 @@ a reflex implementation.

    Fixed valve names

    -

    The first kind are receiver methods named +

    The first kind are receiver methods named CognizeVALVE, where VALVE can be any string (including the empty string), that have the following signature: -

    +
     func (r *Receiver) CognizeVALVE(eye *be.Eye, value interface{}) {
     	…
     }
    @@ -161,26 +161,26 @@ func (r *Receiver) CognizeVALVE(eye *be.Eye, value interface{}) {
     runtime that this reflex type requires the valve named VALVE
     to be connected (when the reflex is materialized as part of a circuit of reflexes).
     
    -

    Furthermore, every event sent to this valve (of this reflex instance) -will result in an invokation of the method CognizeVALVE, wherein the event value -is held by the argument value. The eye object, supplied -for convenience, can be used to send out events to any of the reflex's connected +

    Furthermore, every event sent to this valve (of this reflex instance) +will result in an invocation of the method CognizeVALVE, wherein the event value +is held by the argument value. The eye object, supplied +for convenience, can be used to send out events to any of the reflex's connected valves. -

    We say that that the method CognizeVALVE captures the event. +

    We say that the method CognizeVALVE captures the event.

    Varying valve names

    The second kind are receiver methods with this exact signature: -

    +
     func (r *Receiver) OverCognize(eye *be.Eye, valve Name, value interface{}) {
     	…
     }
     

    If such a method is present, the runtime is informed that the reflex -accepts any number and naming of connected valves. The method +accepts any number and naming of connected valves. The method OverCognize will be invoked whenever an event is received that is not captured by a fixed-name valve method. @@ -196,7 +196,7 @@ for reflexes of a given type.

    Creating the Materializer is accomplished using the function NewMaterializer in package be: -

    +
     func NewMaterializer(receiver Material, aux ...interface{}) Materializer
     
    @@ -207,18 +207,18 @@ will be available from the Escher circuit programming environment.

    To add a materializer for a new reflex type to the Escher index, one uses the method Register in package faculty: -

    +
     func Register(v Materializer, addr ...Name)
     

    The first argument is the materializer for the reflex, obtained from NewMaterializer, and the second argument is the address within the index where the materializer will be placed. -

    Typically the user will implement a package with multiple topically-related reflex receivers, +

    Typically, the user will implement a package with multiple topically-related reflex receivers, and will register their respective materializers with the runtime as a side-effect of importing the package, using an init function: -

    +
     func init() {
     	faculty.Register(be.NewMaterializer(&Receiver{}), "example", "ReflexName")
     }
    @@ -242,13 +242,13 @@ an arbitrary value is first sent to valve Door.
     wherein each passing value is blocked until its transmission is allowed by a “strobe”
     value sent to Door.
     
    -
    +
     package example
     
     import (
    -	"github.com/gocircuit/escher/be"
    -	"github.com/gocircuit/escher/faculty"
    -	. "github.com/gocircuit/escher/circuit"
    +	"github.com/hoijui/escher/be"
    +	"github.com/hoijui/escher/faculty"
    +	. "github.com/hoijui/escher/circuit"
     )
     
     func init() {
    diff --git a/src/handbook/syntax.escher b/src/handbook/syntax.escher
    index f557b15..6a2fe7d 100644
    --- a/src/handbook/syntax.escher
    +++ b/src/handbook/syntax.escher
    @@ -1,9 +1,9 @@
     
     BuildSyntaxPage {
    -        wf *io.WriteFile
    -        wf:Content = *SyntaxPage
    -        wf:Name = "syntax.html"
    -        : = wf:Ready
    +		wf *io.WriteFile
    +		wf:Content = *SyntaxPage
    +		wf:Name = "syntax.html"
    +		: = wf:Ready
     }
     
     SyntaxPage {
    @@ -13,17 +13,25 @@ SyntaxPage {
     	h:Body = t:
     
     	f *e.Fork
    -	f:Backquoted = "`\n\t\t<html>\n\t\t\t<div>abc</div>\n\t\t</html>\n\t`"
    +	f:BackQuoted = "`\n\t\t<html>\n\t\t\t<div>abc</div>\n\t\t</html>\n\t`"
    +	f:CommentsSample = "alpha {		  `// circuit definition`\n\tfloat 1.23 ; `// gate named float with a floating-point value`\n\tbeta {}	; `// gate named beta with an empty circuit value`\n\t`/*\n\t  * We can also do this:\n\t  * A multi-line comment within a circuit definition.\n\t  * Outside the circuit though, no comments are possible.\n\t  */`\n}"
     	f:ImgNand = fig:
    +	f:ImgCircuitParts = FigureCircuitParts:
     
     	fig *FigurePngSvg
     	fig:Image = "NAND"
     	fig:Width = "500px"
     	fig:Caption = `
     		In this illustration, the depicted circuit has three valves at the super gate,
    -		labeled as “X”, “Y” and “” (the empty string). 
    +		labeled as “X”, “Y” and “” (the empty string).
     		The source for this circuit is given later below.`
     
    +	FigureCircuitParts *FigurePngSvg
    +	FigureCircuitParts:Image = "circuit-parts"
    +	FigureCircuitParts:Width = "750px"
    +	FigureCircuitParts:Caption = `
    +		Shows the different parts of a circuit, with each type of a part coded with a different color.`
    +
     	t *text.QuickForm
     	t:Data = f:
     	t:Form = `
    @@ -31,36 +39,116 @@ SyntaxPage {
     
     

    At heart Escher is a Go package that parses a simple written syntax into a labeled graph data structure, called a circuit. If you view -XML as a syntax that represents labeled trees, then Escher would be a +XML as a syntax that represents labeled trees, then Escher would be a syntax that represents labeled graphs. +

    Syntax comparison with Java and C++

    + +

    As Escher is a so called Conceptual Programming Language, +it uses concepts that are very different then what you may know +from Object Oriented or Functional Programming, for example. +It therefore also uses very different concepts, parts, and names thereof. +This section tries to clarify those names. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Declarative unitJavaEscherC++
    Basic declaration unitclasscircuitclass
    Logical group of basic declaration unitspackagefacultynamespace
    Variable name + type/interfacevariable name + typegatevariable name + type
    enclosing declataion unit instancethissuper gatethis
    The runtime structure containing all the codeclass-pathindexLD_LIBRARY_PATH
    Unique string identifier of a declaration unitfully-qualified (class-)nameaddressfully-qualified (class-)name
    The underlying runtimeJVMEscher runtimethe OS
    The underlying technologyJVM/C/AssemblerGolangnone/the OS
    +

    Circuits

    -

    A circuit consists of nodes, called gates, which have -a name and a value. Names are strings or integers. Gates have unique names -within a circuit. Values are anything representable by the underlying -technology, which for our implementation means any Go value, equivalently, interface{}. +{{.Gate.ImgCircuitParts}} + +

    A circuit has the strucutre of a graph, +consists of nodes, called gates, +and edges, called links. + +Gates have a name and a value: +

      +
    • + Names are strings or integers. + Gates have unique names within a circuit. +
    • +
    • + Values are anything representable by the underlying technology, + which — for this Escher implementation — means any Go value, + or equivalently interface{}. +
    • +
    -

    Additionally, a circuit has a set of links across pairs of gates. +

    A circuits links go across pairs of gates. A link has two endpoints, called vectors. -Each vector consists of a gate name and a valve -name. Vectors do not overlap in the sense that all vectors with the -same gate name have unique valve names. +Each vector consists of a gate name and a valve name. +Vectors do not overlap, in the sense that all vectors +with the same gate name (within the circuit) have unique valve names.

    Symbolism

    -

    Circuits have a standard visual representation that fully captures -the internal structure of the circuit, which consists of the -gate names and links and excludes the gate values—the external structure. +

    Circuits have a standard visual representation +that fully captures the internal structure of the circuit, +which consists of the gate names and links, +and excludes the gate values — the external structure. -

    To draw a circuit we start with a solid black oval, denoting the circuit's internal name space. -White ovals—contained inside the black one and mutually non-overlapping—denote gates. +

    To draw a circuit, we start with a solid black oval, denoting the circuit's internal name space. +White ovals — contained inside the black one and mutually non-overlapping — denote gates.

    Links are depicted as white lines that connect the outlines of gate ovals. -Link endpoints connecting to the super gate are attached to -the outline of the surrounding black oval. +Link endpoints connecting to the super gate are attached to +the outline of the surrounding black oval. -

    Valve names are written in white within the black oval, next to their +

    Valve names are written in white within the black oval, next to their respective visual connection point. Connection points where valve names are visually missing correspond to empty-string valves. @@ -99,26 +187,27 @@ Type Value designates any Go value.

    Using the Escher parser is very simple, in three steps:

      -
    • Import the parsing package "github.com/gocircuit/escher/see"
    • +
    • Import the parsing packages "github.com/hoijui/escher/a" and "github.com/hoijui/escher/see"
    • Create a parsing object for your source string
    • Repeatedly parse one circuit definition at a time
    -

    The following example illustrates this: +

    The following Go example illustrates this:

     package main
     
     import (
     	"fmt"
    -	"github.com/gocircuit/escher/see"
    +	"github.com/hoijui/escher/a"
    +	"github.com/hoijui/escher/see"
     )
     
     func main() {
    -	src = "alpha { a 123; b 3.14; a: = b:}\n beta { 1, 2, 3, \"abc\" }"
    -	p := see.NewSrcString(src) // create a parsing object
    +	src := "alpha { a 123; b 3.14; a: = b:}\n beta { 1, 2, 3, \"abc\" }"
    +	p := a.NewSrcString(src) // create a parsing object
     	for {
    -		n, v := see.See(p) // parse one circuit at a time
    +		n, v := see.SeePeer(p) // parse one circuit at a time
     		if v == nil {
     			break
     		}
    @@ -147,33 +236,49 @@ or semi-colons.
     
     

    Comments

    -

    Go-style end-of-line comments are allowed everywhere. +

    We use a trick: +We use syntactic sugared (empty string named), string valued gates, +and — purely to visually indicate a comment — +we use "//" in the beginning, or "/*" plus "*/" at the end.

    -alpha {            // circuit definition
    -	float 1.23 // gate named float with a floating-point value
    -	beta {}    // gate named beta with an empty circuit value
    -}
    +{{ .Gate.CommentsSample }}
     

    Gates

    Gate statements begin on a new line with a gate name identifier, space, and a gate value expression. There are six value types that can be expressed: -

      -
    • Integers -
    • Floating-point numbers -
    • Complex numbers -
    • Strings -
    • Directives -
    • Circuits -
    - -

    The first four correspond to the Go types int, float64, complex128 -and string and are expressed using the same syntax. -Addresses have a dedicated Go type Address. They represent a sequence of names and are -written as dot-separated fully-qualified names. Finally, circuits—whose dedicated Go type is Circuit— -can be values of gates as well. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    TypeRepresents
    Integernative Go type int
    Floating-point numbernative Go type float64
    Complex numbernative Go type complex128
    Stringnative Go type string
    DirectiveEscher internal Go type Address, representing a sequence of names, written as dot-separated, fully-qualified names
    CircuitEscher internal Go type Circuit

    For instance, @@ -185,7 +290,7 @@ alpha { floating 3.14 complex (1-3i) quoted "abcd\n\tefgh" - backquoted {{ .Gate.Backquoted }} + backQuoted {{ .Gate.BackQuoted }} }

    @@ -203,8 +308,8 @@ alpha {

    Series

    Gate names can be omitted in circuit definitions, in which case gates are -assigned consequtive integral names, starting from zero. We call the resulting -circuits series. +assigned consecutive integral names, starting from zero. We call the resulting +circuits series.

     alpha {
    @@ -214,7 +319,7 @@ alpha {
     	3.14
     	(1-3i)
     	"abcd\n\tefgh"
    -	{{ .Gate.Backquoted }}
    +	{{ .Gate.BackQuoted }}
     	{
     		A 1
     		B "C"
    @@ -222,6 +327,24 @@ alpha {
     }
     
    +which is equivalent to: + +
    +alpha {
    +	0 *fully.qualified.Name
    +	1 @fully.qualified.Name
    +	2 123
    +	3 3.14
    +	4 (1-3i)
    +	5 "abcd\n\tefgh"
    +	6 {{ .Gate.BackQuoted }}
    +	7 {
    +		A 1
    +		B "C"
    +	}
    +}
    +
    +

    Circuit links are semantically symmetric. A link is a pair of two @@ -230,10 +353,13 @@ vectors, and a vector consists of a gate name and a valve name.

    Vectors are written as the gate name, followed by : (the colon sign), followed by the valve name. Links are written as a vector, followed by optional whitespace, followed by = (the equals sign), followed by another optional whitespace and -the second vector. For instance, +the second vector. Valid examples:

     	and:XAndY = not:X
    +	and:XAndY= not:X
    +	and:XAndY =not:X
    +	and:XAndY=not:X
     

    A few idioms are commonly useful: @@ -242,13 +368,13 @@ the second vector. For instance,

  • Gate names can be the empty string. The empty-string gate is called the super gate. While one cannot assign a value to it through syntax, it is possible to connect links to it. -

    The super gate has a distinguished role in some contexts. -For instance, when materializing circuits, +

    The super gate has a distinguished role in some contexts. +For instance, when materializing circuits, the links connected to the super gate are exposed to the higher-level “super” circuit.

  • Valve names can be the empty string. We call such valves default, as -they are commonly refered to hereinafter in various idioms. +they are commonly referred to hereinafter in various idioms. -

    For instance, it is a common pattern to name the output valve of +

    For instance, it is a common pattern to name the output valve of materializable circuits after the empty string. The default valve of the super gate, on the other hand, is a way of taking advantage of Escher's syntactic sugar rule.

@@ -269,8 +395,8 @@ Nand {

Syntactic sugar

-

When circuits are used to represent programs—in other words, -executable code—it is common to include a gate and then link to its default valve. +

When circuits are used to represent programs — in other words, +executable code — it is common to include a gate and then link to its default valve. To reduce verbosity in this case, link definitions support a piece of syntactic sugar.

Either (or both) vectors in a link definition can be substituted for a gate value. @@ -288,7 +414,7 @@ Will be expanded into sum:Summand = 0:

-

In another example both sides of the equation are sugared: +

In this example, both sides of the equation are sugared:

 	*os.Scanln = *os.Println
@@ -302,5 +428,5 @@ This will expand to:
 	0: = 1:
 
- ` + ` } diff --git a/src/handbook/util.escher b/src/handbook/util.escher index d27cf15..e46a44e 100644 --- a/src/handbook/util.escher +++ b/src/handbook/util.escher @@ -42,7 +42,7 @@ FigurePngSvg { fig *Figure fig:Caption = :Caption - ps *PngSvg + ps *PngSvg ps:Name = :Image ps:Width = :Width fig:Body = ps: diff --git a/src/http/http.escher b/src/http/Harness.escher similarity index 100% rename from src/http/http.escher rename to src/http/Harness.escher diff --git a/src/http/faculty.escher b/src/http/faculty.escher new file mode 100644 index 0000000..7684fc4 --- /dev/null +++ b/src/http/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + HTTP server examples. + */` +} + diff --git a/src/os/exit.escher b/src/os/ExitSuccess.escher similarity index 100% rename from src/os/exit.escher rename to src/os/ExitSuccess.escher diff --git a/src/os/proc.escher b/src/os/ProcessIgnoreIO.escher similarity index 69% rename from src/os/proc.escher rename to src/os/ProcessIgnoreIO.escher index 8decdb0..5edfab8 100644 --- a/src/os/proc.escher +++ b/src/os/ProcessIgnoreIO.escher @@ -5,6 +5,7 @@ see history clearly.` ProcessIgnoreIO { *e.Ignore = `ProcessIgnoreIO starts an OS process and ignores/discards its standard streams.` + proc *os.Process :When = proc:When :Exit = proc:Exit @@ -19,17 +20,3 @@ ProcessIgnoreIO { yio:Stderr = *io.Clunk } -ProcessRun { - proc *os.Process - :When = proc:When - :Exit = proc:Exit - :Command = proc:Command - - yio *e.Fork - yio: = proc:IO - - yio:When = *e.Ignore - yio:Stdin = *io.Clunk - yio:Stdout = *os.Stdout - yio:Stderr = *os.Stderr -} diff --git a/src/os/ProcessRun.escher b/src/os/ProcessRun.escher new file mode 100644 index 0000000..f2d55be --- /dev/null +++ b/src/os/ProcessRun.escher @@ -0,0 +1,19 @@ +LICENSE `Written in 2014 by Petar Maymounkov. 
+It helps future understanding of past knowledge to save +this notice, so peers of other times and backgrounds can +see history clearly.` + +ProcessRun { + proc *os.Process + :When = proc:When + :Exit = proc:Exit + :Command = proc:Command + + yio *e.Fork + yio: = proc:IO + + yio:When = *e.Ignore + yio:Stdin = *io.Clunk + yio:Stdout = *os.Stdout + yio:Stderr = *os.Stderr +} diff --git a/src/os/faculty.escher b/src/os/faculty.escher new file mode 100644 index 0000000..db5092e --- /dev/null +++ b/src/os/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + Operating-System related circuits. + */` +} + diff --git a/src/test/testing.escher b/src/test/All.escher similarity index 67% rename from src/test/testing.escher rename to src/test/All.escher index 9a758e6..3ce33b9 100644 --- a/src/test/testing.escher +++ b/src/test/All.escher @@ -3,20 +3,6 @@ It helps future understanding of past knowledge to save this notice, so peers of other times and backgrounds can see history clearly.` -MatchOneAndExit { - *e.Ignore = `MatchOneAndExit matches the singular values incoming - on :Expected and :Got and then exits successfully (with exit code 0).` - - match *test.Match - match:Expected = :Expected - match:Got = :Got - match: = door:Door - - door *e.OneWayDoor - door:From = 0 - door:To = *os.Exit -} - All { *e.Ignore = `All runs all test circuits within the default index (provided by the -src command-line option). Test circuits are named Test*, wherein * must be non-empty and must begin with an upper case letter. @@ -33,3 +19,4 @@ All { exec *test.Exec exec:Out = Ignore } + diff --git a/src/test/MatchOneAndExit.escher b/src/test/MatchOneAndExit.escher new file mode 100644 index 0000000..15e68d6 --- /dev/null +++ b/src/test/MatchOneAndExit.escher @@ -0,0 +1,19 @@ +LICENSE `Written in 2014 by Petar Maymounkov. 
+It helps future understanding of past knowledge to save +this notice, so peers of other times and backgrounds can +see history clearly.` + +MatchOneAndExit { + *e.Ignore = `MatchOneAndExit matches the singular values incoming + on :Expected and :Got and then exits successfully (with exit code 0).` + + match *test.Match + match:Expected = :Expected + match:Got = :Got + match: = door:Door + + door *e.OneWayDoor + door:From = 0 + door:To = *os.Exit +} + diff --git a/src/test/faculty.escher b/src/test/faculty.escher new file mode 100644 index 0000000..dec6ebc --- /dev/null +++ b/src/test/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + Unit-Tests Helper functions. + */` +} + diff --git a/src/tutorial/Break.escher b/src/tutorial/Break.escher new file mode 100644 index 0000000..7560a55 --- /dev/null +++ b/src/tutorial/Break.escher @@ -0,0 +1,10 @@ +// MEANT_TO_FAIL + +Break { + *e.Breakpoint = 1 +} + +BreakMain { + *Break +} + diff --git a/src/tutorial/break.escher b/src/tutorial/Debug.escher similarity index 78% rename from src/tutorial/break.escher rename to src/tutorial/Debug.escher index 4cf08bd..393af88 100644 --- a/src/tutorial/break.escher +++ b/src/tutorial/Debug.escher @@ -1,6 +1,4 @@ -Break { - *e.Breakpoint = 1 -} +// MEANT_TO_FAIL Debug { *e.Show = "Parent circuit" @@ -12,3 +10,8 @@ Debug { *e.Breakpoint = 1 } } + +DebugMain { + *Debug +} + diff --git a/src/tutorial/Exec.escher b/src/tutorial/Exec.escher new file mode 100644 index 0000000..2f70f4d --- /dev/null +++ b/src/tutorial/Exec.escher @@ -0,0 +1,31 @@ +Exec { + `/** Runs "ls /" in a sub-process */` + + proc *os.Process + proc:Command = { + Path "/bin/ls" + Args { "/" } + } + + yio *e.Fork + proc:IO = yio: + + yio:Stdin = *e.Ignore + yio:Stdout = *os.Stdout + yio:Stderr = *os.Stderr + + yExit *e.Fork + proc:Exit = yExit: + + yExit:Exit = : +} + +ExecMain { + `// This is required for the program to exit` + exit *e.Fork + exit: = *os.Exit + + mainCircuit *Exec + exit:1 = mainCircuit: +} + diff 
--git a/src/tutorial/File.escher b/src/tutorial/File.escher new file mode 100644 index 0000000..60af3df --- /dev/null +++ b/src/tutorial/File.escher @@ -0,0 +1,23 @@ +File { + `/** Print on standard output the source file located at the given index address. */` + + *os.Stdout = { + "" "*" + "tutorial" + "data.txt" + } +} + +FileMain { + `// This is required for the program to exit` + `// We simply run it for 100ms before exiting` + exit *e.Fork + exit: = *os.Exit + + tkr *time.Ticker + tkr:Duration = 1e8 + exit:1 = tkr: + + mainCircuit *File +} + diff --git a/src/tutorial/HelloWorld.escher b/src/tutorial/HelloWorld.escher new file mode 100644 index 0000000..54a9639 --- /dev/null +++ b/src/tutorial/HelloWorld.escher @@ -0,0 +1,28 @@ +HelloWorld { + `/** Prints three lines onto stdout. */` + + exit *e.Fork + : = exit: + + x *e.Show + x:Alice = "¡Hello, world!" + exit:x = x: + + *e.Show = "How do you do?" + exit:0 = 0: + + y *e.Show + y:Bob = answer: + answer `Tippy toppy, thank you.` + exit:y = y: +} + +HelloWorldMain { + `// This is required for the program to exit` + exit *e.Fork + exit: = *os.Exit + + mainCircuit *HelloWorld + exit:1 = mainCircuit: +} + diff --git a/src/tutorial/ShowIndex.escher b/src/tutorial/ShowIndex.escher new file mode 100644 index 0000000..a5f3e0c --- /dev/null +++ b/src/tutorial/ShowIndex.escher @@ -0,0 +1,22 @@ + +ShowIndex { + `/** Prints the whole index for debugging */` + + z *e.Show ; `// exemplary EOL doc comment` + z:Index = *e.Index + : = z: + + `/* ... and here: + * a multi-line one. + */` +} + +ShowIndexMain { + `// This is required for the program to exit` + exit *e.Fork + exit: = *os.Exit + + mainCircuit *ShowIndex + exit:1 = mainCircuit: +} + diff --git a/src/tutorial/TextMerge.escher b/src/tutorial/TextMerge.escher new file mode 100644 index 0000000..0bcdc82 --- /dev/null +++ b/src/tutorial/TextMerge.escher @@ -0,0 +1,33 @@ +TextMerge { + `/** Merges multiple strings into one, and prints it onto stdout. 
*/` + + h *tutorial.header + h:Title = "Hello, world!" + show *e.Show + show:header = h: + + : = show: +} + +TextMergeMain { + `// This is required for the program to exit` + exit *e.Fork + exit: = *os.Exit + + mainCircuit *TextMerge + exit:1 = mainCircuit: +} + +header { + f *e.Fork + f:X = ` + +` + f:Y = :Title + f:Z = ` + +` + m *text.Merge + m:In = f: + : = m:Out +} diff --git a/src/tutorial/Ticker.escher b/src/tutorial/Ticker.escher new file mode 100644 index 0000000..9b3bf5d --- /dev/null +++ b/src/tutorial/Ticker.escher @@ -0,0 +1,31 @@ +Ticker { + `/** + * Starts at 1e9, and reduces it by approximately 1e9 every second, + * showing the result each time. + */` + + tkr *time.Ticker + `// Create a tick every second (1e9 nano-seconds)` + tkr:Duration = 1e9 + `// This outputs roughtly (num-ticks * tkr:Duration)` + tkr: = sum:Sum + + sum *math.IntSum + sum:X = 1000000000 + `// Output sum:Y so that (sum:X + sum:Y = sum:Sum)` + sum:Y = *e.Show +} + +TickerMain { + `// This is required for the program to exit` + `// We simply run it for 10s before exiting` + exit *e.Fork + exit: = *os.Exit + + tkr *time.Ticker + tkr:Duration = 1e10 + exit:1 = tkr: + + mainCircuit *Ticker +} + diff --git a/src/tutorial/exec.escher b/src/tutorial/exec.escher deleted file mode 100644 index ef6dce9..0000000 --- a/src/tutorial/exec.escher +++ /dev/null @@ -1,20 +0,0 @@ -Exec { - proc *os.Process - proc:Command = { - Path "/bin/ls" - Args { "/" } - } - - yio *e.Fork - proc:IO = yio: - - yio:Stdin = *e.Ignore - yio:Stdout = *os.Stdout - yio:Stderr = *os.Stderr - - yexit *e.Fork - proc:Exit = yexit: - - exit *os.Exit - yexit:Exit = exit: -} diff --git a/src/tutorial/faculty.escher b/src/tutorial/faculty.escher new file mode 100644 index 0000000..82d98ed --- /dev/null +++ b/src/tutorial/faculty.escher @@ -0,0 +1,12 @@ + +faculty { + doc `/** + This package contains basic Escher tutorials. + Some of them are referenced in the handbook. 
+ All of them can be executed in series by running + $ESCHER/scripts/tutorials.sh. + To run the tutorials manually, you may run a command like this: +
escher -src "$GOPATH/src/github.com/hoijui/escher/src/" "*tutorial.HelloWorld"
+ */` +} + diff --git a/src/tutorial/fs.escher b/src/tutorial/fs.escher deleted file mode 100644 index 02b6a50..0000000 --- a/src/tutorial/fs.escher +++ /dev/null @@ -1,8 +0,0 @@ -File { - `Print on standard output the source file located at the given index address.` - *os.Stdout = { - "" "*" - "tutorial" - "data.txt" - } -} diff --git a/src/tutorial/helloworld.escher b/src/tutorial/helloworld.escher deleted file mode 100644 index e0eb939..0000000 --- a/src/tutorial/helloworld.escher +++ /dev/null @@ -1,10 +0,0 @@ -HelloWorld { - x *e.Show - x:Alice = "¡Hello, world!" - - *e.Show = "How do you do?" - - y *e.Show - y:Bob = answer: - answer `Tippy toppy, thank you.` -} diff --git a/src/tutorial/text.escher b/src/tutorial/text.escher deleted file mode 100644 index ef0f596..0000000 --- a/src/tutorial/text.escher +++ /dev/null @@ -1,20 +0,0 @@ -TextMerge { - h *tutorial.header - h:Title = "Hello, world!" - show *e.Show - show: = h: -} - -header { - f *e.Fork - f:X = ` - -` - f:Y = :Title - f:Z = ` - -` - m *text.Merge - m:In = f: - : = m:Out -} diff --git a/src/tutorial/ticker.escher b/src/tutorial/ticker.escher deleted file mode 100644 index f8bbe7e..0000000 --- a/src/tutorial/ticker.escher +++ /dev/null @@ -1,9 +0,0 @@ -Ticker { - tkr *time.Ticker - tkr:Duration = 1e9 - tkr: = sum:Sum - - sum *math.IntSum - sum:X = 1000000000 - sum:Y = *e.Show -} diff --git a/src/yield/TestFlows.escher b/src/yield/TestFlows.escher new file mode 100644 index 0000000..1cd1c01 --- /dev/null +++ b/src/yield/TestFlows.escher @@ -0,0 +1,15 @@ + +TestFlows { + y *yield.Flows + y: = { + and And + not Not + and:X = :X + and:Y = :Y + and: = not:A + not: = : + } + y:End = *e.Ignore + y:Frame = *e.Ignore +} + diff --git a/src/yield/yield.escher b/src/yield/TestValues.escher similarity index 82% rename from src/yield/yield.escher rename to src/yield/TestValues.escher index 1b06746..0a66ff9 100644 --- a/src/yield/yield.escher +++ b/src/yield/TestValues.escher @@ -42,16 +42,3 @@ testValues { t:End = 
:End } -TestFlows { - y *yield.Flows - y: = { - and And - not Not - and:X = :X - and:Y = :Y - and: = not:A - not: = : - } - y:End = *e.Ignore - y:Frame = *e.Ignore -} diff --git a/src/yield/faculty.escher b/src/yield/faculty.escher new file mode 100644 index 0000000..3cb7869 --- /dev/null +++ b/src/yield/faculty.escher @@ -0,0 +1,7 @@ + +faculty { + doc `/** + Unit tests for the yield faculty. + */` +} +