Skip to content

Commit ec5cf8f

Browse files
committed
diff: import from Go's internal/diff
From Go tip as of March 21st 2023, at commit 5f1a0320b92a60ee1283522135e00bff540ea115. The only change is to replace the internal/txtar dependency with our own txtar package. It seems like upstream has its own tiny copy of x/tools/txtar, presumably so that even low level packages can use txtar in tests. Fixes #157.
1 parent f0583b8 commit ec5cf8f

File tree

14 files changed

+630
-0
lines changed

14 files changed

+630
-0
lines changed

diff/diff.go

Lines changed: 261 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,261 @@
1+
// Copyright 2022 The Go Authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style
3+
// license that can be found in the LICENSE file.
4+
5+
package diff
6+
7+
import (
8+
"bytes"
9+
"fmt"
10+
"sort"
11+
"strings"
12+
)
13+
14+
// pair carries one value for the x (old) side and one for the
// y (new) side of a diff. It usually holds a pair of line indexes.
type pair struct {
	x, y int
}
17+
18+
// Diff returns an anchored diff of the two texts old and new
// in the “unified diff” format. If old and new are identical,
// Diff returns a nil slice (no output).
//
// Unix diff implementations typically look for a diff with
// the smallest number of lines inserted and removed,
// which can in the worst case take time quadratic in the
// number of lines in the texts. As a result, many implementations
// either can be made to run for a long time or cut off the search
// after a predetermined amount of work.
//
// In contrast, this implementation looks for a diff with the
// smallest number of “unique” lines inserted and removed,
// where unique means a line that appears just once in both old and new.
// We call this an “anchored diff” because the unique lines anchor
// the chosen matching regions. An anchored diff is usually clearer
// than a standard diff, because the algorithm does not try to
// reuse unrelated blank lines or closing braces.
// The algorithm also guarantees to run in O(n log n) time
// instead of the standard O(n²) time.
//
// Some systems call this approach a “patience diff,” named for
// the “patience sorting” algorithm, itself named for a solitaire card game.
// We avoid that name for two reasons. First, the name has been used
// for a few different variants of the algorithm, so it is imprecise.
// Second, the name is frequently interpreted as meaning that you have
// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
// when in fact the algorithm is faster than the standard one.
func Diff(oldName string, old []byte, newName string, new []byte) []byte {
	if bytes.Equal(old, new) {
		return nil
	}
	x := lines(old)
	y := lines(new)

	// Print diff header.
	var out bytes.Buffer
	fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
	fmt.Fprintf(&out, "--- %s\n", oldName)
	fmt.Fprintf(&out, "+++ %s\n", newName)

	// Loop over matches to consider,
	// expanding each match to include surrounding lines,
	// and then printing diff chunks.
	// To avoid setup/teardown cases outside the loop,
	// tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
	// in the sequence of matches.
	var (
		done  pair     // printed up to x[:done.x] and y[:done.y]
		chunk pair     // start lines of current chunk
		count pair     // number of lines from each side in current chunk
		ctext []string // lines for current chunk
	)
	for _, m := range tgs(x, y) {
		if m.x < done.x {
			// Already handled scanning forward from earlier match.
			continue
		}

		// Expand matching lines as far as possible,
		// establishing that x[start.x:end.x] == y[start.y:end.y].
		// Note that on the first (or last) iteration we may (or definitely do)
		// have an empty match: start.x==end.x and start.y==end.y.
		start := m
		for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
			start.x--
			start.y--
		}
		end := m
		for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
			end.x++
			end.y++
		}

		// Emit the mismatched lines before start into this chunk.
		// (No effect on first sentinel iteration, when start = {0,0}.)
		for _, s := range x[done.x:start.x] {
			ctext = append(ctext, "-"+s)
			count.x++
		}
		for _, s := range y[done.y:start.y] {
			ctext = append(ctext, "+"+s)
			count.y++
		}

		// If we're not at EOF and have too few common lines,
		// the chunk includes all the common lines and continues.
		const C = 3 // number of context lines
		if (end.x < len(x) || end.y < len(y)) &&
			(end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
			for _, s := range x[start.x:end.x] {
				ctext = append(ctext, " "+s)
				count.x++
				count.y++
			}
			done = end
			continue
		}

		// End chunk with common lines for context.
		if len(ctext) > 0 {
			// Take at most C trailing context lines from the common run.
			n := end.x - start.x
			if n > C {
				n = C
			}
			for _, s := range x[start.x : start.x+n] {
				ctext = append(ctext, " "+s)
				count.x++
				count.y++
			}
			done = pair{start.x + n, start.y + n}

			// Format and emit chunk.
			// Convert line numbers to 1-indexed.
			// Special case: empty file shows up as 0,0 not 1,0.
			if count.x > 0 {
				chunk.x++
			}
			if count.y > 0 {
				chunk.y++
			}
			fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
			// ctext entries retain their trailing newlines (see lines),
			// so they can be written out verbatim.
			for _, s := range ctext {
				out.WriteString(s)
			}
			count.x = 0
			count.y = 0
			ctext = ctext[:0]
		}

		// If we reached EOF, we're done.
		if end.x >= len(x) && end.y >= len(y) {
			break
		}

		// Otherwise start a new chunk.
		chunk = pair{end.x - C, end.y - C}
		for _, s := range x[chunk.x:end.x] {
			ctext = append(ctext, " "+s)
			count.x++
			count.y++
		}
		done = end
	}

	return out.Bytes()
}
165+
166+
// lines splits x into its component lines, each keeping its trailing
// newline. If the file does not end in a newline, one is supplied
// along with a warning about the missing newline, using the same text
// as BSD/GNU diff (including the leading backslash).
func lines(x []byte) []string {
	ls := strings.SplitAfter(string(x), "\n")
	last := len(ls) - 1
	if ls[last] == "" {
		// Input ended in a newline: drop the empty trailing element.
		return ls[:last]
	}
	ls[last] += "\n\\ No newline at end of file\n"
	return ls
}
180+
181+
// tgs returns the pairs of indexes of the longest common subsequence
// of unique lines in x and y, where a unique line is one that appears
// once in x and once in y.
//
// The longest common subsequence algorithm is as described in
// Thomas G. Szymanski, “A Special Case of the Maximal Common
// Subsequence Problem,” Princeton TR #170 (January 1975),
// available at https://research.swtch.com/tgs170.pdf.
func tgs(x, y []string) []pair {
	// Count the number of times each string appears in a and b.
	// We only care about 0, 1, many, counted as 0, -1, -2
	// for the x side and 0, -4, -8 for the y side.
	// Using negative numbers now lets us distinguish positive line numbers later.
	m := make(map[string]int)
	for _, s := range x {
		if c := m[s]; c > -2 {
			m[s] = c - 1
		}
	}
	for _, s := range y {
		if c := m[s]; c > -8 {
			m[s] = c - 4
		}
	}

	// Now unique strings can be identified by m[s] = -1+-4.
	//
	// Gather the indexes of those strings in x and y, building:
	//	xi[i] = increasing indexes of unique strings in x.
	//	yi[i] = increasing indexes of unique strings in y.
	//	inv[i] = index j such that x[xi[i]] = y[yi[j]].
	var xi, yi, inv []int
	for i, s := range y {
		if m[s] == -1+-4 {
			// Reuse the map entry to record the string's index in yi;
			// the value is now >= 0, distinguishing it from the counts.
			m[s] = len(yi)
			yi = append(yi, i)
		}
	}
	for i, s := range x {
		if j, ok := m[s]; ok && j >= 0 {
			xi = append(xi, i)
			inv = append(inv, j)
		}
	}

	// Apply Algorithm A from Szymanski's paper.
	// In those terms, A = J = inv and B = [0, n).
	// We add sentinel pairs {0,0}, and {len(x),len(y)}
	// to the returned sequence, to help the processing loop.
	J := inv
	n := len(xi)
	T := make([]int, n) // T[k] = smallest tail value of an increasing subsequence of length k+1
	L := make([]int, n) // L[i] = length of the longest increasing subsequence of J ending at i
	for i := range T {
		T[i] = n + 1 // larger than any J value, i.e. "unused"
	}
	for i := 0; i < n; i++ {
		// Binary-search for the slot this value extends (patience-sorting step).
		k := sort.Search(n, func(k int) bool {
			return T[k] >= J[i]
		})
		T[k] = J[i]
		L[i] = k + 1
	}
	// k = length of the longest increasing subsequence overall.
	k := 0
	for _, v := range L {
		if k < v {
			k = v
		}
	}
	seq := make([]pair, 2+k)
	seq[1+k] = pair{len(x), len(y)} // sentinel at end
	lastj := n
	// Walk backward, picking one element for each subsequence length.
	for i := n - 1; i >= 0; i-- {
		if L[i] == k && J[i] < lastj {
			seq[k] = pair{xi[i], yi[J[i]]}
			k--
		}
	}
	seq[0] = pair{0, 0} // sentinel at start
	return seq
}

diff/diff_test.go

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
// Copyright 2022 The Go Authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style
3+
// license that can be found in the LICENSE file.
4+
5+
package diff
6+
7+
import (
8+
"bytes"
9+
"path/filepath"
10+
"testing"
11+
12+
"github.com/rogpeppe/go-internal/txtar"
13+
)
14+
15+
// clean normalizes testdata content before use: each "$\n" marker
// (protecting a trailing space) becomes a plain newline, and a
// trailing "^D\n" end-of-input marker, if present, is removed.
func clean(text []byte) []byte {
	out := bytes.ReplaceAll(text, []byte("$\n"), []byte("\n"))
	return bytes.TrimSuffix(out, []byte("^D\n"))
}
20+
21+
// Test runs Diff over every txtar archive in testdata and compares the
// output against the archive's expected diff. Each archive must contain
// exactly three files: the old text, the new text, and the expected
// output, which must be named "diff".
func Test(t *testing.T) {
	// Glob only returns an error for a malformed pattern; an empty
	// result (e.g. missing directory) is caught by the check below.
	files, _ := filepath.Glob("testdata/*.txt")
	if len(files) == 0 {
		t.Fatalf("no testdata")
	}

	for _, file := range files {
		t.Run(filepath.Base(file), func(t *testing.T) {
			a, err := txtar.ParseFile(file)
			if err != nil {
				t.Fatal(err)
			}
			if len(a.Files) != 3 || a.Files[2].Name != "diff" {
				t.Fatalf("%s: want three files, third named \"diff\"", file)
			}
			// The first two archive files name and supply the inputs.
			diffs := Diff(a.Files[0].Name, clean(a.Files[0].Data), a.Files[1].Name, clean(a.Files[1].Data))
			want := clean(a.Files[2].Data)
			if !bytes.Equal(diffs, want) {
				// Include a diff of the diffs to make mismatches readable.
				t.Fatalf("%s: have:\n%s\nwant:\n%s\n%s", file,
					diffs, want, Diff("have", diffs, "want", want))
			}
		})
	}
}

diff/testdata/allnew.txt

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
-- old --
2+
-- new --
3+
a
4+
b
5+
c
6+
-- diff --
7+
diff old new
8+
--- old
9+
+++ new
10+
@@ -0,0 +1,3 @@
11+
+a
12+
+b
13+
+c

diff/testdata/allold.txt

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
-- old --
2+
a
3+
b
4+
c
5+
-- new --
6+
-- diff --
7+
diff old new
8+
--- old
9+
+++ new
10+
@@ -1,3 +0,0 @@
11+
-a
12+
-b
13+
-c

diff/testdata/basic.txt

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
Example from Hunt and McIlroy, “An Algorithm for Differential File Comparison.”
2+
https://www.cs.dartmouth.edu/~doug/diff.pdf
3+
4+
-- old --
5+
a
6+
b
7+
c
8+
d
9+
e
10+
f
11+
g
12+
-- new --
13+
w
14+
a
15+
b
16+
x
17+
y
18+
z
19+
e
20+
-- diff --
21+
diff old new
22+
--- old
23+
+++ new
24+
@@ -1,7 +1,7 @@
25+
+w
26+
a
27+
b
28+
-c
29+
-d
30+
+x
31+
+y
32+
+z
33+
e
34+
-f
35+
-g

0 commit comments

Comments
 (0)