diff --git a/api/next/49097.txt b/api/next/49097.txt
new file mode 100644
index 00000000000000..f7240954c66228
--- /dev/null
+++ b/api/next/49097.txt
@@ -0,0 +1,4 @@
+pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
+pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
+pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
+pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097
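A minimal usage sketch of the new methods, based only on the signatures in the api lines above (the remote address is a placeholder):

```go
package main

import (
	"context"
	"net"
	"net/netip"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	d := &net.Dialer{KeepAlive: 30 * time.Second}
	var local netip.AddrPort // zero value: let the kernel pick the source
	remote := netip.MustParseAddrPort("192.0.2.1:80")

	// Unlike DialContext, DialTCP returns a concrete *net.TCPConn.
	conn, err := d.DialTCP(ctx, "tcp", local, remote)
	if err != nil {
		return // the dial is abandoned if ctx expires first
	}
	defer conn.Close()
}
```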
diff --git a/api/next/67546.txt b/api/next/67546.txt
new file mode 100644
index 00000000000000..0b5b4b981c19a6
--- /dev/null
+++ b/api/next/67546.txt
@@ -0,0 +1,5 @@
+pkg database/sql/driver, type RowsColumnScanner interface { Close, Columns, Next, ScanColumn } #67546
+pkg database/sql/driver, type RowsColumnScanner interface, Close() error #67546
+pkg database/sql/driver, type RowsColumnScanner interface, Columns() []string #67546
+pkg database/sql/driver, type RowsColumnScanner interface, Next([]Value) error #67546
+pkg database/sql/driver, type RowsColumnScanner interface, ScanColumn(interface{}, int) error #67546
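A sketch of a driver-side rows type satisfying the new interface. The package, data layout, and error text are hypothetical; the api lines above fix only the method set, not the fallback contract for unsupported destinations:

```go
// Package mydriver is hypothetical; this sketches the interface shape only.
package mydriver

import (
	"database/sql/driver"
	"fmt"
	"io"
)

type rows struct {
	cols []string
	data [][]string // one []string per result row
	pos  int        // rows consumed by Next so far
}

func (r *rows) Columns() []string { return r.cols }
func (r *rows) Close() error      { return nil }

func (r *rows) Next(dest []driver.Value) error {
	if r.pos >= len(r.data) {
		return io.EOF
	}
	for i, s := range r.data[r.pos] {
		dest[i] = s
	}
	r.pos++
	return nil
}

// ScanColumn converts the column itself, overriding database/sql's
// default Scan conversions for this driver's rows.
func (r *rows) ScanColumn(dest interface{}, index int) error {
	switch d := dest.(type) {
	case *string:
		*d = r.data[r.pos-1][index] // column of the row Next last returned
		return nil
	default:
		return fmt.Errorf("mydriver: cannot scan into %T", dest)
	}
}
```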
diff --git a/doc/go_mem.html b/doc/go_mem.html
index 7adc34828a739a..8db7d65e7342a8 100644
--- a/doc/go_mem.html
+++ b/doc/go_mem.html
@@ -231,7 +231,7 @@ Implementation Restrictions for Programs Containing Data Races
A read of an array, struct, or complex number
-may by implemented as a read of each individual sub-value
+may be implemented as a read of each individual sub-value
(array element, struct field, or real/imaginary component),
in any order.
Similarly, a write of an array, struct, or complex number
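Note on the hunk above: the sentence it fixes is the one that permits split reads. A racy sketch (not part of the patch) of what that permission allows:

```go
package main

type pair struct{ a, b int }

var p pair

func writer() {
	p = pair{1, 1}
	p = pair{2, 2}
}

func main() {
	go writer()
	// Because the read of p may be implemented as a read of each field in
	// any order, this unsynchronized read may observe pair{1, 2} or
	// pair{2, 1}, mixing the two writes.
	_ = p
}
```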
diff --git a/doc/next/5-toolchain.md b/doc/next/5-toolchain.md
index 0f4a816479754c..cc32f30a521a6d 100644
--- a/doc/next/5-toolchain.md
+++ b/doc/next/5-toolchain.md
@@ -4,4 +4,9 @@
## Linker {#linker}
+## Bootstrap {#bootstrap}
+
+As mentioned in the [Go 1.24 release notes](/doc/go1.24#bootstrap), Go 1.26 now requires
+Go 1.24.6 or later for bootstrap.
+We expect that Go 1.28 will require a minor release of Go 1.26 or later for bootstrap.
diff --git a/doc/next/6-stdlib/99-minor/database/sql/driver/67546.md b/doc/next/6-stdlib/99-minor/database/sql/driver/67546.md
new file mode 100644
index 00000000000000..8cb9089583a89d
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/database/sql/driver/67546.md
@@ -0,0 +1 @@
+A database driver may implement [RowsColumnScanner] to override `Scan` behavior entirely.
diff --git a/doc/next/6-stdlib/99-minor/net/49097.md b/doc/next/6-stdlib/99-minor/net/49097.md
new file mode 100644
index 00000000000000..bb7947b0a11377
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/net/49097.md
@@ -0,0 +1 @@
+Added context-aware dial methods [Dialer.DialTCP], [Dialer.DialUDP], [Dialer.DialIP], and [Dialer.DialUnix].
diff --git a/doc/next/7-ports.md b/doc/next/7-ports.md
index 8bea3f8fbc33f9..07445454227bcb 100644
--- a/doc/next/7-ports.md
+++ b/doc/next/7-ports.md
@@ -1,2 +1,6 @@
## Ports {#ports}
+### Windows
+
+
+As [announced](/doc/go1.25#windows) in the Go 1.25 release notes, the [broken](/doc/go1.24#windows) 32-bit windows/arm port (`GOOS=windows` `GOARCH=arm`) is removed.
diff --git a/src/cmd/asm/internal/arch/loong64.go b/src/cmd/asm/internal/arch/loong64.go
index bf5175f4a0bad6..21263d34331513 100644
--- a/src/cmd/asm/internal/arch/loong64.go
+++ b/src/cmd/asm/internal/arch/loong64.go
@@ -23,18 +23,6 @@ func jumpLoong64(word string) bool {
return false
}
-// IsLoong64MUL reports whether the op (as defined by an loong64.A* constant) is
-// one of the MUL/DIV/REM instructions that require special handling.
-func IsLoong64MUL(op obj.As) bool {
- switch op {
- case loong64.AMUL, loong64.AMULU, loong64.AMULV, loong64.AMULVU,
- loong64.ADIV, loong64.ADIVU, loong64.ADIVV, loong64.ADIVVU,
- loong64.AREM, loong64.AREMU, loong64.AREMV, loong64.AREMVU:
- return true
- }
- return false
-}
-
// IsLoong64RDTIME reports whether the op (as defined by a loong64.A*
// constant) is one of the RDTIMELW/RDTIMEHW/RDTIMED instructions that
// require special handling.
diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go
index 9a6e22cab2c282..6bdbcb9c1b7f0e 100644
--- a/src/cmd/asm/internal/asm/asm.go
+++ b/src/cmd/asm/internal/asm/asm.go
@@ -974,14 +974,6 @@ func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
return addr.Offset
}
-// getImmediate checks that addr represents an immediate constant and returns its value.
-func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
- if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
- p.errorf("%s: expected immediate constant; found %s", op, obj.Dconv(prog, addr))
- }
- return addr.Offset
-}
-
// getRegister checks that addr represents a register and returns its value.
func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
index bfff555782e9f7..72e65734666c2a 100644
--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s
+++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
@@ -376,6 +376,10 @@ lable2:
FTINTRNEVF F0, F2 // 02e41a01
FTINTRNEVD F0, F2 // 02e81a01
+ // FSEL instruction
+ FSEL FCC0, F1, F2, F3 // 4304000d
+ FSEL FCC1, F1, F2 // 4284000d
+
// LDX.{B,BU,H,HU,W,WU,D} instructions
MOVB (R14)(R13), R12 // cc350038
MOVBU (R14)(R13), R12 // cc352038
@@ -510,6 +514,16 @@ lable2:
VMOVQ V3.W[1], V7.W4 // 67e4f772
VMOVQ V4.V[0], V6.V2 // 86f0f772
+	// Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), <Vd>.<T>
+ VMOVQ (R4), V0.B16 // 80008030
+ VMOVQ 1(R4), V1.H8 // 81044030
+ VMOVQ 2(R4), V2.W4 // 82082030
+ VMOVQ 3(R4), V3.V2 // 830c1030
+ XVMOVQ (R4), X0.B32 // 80008032
+ XVMOVQ 1(R4), X1.H16 // 81044032
+ XVMOVQ 2(R4), X2.W8 // 82082032
+ XVMOVQ 3(R4), X3.V4 // 830c1032
+
// VSEQ{B,H,W,V}, XVSEQ{B,H,W,V} instruction
VSEQB V1, V2, V3 // 43040070
VSEQH V1, V2, V3 // 43840070
@@ -1035,3 +1049,58 @@ lable2:
PRELD (R4), $0 // 8000c02a
PRELD -1(R4), $8 // 88fcff2a
PRELD 8(R4), $31 // 9f20c02a
+
+ // [X]{VBITCLR/VBITSET/VBITREV}{B,H,W,V} instructions
+ VBITCLRB V1, V2, V3 // 43040c71
+ VBITCLRH V1, V2, V3 // 43840c71
+ VBITCLRW V1, V2, V3 // 43040d71
+ VBITCLRV V1, V2, V3 // 43840d71
+ VBITSETB V1, V2, V3 // 43040e71
+ VBITSETH V1, V2, V3 // 43840e71
+ VBITSETW V1, V2, V3 // 43040f71
+ VBITSETV V1, V2, V3 // 43840f71
+ VBITREVB V1, V2, V3 // 43041071
+ VBITREVH V1, V2, V3 // 43841071
+ VBITREVW V1, V2, V3 // 43041171
+ VBITREVV V1, V2, V3 // 43841171
+ XVBITCLRB X3, X2, X1 // 410c0c75
+ XVBITCLRH X3, X2, X1 // 418c0c75
+ XVBITCLRW X3, X2, X1 // 410c0d75
+ XVBITCLRV X3, X2, X1 // 418c0d75
+ XVBITSETB X3, X2, X1 // 410c0e75
+ XVBITSETH X3, X2, X1 // 418c0e75
+ XVBITSETW X3, X2, X1 // 410c0f75
+ XVBITSETV X3, X2, X1 // 418c0f75
+ XVBITREVB X3, X2, X1 // 410c1075
+ XVBITREVH X3, X2, X1 // 418c1075
+ XVBITREVW X3, X2, X1 // 410c1175
+ XVBITREVV X3, X2, X1 // 418c1175
+ VBITCLRB $7, V2, V3 // 433c1073
+ VBITCLRH $15, V2, V3 // 437c1073
+ VBITCLRW $31, V2, V3 // 43fc1073
+ VBITCLRV $63, V2, V3 // 43fc1173
+ VBITSETB $7, V2, V3 // 433c1473
+ VBITSETH $15, V2, V3 // 437c1473
+ VBITSETW $31, V2, V3 // 43fc1473
+ VBITSETV $63, V2, V3 // 43fc1573
+ VBITREVB $7, V2, V3 // 433c1873
+ VBITREVH $15, V2, V3 // 437c1873
+ VBITREVW $31, V2, V3 // 43fc1873
+ VBITREVV $63, V2, V3 // 43fc1973
+ XVBITCLRB $7, X2, X1 // 413c1077
+ XVBITCLRH $15, X2, X1 // 417c1077
+ XVBITCLRW $31, X2, X1 // 41fc1077
+ XVBITCLRV $63, X2, X1 // 41fc1177
+ XVBITSETB $7, X2, X1 // 413c1477
+ XVBITSETH $15, X2, X1 // 417c1477
+ XVBITSETW $31, X2, X1 // 41fc1477
+ XVBITSETV $63, X2, X1 // 41fc1577
+ XVBITREVB $7, X2, X1 // 413c1877
+ XVBITREVH $15, X2, X1 // 417c1877
+ XVBITREVW $31, X2, X1 // 41fc1877
+ XVBITREVV $63, X2, X1 // 41fc1977
+
+ // ALSL{W/WU/D}
+ ALSLW $4, R4, R5, R6 // 86940500
+ ALSLWU $4, R4, R5, R6 // 86940700
+ ALSLV $4, R4, R5, R6 // 86942d00
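For reviewers skimming the encodings: a scalar paraphrase of the per-element semantics of the new LSX/LASX bit instructions, as I read the architecture manual (the bit index is taken modulo the element width; treat this as an assumption, not patch code):

```go
package sketch

// One element e of width w bits; n selects bit n%w within the element.
func bitclr(e, n, w uint64) uint64 { return e &^ (1 << (n % w)) } // VBITCLR*
func bitset(e, n, w uint64) uint64 { return e | (1 << (n % w)) }  // VBITSET*
func bitrev(e, n, w uint64) uint64 { return e ^ (1 << (n % w)) }  // VBITREV*
```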
diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go
index 844b2dd42c8cbf..fb4a8250a2666f 100644
--- a/src/cmd/cgo/internal/test/test.go
+++ b/src/cmd/cgo/internal/test/test.go
@@ -245,7 +245,7 @@ static void *thread(void *p) {
return NULL;
}
void testSendSIG() {
- const int N = 20;
+ enum { N = 20 };
int i;
pthread_t tid[N];
for (i = 0; i < N; i++) {
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index a443b4d8621e54..6ce721557a052e 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -9,20 +9,14 @@ package importer
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types2"
- "fmt"
"go/token"
"internal/pkgbits"
- "sync"
)
func assert(p bool) {
base.Assert(p)
}
-func errorf(format string, args ...interface{}) {
- panic(fmt.Sprintf(format, args...))
-}
-
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
// Synthesize a token.Pos
@@ -31,108 +25,6 @@ type fakeFileSet struct {
files map[string]*token.File
}
-func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
- // TODO(mdempsky): Make use of column.
-
- // Since we don't know the set of needed file positions, we
- // reserve maxlines positions per file.
- const maxlines = 64 * 1024
- f := s.files[file]
- if f == nil {
- f = s.fset.AddFile(file, -1, maxlines)
- s.files[file] = f
- // Allocate the fake linebreak indices on first use.
- // TODO(adonovan): opt: save ~512KB using a more complex scheme?
- fakeLinesOnce.Do(func() {
- fakeLines = make([]int, maxlines)
- for i := range fakeLines {
- fakeLines[i] = i
- }
- })
- f.SetLines(fakeLines)
- }
-
- if line > maxlines {
- line = 1
- }
-
- // Treat the file as if it contained only newlines
- // and column=1: use the line number as the offset.
- return f.Pos(line - 1)
-}
-
-var (
- fakeLines []int
- fakeLinesOnce sync.Once
-)
-
-func chanDir(d int) types2.ChanDir {
- // tag values must match the constants in cmd/compile/internal/gc/go.go
- switch d {
- case 1 /* Crecv */ :
- return types2.RecvOnly
- case 2 /* Csend */ :
- return types2.SendOnly
- case 3 /* Cboth */ :
- return types2.SendRecv
- default:
- errorf("unexpected channel dir %d", d)
- return 0
- }
-}
-
-var predeclared = []types2.Type{
- // basic types
- types2.Typ[types2.Bool],
- types2.Typ[types2.Int],
- types2.Typ[types2.Int8],
- types2.Typ[types2.Int16],
- types2.Typ[types2.Int32],
- types2.Typ[types2.Int64],
- types2.Typ[types2.Uint],
- types2.Typ[types2.Uint8],
- types2.Typ[types2.Uint16],
- types2.Typ[types2.Uint32],
- types2.Typ[types2.Uint64],
- types2.Typ[types2.Uintptr],
- types2.Typ[types2.Float32],
- types2.Typ[types2.Float64],
- types2.Typ[types2.Complex64],
- types2.Typ[types2.Complex128],
- types2.Typ[types2.String],
-
- // basic type aliases
- types2.Universe.Lookup("byte").Type(),
- types2.Universe.Lookup("rune").Type(),
-
- // error
- types2.Universe.Lookup("error").Type(),
-
- // untyped types
- types2.Typ[types2.UntypedBool],
- types2.Typ[types2.UntypedInt],
- types2.Typ[types2.UntypedRune],
- types2.Typ[types2.UntypedFloat],
- types2.Typ[types2.UntypedComplex],
- types2.Typ[types2.UntypedString],
- types2.Typ[types2.UntypedNil],
-
- // package unsafe
- types2.Typ[types2.UnsafePointer],
-
- // invalid type
- types2.Typ[types2.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- // not to be confused with the universe any
- anyType{},
-
- // comparable
- types2.Universe.Lookup("comparable").Type(),
-
- // "any" has special handling: see usage of predeclared.
-}
-
type anyType struct{}
func (t anyType) Underlying() types2.Type { return t }
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index c06f76fe9ff029..b39710548ebafa 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -1211,17 +1211,6 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
return s
}
-// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*ir.Func) int {
- count := 0
- for _, fn := range list {
- if fn.OClosure == nil {
- count++
- }
- }
- return count
-}
-
func doList(list []ir.Node, do func(ir.Node) bool) bool {
for _, x := range list {
if x != nil {
diff --git a/src/cmd/compile/internal/inline/inlheur/scoring.go b/src/cmd/compile/internal/inline/inlheur/scoring.go
index 28fa643132952a..1396c4d800331b 100644
--- a/src/cmd/compile/internal/inline/inlheur/scoring.go
+++ b/src/cmd/compile/internal/inline/inlheur/scoring.go
@@ -399,14 +399,6 @@ func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int {
return score
}
-// LargestPositiveScoreAdjustment tries to estimate the largest possible
-// positive score adjustment that could be applied to a given callsite.
-// At the moment we don't have very many positive score adjustments, so
-// this is just hard-coded, not table-driven.
-func LargestPositiveScoreAdjustment(fn *ir.Func) int {
- return adjValues[panicPathAdj] + adjValues[initFuncAdj]
-}
-
// callSiteTab contains entries for each call in the function
// currently being processed by InlineCalls; this variable will either
// be set to 'cstabCache' below (for non-inlinable routines) or to the
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
index d30f7bc6880938..4ec887d397ca68 100644
--- a/src/cmd/compile/internal/ir/copy.go
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -32,12 +32,3 @@ func DeepCopy(pos src.XPos, n Node) Node {
}
return edit(n)
}
-
-// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
-func DeepCopyList(pos src.XPos, list []Node) []Node {
- var out []Node
- for _, n := range list {
- out = append(out, DeepCopy(pos, n))
- }
- return out
-}
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index 70897fc3f9c8de..6c91560e526441 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -34,15 +34,6 @@ type miniNode struct {
esc uint16
}
-// posOr returns pos if known, or else n.pos.
-// For use in DeepCopy.
-func (n *miniNode) posOr(pos src.XPos) src.XPos {
- if pos.IsKnown() {
- return pos
- }
- return n.pos
-}
-
// op can be read, but not written.
// An embedding implementation can provide a SetOp if desired.
// (The panicking SetOp is with the other panics below.)
diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go
index 8dff11af335e23..c68bb5d0330337 100644
--- a/src/cmd/compile/internal/ir/visit.go
+++ b/src/cmd/compile/internal/ir/visit.go
@@ -155,19 +155,6 @@ func Any(n Node, cond func(Node) bool) bool {
return do(n)
}
-// AnyList calls Any(x, cond) for each node x in the list, in order.
-// If any call returns true, AnyList stops and returns true.
-// Otherwise, AnyList returns false after calling Any(x, cond)
-// for every x in the list.
-func AnyList(list Nodes, cond func(Node) bool) bool {
- for _, x := range list {
- if Any(x, cond) {
- return true
- }
- }
- return false
-}
-
// EditChildren edits the child nodes of n, replacing each child x with edit(x).
//
// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index 2d986a5ff4e67f..f8ecebb3509a77 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -1075,8 +1075,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
var blockJump = map[ssa.BlockKind]struct {
asm, invasm obj.As
}{
- ssa.BlockLOONG64EQ: {loong64.ABEQ, loong64.ABNE},
- ssa.BlockLOONG64NE: {loong64.ABNE, loong64.ABEQ},
+ ssa.BlockLOONG64EQZ: {loong64.ABEQ, loong64.ABNE},
+ ssa.BlockLOONG64NEZ: {loong64.ABNE, loong64.ABEQ},
ssa.BlockLOONG64LTZ: {loong64.ABLTZ, loong64.ABGEZ},
ssa.BlockLOONG64GEZ: {loong64.ABGEZ, loong64.ABLTZ},
ssa.BlockLOONG64LEZ: {loong64.ABLEZ, loong64.ABGTZ},
@@ -1102,7 +1102,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
+ case ssa.BlockLOONG64EQZ, ssa.BlockLOONG64NEZ,
ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
ssa.BlockLOONG64BEQ, ssa.BlockLOONG64BNE,
@@ -1132,7 +1132,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.From.Type = obj.TYPE_REG
p.From.Reg = b.Controls[0].Reg()
p.Reg = b.Controls[1].Reg()
- case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
+ case ssa.BlockLOONG64EQZ, ssa.BlockLOONG64NEZ,
ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
diff --git a/src/cmd/compile/internal/noder/posmap.go b/src/cmd/compile/internal/noder/posmap.go
index 439daf454e6fc3..9b02765e95cfe7 100644
--- a/src/cmd/compile/internal/noder/posmap.go
+++ b/src/cmd/compile/internal/noder/posmap.go
@@ -23,7 +23,6 @@ type poser interface{ Pos() syntax.Pos }
type ender interface{ End() syntax.Pos }
func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) }
-func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) }
func (m *posMap) makeXPos(pos syntax.Pos) src.XPos {
// Predeclared objects (e.g., the result parameter for error.Error)
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 38b0bc1d8a4153..3cbc7989a74613 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -3681,17 +3681,6 @@ func expandInline(fn *ir.Func, pri pkgReaderIndex) {
typecheck.Target.Funcs = typecheck.Target.Funcs[:topdcls]
}
-// usedLocals returns a set of local variables that are used within body.
-func usedLocals(body []ir.Node) ir.NameSet {
- var used ir.NameSet
- ir.VisitList(body, func(n ir.Node) {
- if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO {
- used.Add(n)
- }
- })
- return used
-}
-
// @@@ Method wrappers
//
// Here we handle constructing "method wrappers," alternative entry
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index dd79c3ef4c87cb..baff0ceea0e931 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -2413,11 +2413,6 @@ func (p posVar) String() string {
return p.pos.String() + ":" + p.var_.String()
}
-func (w *writer) exprList(expr syntax.Expr) {
- w.Sync(pkgbits.SyncExprList)
- w.exprs(syntax.UnpackListExpr(expr))
-}
-
func (w *writer) exprs(exprs []syntax.Expr) {
w.Sync(pkgbits.SyncExprs)
w.Len(len(exprs))
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index c1f2484bf4482a..1086a9ccbf22dc 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -14,6 +14,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
+ "internal/abi"
"internal/buildcfg"
"math"
"strings"
@@ -1913,12 +1914,90 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+ case ssa.OpPPC64LoweredPanicBoundsRR, ssa.OpPPC64LoweredPanicBoundsRC, ssa.OpPPC64LoweredPanicBoundsCR, ssa.OpPPC64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpPPC64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - ppc64.REG_R3)
+ case ssa.OpPPC64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(yVal)
+ }
+ case ssa.OpPPC64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(xVal)
+ }
+ case ssa.OpPPC64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpPPC64LoweredNilCheck:
if buildcfg.GOOS == "aix" {
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 4d1d7801900a2e..c561d527a7d161 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -1468,10 +1468,3 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
Add: InterfaceMethodOffset(ityp, midx),
})
}
-
-func deref(t *types.Type) *types.Type {
- if t.IsPtr() {
- return t.Elem()
- }
- return t
-}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 21edcabc58b3a8..ed20782a29cd91 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -14,6 +14,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
+ "internal/abi"
)
// ssaRegToReg maps ssa register numbers to obj register numbers.
@@ -416,7 +417,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
- ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
+ ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVXS, ssa.OpRISCV64FMVDX, ssa.OpRISCV64FMVXD,
ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW, ssa.OpRISCV64CLZ, ssa.OpRISCV64CLZW, ssa.OpRISCV64CTZ, ssa.OpRISCV64CTZW,
@@ -508,12 +509,91 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpRISCV64LoweredPanicBoundsRR, ssa.OpRISCV64LoweredPanicBoundsRC, ssa.OpRISCV64LoweredPanicBoundsCR, ssa.OpRISCV64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpRISCV64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - riscv.REG_X5)
+ case ssa.OpRISCV64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(yVal)
+ }
+ case ssa.OpRISCV64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(xVal)
+ }
+ case ssa.OpRISCV64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpRISCV64LoweredAtomicLoad8:
s.Prog(riscv.AFENCE)
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index ad66bfb5d85334..86efde4fa09b87 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -15,6 +15,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
+ "internal/abi"
)
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
@@ -573,12 +574,92 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpS390XLoweredPanicBoundsRR, ssa.OpS390XLoweredPanicBoundsRC, ssa.OpS390XLoweredPanicBoundsCR, ssa.OpS390XLoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpS390XLoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - s390x.REG_R0)
+ case ssa.OpS390XLoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(yVal)
+ }
+ case ssa.OpS390XLoweredPanicBoundsCR:
+ yIsReg = true
+ yVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(xVal)
+ }
+ case ssa.OpS390XLoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
+
case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 95e63001269c2d..1e0a5995700a19 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -606,31 +606,31 @@
// mutandis, for UGE and SETAE, and CC and SETCC.
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
- => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
-((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
- => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
-((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
- => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
+((NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
+ => ((ULT|UGE) (BTLconst [int8(log32u(uint32(c)))] x))
+((NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
+ => ((ULT|UGE) (BTQconst [int8(log32u(uint32(c)))] x))
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
+ => ((ULT|UGE) (BTQconst [int8(log64u(uint64(c)))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
- => (SET(B|AE) (BTLconst [int8(log32(c))] x))
-(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
- => (SET(B|AE) (BTQconst [int8(log32(c))] x))
-(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
- => (SET(B|AE) (BTQconst [int8(log64(c))] x))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
+ => (SET(B|AE) (BTLconst [int8(log32u(uint32(c)))] x))
+(SET(NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE) (BTQconst [int8(log32u(uint32(c)))] x))
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE) (BTQconst [int8(log64u(uint64(c)))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
- => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
- => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
- => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUnsignedPowerOfTwo(uint32(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
// and further combining shifts.
@@ -655,14 +655,14 @@
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
// the constant field of the OR/XOR instruction. See issue 61694.
-((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x)
+((OR|XOR)Q (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64u(uint64(c)))] x)
// Recognize bit clearing: a &^= 1<<b
(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
// Note: only convert AND to BTR if the constant wouldn't fit in
// the constant field of the AND instruction. See issue 61694.
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x)
+(ANDQ (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64u(uint64(^c)))] x)
// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
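A sketch of the source pattern these rewritten rules target; with the unsigned power-of-two helpers, the mask test below can still compile to a bit-test instruction rather than materializing the constant:

```go
package sketch

// On amd64, the TESTQ-with-MOVQconst form of this mask test matches the
// rules above and can lower to BTQ $40, x plus a SETcc.
func bit40(x uint64) bool {
	return x&(1<<40) != 0
}
```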
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 9d0ad0148fd010..4c195076288b33 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -517,7 +517,7 @@
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
-(If cond yes no) => (NE (MOVBUreg cond) yes no)
+(If cond yes no) => (NEZ (MOVBUreg cond) yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x
(MOVBUreg x:(XOR (MOVVconst [1]) ((SGT|SGTU) _ _))) => x
@@ -755,6 +755,9 @@
(MULV x (MOVVconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
+(MULV (NEGV x) (MOVVconst [c])) => (MULV x (MOVVconst [-c]))
+(MULV (NEGV x) (NEGV y)) => (MULV x y)
+
// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
@@ -899,41 +902,46 @@
// Optimizations
// Absorb boolean tests into block
-(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
-(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
-(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
-(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
-(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
-(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
-(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
-(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
-(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
-(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
-(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
-(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
-(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
-(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
-
-(EQ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQ (SGTUconst [c] y) yes no)
-(NE (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NE (SGTUconst [c] y) yes no)
-(EQ (SUBV x y) yes no) => (BEQ x y yes no)
-(NE (SUBV x y) yes no) => (BNE x y yes no)
-(EQ (SGT x y) yes no) => (BGE y x yes no)
-(NE (SGT x y) yes no) => (BLT y x yes no)
-(EQ (SGTU x y) yes no) => (BGEU y x yes no)
-(NE (SGTU x y) yes no) => (BLTU y x yes no)
+(NEZ (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NEZ (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQZ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQZ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NEZ (XORconst [1] cmp:(SGT _ _)) yes no) => (EQZ cmp yes no)
+(NEZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQZ cmp yes no)
+(NEZ (XORconst [1] cmp:(SGTconst _)) yes no) => (EQZ cmp yes no)
+(NEZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGT _ _)) yes no) => (NEZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NEZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGTconst _)) yes no) => (NEZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NEZ cmp yes no)
+(NEZ (SGTUconst [1] x) yes no) => (EQZ x yes no)
+(EQZ (SGTUconst [1] x) yes no) => (NEZ x yes no)
+(NEZ (SGTU x (MOVVconst [0])) yes no) => (NEZ x yes no)
+(EQZ (SGTU x (MOVVconst [0])) yes no) => (EQZ x yes no)
+(NEZ (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQZ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NEZ (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQZ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+
+// Convert EQZ/NEZ into more efficient branch conditions.
+(EQZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQZ (SGTUconst [c] y) yes no)
+(NEZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NEZ (SGTUconst [c] y) yes no)
+(EQZ (SUBV x y) yes no) => (BEQ x y yes no)
+(NEZ (SUBV x y) yes no) => (BNE x y yes no)
+(EQZ (SGT x y) yes no) => (BGE y x yes no)
+(NEZ (SGT x y) yes no) => (BLT y x yes no)
+(EQZ (SGTU x y) yes no) => (BGEU y x yes no)
+(NEZ (SGTU x y) yes no) => (BLTU y x yes no)
+(EQZ (SGTconst [c] y) yes no) => (BGE y (MOVVconst [c]) yes no)
+(NEZ (SGTconst [c] y) yes no) => (BLT y (MOVVconst [c]) yes no)
+(EQZ (SGTUconst [c] y) yes no) => (BGEU y (MOVVconst [c]) yes no)
+(NEZ (SGTUconst [c] y) yes no) => (BLTU y (MOVVconst [c]) yes no)
// absorb constants into branches
-(EQ (MOVVconst [0]) yes no) => (First yes no)
-(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
-(NE (MOVVconst [0]) yes no) => (First no yes)
-(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(EQZ (MOVVconst [0]) yes no) => (First yes no)
+(EQZ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NEZ (MOVVconst [0]) yes no) => (First no yes)
+(NEZ (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
@@ -943,6 +951,18 @@
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+// Convert branches comparing against zero into the dedicated branch-zero blocks.
+(BEQ (MOVVconst [0]) cond yes no) => (EQZ cond yes no)
+(BEQ cond (MOVVconst [0]) yes no) => (EQZ cond yes no)
+(BNE (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
+(BNE cond (MOVVconst [0]) yes no) => (NEZ cond yes no)
+(BLT (MOVVconst [0]) cond yes no) => (GTZ cond yes no)
+(BLT cond (MOVVconst [0]) yes no) => (LTZ cond yes no)
+(BLTU (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
+(BGE (MOVVconst [0]) cond yes no) => (LEZ cond yes no)
+(BGE cond (MOVVconst [0]) yes no) => (GEZ cond yes no)
+(BGEU (MOVVconst [0]) cond yes no) => (EQZ cond yes no)
+
// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVVconst [sz]) mem))
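A small Go sketch of branches that map onto the renamed blocks: zero comparisons lower to the one-operand EQZ/NEZ/LTZ-style blocks, and the new rules above fold two-operand BEQ/BNE-with-zero back into the same forms:

```go
package sketch

func classify(x int64) int {
	switch {
	case x == 0: // EQZ block
		return 0
	case x < 0: // LTZ block
		return -1
	default:
		return 1
	}
}
```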
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index 0e204c4a3c4476..75429cbffd782e 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -580,8 +580,8 @@ func init() {
}
blocks := []blockData{
- {name: "EQ", controls: 1},
- {name: "NE", controls: 1},
+ {name: "EQZ", controls: 1}, // = 0
+ {name: "NEZ", controls: 1}, // != 0
{name: "LTZ", controls: 1}, // < 0
{name: "LEZ", controls: 1}, // <= 0
{name: "GTZ", controls: 1}, // > 0
@@ -589,7 +589,7 @@ func init() {
{name: "FPT", controls: 1}, // FP flag is true
{name: "FPF", controls: 1}, // FP flag is false
{name: "BEQ", controls: 2}, // controls[0] == controls[1]
- {name: "BNE", controls: 2}, // controls[0] == controls[1]
+ {name: "BNE", controls: 2}, // controls[0] != controls[1]
{name: "BGE", controls: 2}, // controls[0] >= controls[1]
{name: "BLT", controls: 2}, // controls[0] < controls[1]
{name: "BGEU", controls: 2}, // controls[0] >= controls[1], unsigned
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 1749811b849030..f5e381ac413fcf 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -553,9 +553,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
index 1dae76366b7ec7..3c06208f7e21cd 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -171,10 +171,7 @@ func init() {
fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
callerSave = regMask(gp | fp | gr | xer)
- r3 = buildReg("R3")
- r4 = buildReg("R4")
- r5 = buildReg("R5")
- r6 = buildReg("R6")
+ first7 = buildReg("R3 R4 R5 R6 R7 R8 R9")
)
ops := []opData{
{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
@@ -706,12 +703,16 @@ func init() {
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"},
{name: "LoweredPubBarrier", argLength: 1, asm: "LWSYNC", hasSideEffects: true}, // Do data barrier. arg0=memory
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first7, first7}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first7}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first7}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// (InvertFlags (CMP a b)) == (CMP b a)
// So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index dc1cc97fb3cd05..69bf1c7c9e4f0e 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -299,6 +299,11 @@
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
(MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOV(W|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
+ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
+ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOV(W|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
(MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
@@ -309,15 +314,26 @@
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
(MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOV(W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
+ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
+ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOV(W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)
+(FMOV(W|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (FMOV(W|D)load [off1+int32(off2)] {sym} base mem)
+
(MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)
(MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)
+(FMOV(W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (FMOV(W|D)store [off1+int32(off2)] {sym} base val mem)
+
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
@@ -407,9 +423,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Small moves
(Move [0] _ _ mem) => mem
@@ -699,6 +717,13 @@
(MOVHUreg x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload [off] {sym} ptr mem)
(MOVWUreg x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload [off] {sym} ptr mem)
+// Replace load from same location as preceding store with copy.
+(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
+(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
+(MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
+(MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWUreg (FMVXS x))
+(FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVSX x)
+
// If a register move has only 1 use, just use the same register without emitting instruction
// MOVnop does not emit an instruction, only for ensuring the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
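The new FMVXS/FMVXD ops plus the forwarding rules above let float-to-integer bit reinterpretations stay in registers; e.g.:

```go
package sketch

import "math"

// Float64bits compiles as a float store followed by an integer reload;
// the rules above can replace that store/load pair with a single FMVXD.
func bits(f float64) uint64 {
	return math.Float64bits(f)
}
```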
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
index 8cb042a604bee6..d468a00b0f7416 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -49,7 +49,7 @@ func riscv64RegName(r int) string {
func init() {
var regNamesRISCV64 []string
- var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
+ var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask, first16Mask regMask
regNamed := make(map[string]regMask)
// Build the list of register names, creating an appropriately indexed
@@ -93,6 +93,9 @@ func init() {
gpspMask |= mask
gpspsbMask |= mask
gpspsbgMask |= mask
+ if r >= 5 && r < 5+16 {
+ first16Mask |= mask
+ }
}
}
@@ -429,12 +432,15 @@ func init() {
// Do data barrier. arg0=memory
{name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16Mask, first16Mask}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16Mask}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16Mask}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// F extension.
{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
@@ -447,7 +453,8 @@ func init() {
{name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2
{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
- {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float32
+ {name: "FMVXS", argLength: 1, reg: fpgp, asm: "FMVXS", typ: "Int32"}, // reinterpret arg0 as int32, sign extended to 64 bits
{name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
{name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
{name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
@@ -474,7 +481,8 @@ func init() {
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
{name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0)
{name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0
- {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float64
+ {name: "FMVXD", argLength: 1, reg: fpgp, asm: "FMVXD", typ: "Int64"}, // reinterpret arg0 as int64
{name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
{name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
{name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
index 80e12f8e29d6d2..664bf4a89c9024 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -458,9 +458,11 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// ***************************
// Above: lowering rules
diff --git a/src/cmd/compile/internal/ssa/_gen/S390XOps.go b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
index 38fb3cb0748932..c002d5bcc382a3 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
@@ -114,6 +114,7 @@ func init() {
sb = buildReg("SB")
r0 = buildReg("R0")
tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions.
+ lr = buildReg("R14")
// R10 is reserved by the assembler.
gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14")
@@ -518,12 +519,15 @@ func init() {
// Returns a pointer to a write barrier buffer in R9.
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{gp &^ lr, gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// Constant condition code values. The condition code can be 0, 1, 2 or 3.
{name: "FlagEQ"}, // CC=0 (equal)
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
index 3032309b7a2f1b..25fbaf68625753 100644
--- a/src/cmd/compile/internal/ssa/biasedsparsemap.go
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -84,14 +84,6 @@ func (s *biasedSparseMap) getEntry(i int) (x uint, v int32) {
return
}
-// add inserts x->0 into s, provided that x is in the range of keys stored in s.
-func (s *biasedSparseMap) add(x uint) {
- if int(x) < s.first || int(x) >= s.cap() {
- return
- }
- s.s.set(ID(int(x)-s.first), 0)
-}
-
// set inserts x->v into s, provided that x is in the range of keys stored in s.
func (s *biasedSparseMap) set(x uint, v int32) {
if int(x) < s.first || int(x) >= s.cap() {
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index f16959dd572973..a7d339cad064ac 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -436,8 +436,15 @@ func canSpeculativelyExecute(b *Block) bool {
// don't fuse memory ops, Phi ops, divides (can panic),
// or anything else with side-effects
for _, v := range b.Values {
- if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) || v.Type.IsMemory() ||
- v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects {
+ if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) ||
+ v.Type.IsMemory() || opcodeTable[v.Op].hasSideEffects {
+ return false
+ }
+
+ // Allow inlining markers to be speculatively executed
+ // even though they have a memory argument.
+ // See issue #74915.
+ if v.Op != OpInlMark && v.MemoryArg() != nil {
return false
}
}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index e92b37fb7bc342..c9a3e4291cc1a6 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -77,10 +77,6 @@ func (ls *liveSlot) String() string {
return fmt.Sprintf("0x%x.%d.%d", ls.Registers, ls.stackOffsetValue(), int32(ls.StackOffset)&1)
}
-func (ls liveSlot) absent() bool {
- return ls.Registers == 0 && !ls.onStack()
-}
-
// StackOffset encodes whether a value is on the stack and if so, where.
// It is a 31-bit integer followed by a presence flag at the low-order
// bit.
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 272b653ca3c6ce..f6bb863c001798 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -844,27 +844,6 @@ func (c *registerCursor) plus(regWidth Abi1RO) registerCursor {
return rc
}
-// at returns the register cursor for component i of t, where the first
-// component is numbered 0.
-func (c *registerCursor) at(t *types.Type, i int) registerCursor {
- rc := *c
- if i == 0 || len(c.regs) == 0 {
- return rc
- }
- if t.IsArray() {
- w := c.config.NumParamRegs(t.Elem())
- rc.nextSlice += Abi1RO(i * w)
- return rc
- }
- if t.IsStruct() {
- for j := 0; j < i; j++ {
- rc.next(t.FieldType(j))
- }
- return rc
- }
- panic("Haven't implemented this case yet, do I need to?")
-}
-
func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value, storeOffset int64) {
c.regs = regs
c.nextSlice = 0
@@ -923,17 +902,6 @@ type expandState struct {
indentLevel int // Indentation for debugging recursion
}
-// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
-// that has no 64-bit integer registers.
-func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
- tHi = x.typs.UInt32
- if et == types.TINT64 {
- tHi = x.typs.Int32
- }
- tLo = x.typs.UInt32
- return
-}
-
// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value {
ft := from.Type
@@ -957,29 +925,6 @@ func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.
return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
}
-func (x *expandState) regWidth(t *types.Type) Abi1RO {
- return Abi1RO(x.f.ABI1.NumParamRegs(t))
-}
-
-// regOffset returns the register offset of the i'th element of type t
-func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
- // TODO maybe cache this in a map if profiling recommends.
- if i == 0 {
- return 0
- }
- if t.IsArray() {
- return Abi1RO(i) * x.regWidth(t.Elem())
- }
- if t.IsStruct() {
- k := Abi1RO(0)
- for j := 0; j < i; j++ {
- k += x.regWidth(t.FieldType(j))
- }
- return k
- }
- panic("Haven't implemented this case yet, do I need to?")
-}
-
// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment {
if v.Op != OpArg {
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index d5c7394a267815..e8bd5d9acf5917 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -485,53 +485,6 @@ const (
BoundsKindCount
)
-// boundsABI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
-//
-// CMPQ c, cap
-// JA fail1
-// CMPQ b, c
-// JA fail2
-// CMPQ a, b
-// JA fail3
-//
-// fail1: CALL panicSlice3Acap (c, cap)
-// fail2: CALL panicSlice3B (b, c)
-// fail3: CALL panicSlice3C (a, b)
-//
-// When we register allocate that code, we want the same register to be used for
-// the first arg of panicSlice3Acap and the second arg to panicSlice3B. That way,
-// initializing that register once will satisfy both calls.
-// That desire ends up dividing the set of bounds check calls into 3 sets. This function
-// determines which set to use for a given panic call.
-// The first arg for set 0 should be the second arg for set 1.
-// The first arg for set 1 should be the second arg for set 2.
-func boundsABI(b int64) int {
- switch BoundsKind(b) {
- case BoundsSlice3Alen,
- BoundsSlice3AlenU,
- BoundsSlice3Acap,
- BoundsSlice3AcapU,
- BoundsConvert:
- return 0
- case BoundsSliceAlen,
- BoundsSliceAlenU,
- BoundsSliceAcap,
- BoundsSliceAcapU,
- BoundsSlice3B,
- BoundsSlice3BU:
- return 1
- case BoundsIndex,
- BoundsIndexU,
- BoundsSliceB,
- BoundsSliceBU,
- BoundsSlice3C,
- BoundsSlice3CU:
- return 2
- default:
- panic("bad BoundsKind")
- }
-}
-
// Returns the bounds error code needed by the runtime, and
// whether the x field is signed.
func (b BoundsKind) Code() (rtabi.BoundsErrorCode, bool) {
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 541237262ebe19..b196629cc2fb1a 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -94,8 +94,8 @@ const (
BlockARM64GEnoov
BlockARM64JUMPTABLE
- BlockLOONG64EQ
- BlockLOONG64NE
+ BlockLOONG64EQZ
+ BlockLOONG64NEZ
BlockLOONG64LTZ
BlockLOONG64LEZ
BlockLOONG64GTZ
@@ -250,8 +250,8 @@ var blockString = [...]string{
BlockARM64GEnoov: "GEnoov",
BlockARM64JUMPTABLE: "JUMPTABLE",
- BlockLOONG64EQ: "EQ",
- BlockLOONG64NE: "NE",
+ BlockLOONG64EQZ: "EQZ",
+ BlockLOONG64NEZ: "NEZ",
BlockLOONG64LTZ: "LTZ",
BlockLOONG64LEZ: "LEZ",
BlockLOONG64GTZ: "GTZ",
@@ -2453,9 +2453,10 @@ const (
OpPPC64LoweredAtomicOr32
OpPPC64LoweredWB
OpPPC64LoweredPubBarrier
- OpPPC64LoweredPanicBoundsA
- OpPPC64LoweredPanicBoundsB
- OpPPC64LoweredPanicBoundsC
+ OpPPC64LoweredPanicBoundsRR
+ OpPPC64LoweredPanicBoundsRC
+ OpPPC64LoweredPanicBoundsCR
+ OpPPC64LoweredPanicBoundsCC
OpPPC64InvertFlags
OpPPC64FlagEQ
OpPPC64FlagLT
@@ -2585,9 +2586,10 @@ const (
OpRISCV64LoweredGetCallerPC
OpRISCV64LoweredWB
OpRISCV64LoweredPubBarrier
- OpRISCV64LoweredPanicBoundsA
- OpRISCV64LoweredPanicBoundsB
- OpRISCV64LoweredPanicBoundsC
+ OpRISCV64LoweredPanicBoundsRR
+ OpRISCV64LoweredPanicBoundsRC
+ OpRISCV64LoweredPanicBoundsCR
+ OpRISCV64LoweredPanicBoundsCC
OpRISCV64FADDS
OpRISCV64FSUBS
OpRISCV64FMULS
@@ -2599,6 +2601,7 @@ const (
OpRISCV64FSQRTS
OpRISCV64FNEGS
OpRISCV64FMVSX
+ OpRISCV64FMVXS
OpRISCV64FCVTSW
OpRISCV64FCVTSL
OpRISCV64FCVTWS
@@ -2624,6 +2627,7 @@ const (
OpRISCV64FABSD
OpRISCV64FSGNJD
OpRISCV64FMVDX
+ OpRISCV64FMVXD
OpRISCV64FCVTDW
OpRISCV64FCVTDL
OpRISCV64FCVTWD
@@ -2842,9 +2846,10 @@ const (
OpS390XLoweredRound32F
OpS390XLoweredRound64F
OpS390XLoweredWB
- OpS390XLoweredPanicBoundsA
- OpS390XLoweredPanicBoundsB
- OpS390XLoweredPanicBoundsC
+ OpS390XLoweredPanicBoundsRR
+ OpS390XLoweredPanicBoundsRC
+ OpS390XLoweredPanicBoundsCR
+ OpS390XLoweredPanicBoundsCC
OpS390XFlagEQ
OpS390XFlagLT
OpS390XFlagGT
@@ -33018,41 +33023,46 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // R5
- {1, 64}, // R6
+ {0, 1016}, // R3 R4 R5 R6 R7 R8 R9
+ {1, 1016}, // R3 R4 R5 R6 R7 R8 R9
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 16}, // R4
- {1, 32}, // R5
+ {0, 1016}, // R3 R4 R5 R6 R7 R8 R9
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 8}, // R3
- {1, 16}, // R4
+ {0, 1016}, // R3 R4 R5 R6 R7 R8 R9
},
},
},
+ {
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
{
name: "InvertFlags",
argLen: 1,
@@ -34782,41 +34792,46 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 64}, // X7
- {1, 134217728}, // X28
+ {0, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
+ {1, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // X6
- {1, 64}, // X7
+ {0, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 16}, // X5
- {1, 32}, // X6
+ {0, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
},
},
},
+ {
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
{
name: "FADDS",
argLen: 2,
@@ -34978,6 +34993,19 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "FMVXS",
+ argLen: 1,
+ asm: riscv.AFMVXS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
{
name: "FCVTSW",
argLen: 1,
@@ -35338,6 +35366,19 @@ var opcodeTable = [...]opInfo{
},
},
},
+ {
+ name: "FMVXD",
+ argLen: 1,
+ asm: riscv.AFMVXD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
{
name: "FCVTDW",
argLen: 1,
@@ -38519,41 +38560,46 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // R2
- {1, 8}, // R3
+ {0, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
+ {1, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // R1
- {1, 4}, // R2
+ {0, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1}, // R0
- {1, 2}, // R1
+ {0, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
},
},
},
+ {
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
{
name: "FlagEQ",
argLen: 0,
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index b8c952ef3350ad..3f85bd2351f25c 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -145,10 +145,7 @@ func (l limit) signedMin(m int64) limit {
l.min = max(l.min, m)
return l
}
-func (l limit) signedMax(m int64) limit {
- l.max = min(l.max, m)
- return l
-}
+
func (l limit) signedMinMax(minimum, maximum int64) limit {
l.min = max(l.min, minimum)
l.max = min(l.max, maximum)
@@ -409,12 +406,6 @@ type factsTable struct {
limits []limit // indexed by value ID
limitStack []limitFact // previous entries
recurseCheck []bool // recursion detector for limit propagation
-
- // For each slice s, a map from s to a len(s)/cap(s) value (if any)
- // TODO: check if there are cases that matter where we have
- // more than one len(s) for a slice. We could keep a list if necessary.
- lens map[ID]*Value
- caps map[ID]*Value
}
// checkpointBound is an invalid value used for checkpointing
@@ -666,6 +657,11 @@ func (ft *factsTable) newLimit(v *Value, newLim limit) bool {
}
if !isTrue {
r ^= lt | gt | eq
+ } else if d == unsigned && (r == lt || r == lt|eq) && ft.isNonNegative(v.Args[1]) {
+ // Since every representation of a non-negative signed number is the same
+ // as in the unsigned domain, we can transfer x <= y to the signed domain,
+ // but only for the true branch.
+ d |= signed
}
// TODO: v.Block is wrong?
addRestrictions(v.Block, ft, d, v.Args[0], v.Args[1], r)
@@ -730,7 +726,7 @@ func (ft *factsTable) addOrdering(v, w *Value, d domain, r relation) {
// restricting it to r.
func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) {
if parent.Func.pass.debug > 2 {
- parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s", parent, v, w, r)
+ parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s %s", parent, d, v, w, r)
}
// No need to do anything else if we already found unsat.
if ft.unsat {
@@ -942,32 +938,6 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) {
return
}
- // Additional facts we know given the relationship between len and cap.
- //
- // TODO: Since prove now derives transitive relations, it
- // should be sufficient to learn that len(w) <= cap(w) at the
- // beginning of prove where we look for all len/cap ops.
- if v.Op == OpSliceLen && r&lt == 0 && ft.caps[v.Args[0].ID] != nil {
- // len(s) > w implies cap(s) > w
- // len(s) >= w implies cap(s) >= w
- // len(s) == w implies cap(s) >= w
- ft.update(parent, ft.caps[v.Args[0].ID], w, d, r|gt)
- }
- if w.Op == OpSliceLen && r&gt == 0 && ft.caps[w.Args[0].ID] != nil {
- // same, length on the RHS.
- ft.update(parent, v, ft.caps[w.Args[0].ID], d, r|lt)
- }
- if v.Op == OpSliceCap && r&gt == 0 && ft.lens[v.Args[0].ID] != nil {
- // cap(s) < w implies len(s) < w
- // cap(s) <= w implies len(s) <= w
- // cap(s) == w implies len(s) <= w
- ft.update(parent, ft.lens[v.Args[0].ID], w, d, r|lt)
- }
- if w.Op == OpSliceCap && r&lt == 0 && ft.lens[w.Args[0].ID] != nil {
- // same, capacity on the RHS.
- ft.update(parent, v, ft.lens[w.Args[0].ID], d, r|gt)
- }
-
// Process fence-post implications.
//
// First, make the condition > or >=.
@@ -1421,6 +1391,8 @@ func prove(f *Func) {
ft := newFactsTable(f)
ft.checkpoint()
+ var lens map[ID]*Value
+ var caps map[ID]*Value
// Find length and capacity ops.
for _, b := range f.Blocks {
for _, v := range b.Values {
@@ -1431,26 +1403,39 @@ func prove(f *Func) {
}
switch v.Op {
case OpSliceLen:
- if ft.lens == nil {
- ft.lens = map[ID]*Value{}
+ if lens == nil {
+ lens = map[ID]*Value{}
}
// Set all len Values for the same slice as equal in the poset.
// The poset handles transitive relations, so Values related to
// any OpSliceLen for this slice will be correctly related to others.
- if l, ok := ft.lens[v.Args[0].ID]; ok {
+ //
+ // Since we know that lens/caps are non-negative, their relation
+ // can be added in both the signed and unsigned domains.
+ if l, ok := lens[v.Args[0].ID]; ok {
ft.update(b, v, l, signed, eq)
+ ft.update(b, v, l, unsigned, eq)
} else {
- ft.lens[v.Args[0].ID] = v
+ lens[v.Args[0].ID] = v
+ }
+ if c, ok := caps[v.Args[0].ID]; ok {
+ ft.update(b, v, c, signed, lt|eq)
+ ft.update(b, v, c, unsigned, lt|eq)
}
case OpSliceCap:
- if ft.caps == nil {
- ft.caps = map[ID]*Value{}
+ if caps == nil {
+ caps = map[ID]*Value{}
}
// Same as case OpSliceLen above, but for slice cap.
- if c, ok := ft.caps[v.Args[0].ID]; ok {
+ if c, ok := caps[v.Args[0].ID]; ok {
ft.update(b, v, c, signed, eq)
+ ft.update(b, v, c, unsigned, eq)
} else {
- ft.caps[v.Args[0].ID] = v
+ caps[v.Args[0].ID] = v
+ }
+ if l, ok := lens[v.Args[0].ID]; ok {
+ ft.update(b, v, l, signed, gt|eq)
+ ft.update(b, v, l, unsigned, gt|eq)
}
}
}
@@ -2260,6 +2245,9 @@ func addLocalFacts(ft *factsTable, b *Block) {
OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8,
OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8:
ft.update(b, v, v.Args[0], unsigned, lt|eq)
+ if ft.isNonNegative(v.Args[0]) {
+ ft.update(b, v, v.Args[0], signed, lt|eq)
+ }
case OpMod64u, OpMod32u, OpMod16u, OpMod8u:
ft.update(b, v, v.Args[0], unsigned, lt|eq)
// Note: we have to be careful that this doesn't imply
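Both prove changes above lean on the same fact: a value known to be non-negative has identical signed and unsigned representations, so an unsigned x <= y transfers to the signed domain whenever y >= 0 (a negative x would appear as a huge unsigned value and violate the unsigned premise). A brute-force check over int8, offered as a sketch rather than as part of the patch:

    package main

    import "fmt"

    func main() {
        // If y >= 0 and uint8(x) <= uint8(y), then x <= y as signed values.
        for x := -128; x <= 127; x++ {
            for y := 0; y <= 127; y++ { // y restricted to non-negative
                if uint8(x) <= uint8(y) && x > y {
                    fmt.Println("counterexample:", x, y)
                    return
                }
            }
        }
        fmt.Println("holds for every int8 pair")
    }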
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index fb9642cfedfbfd..c0881c7a45f504 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -2980,11 +2980,6 @@ type desiredStateEntry struct {
regs [4]register
}
-func (d *desiredState) clear() {
- d.entries = d.entries[:0]
- d.avoid = 0
-}
-
// get returns a list of desired registers for value vid.
func (d *desiredState) get(vid ID) [4]register {
for _, e := range d.entries {
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index f9a35deecc3d0b..22399275212381 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -504,18 +504,6 @@ func isUnsignedPowerOfTwo[T uint8 | uint16 | uint32 | uint64](n T) bool {
return n != 0 && n&(n-1) == 0
}
-// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
-func isUint64PowerOfTwo(in int64) bool {
- n := uint64(in)
- return n > 0 && n&(n-1) == 0
-}
-
-// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
-func isUint32PowerOfTwo(in int64) bool {
- n := uint64(uint32(in))
- return n > 0 && n&(n-1) == 0
-}
-
// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {
return n == int64(int32(n))
@@ -637,51 +625,16 @@ func truncate64Fto32F(f float64) float32 {
return math.Float32frombits(r)
}
-// extend32Fto64F converts a float32 value to a float64 value preserving the bit
-// pattern of the mantissa.
-func extend32Fto64F(f float32) float64 {
- if !math.IsNaN(float64(f)) {
- return float64(f)
- }
- // NaN bit patterns aren't necessarily preserved across conversion
- // instructions so we need to do the conversion manually.
- b := uint64(math.Float32bits(f))
- // | sign | exponent | mantissa |
- r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23))
- return math.Float64frombits(r)
-}
-
// DivisionNeedsFixUp reports whether the division needs fix-up code.
func DivisionNeedsFixUp(v *Value) bool {
return v.AuxInt == 0
}
-// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
-func auxFrom64F(f float64) int64 {
- if f != f {
- panic("can't encode a NaN in AuxInt field")
- }
- return int64(math.Float64bits(f))
-}
-
-// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
-func auxFrom32F(f float32) int64 {
- if f != f {
- panic("can't encode a NaN in AuxInt field")
- }
- return int64(math.Float64bits(extend32Fto64F(f)))
-}
-
// auxTo32F decodes a float32 from the AuxInt value provided.
func auxTo32F(i int64) float32 {
return truncate64Fto32F(math.Float64frombits(uint64(i)))
}
-// auxTo64F decodes a float64 from the AuxInt value provided.
-func auxTo64F(i int64) float64 {
- return math.Float64frombits(uint64(i))
-}
-
func auxIntToBool(i int64) bool {
if i == 0 {
return false
@@ -715,12 +668,6 @@ func auxIntToValAndOff(i int64) ValAndOff {
func auxIntToArm64BitField(i int64) arm64BitField {
return arm64BitField(i)
}
-func auxIntToInt128(x int64) int128 {
- if x != 0 {
- panic("nonzero int128 not allowed")
- }
- return 0
-}
func auxIntToFlagConstant(x int64) flagConstant {
return flagConstant(x)
}
@@ -762,12 +709,6 @@ func valAndOffToAuxInt(v ValAndOff) int64 {
func arm64BitFieldToAuxInt(v arm64BitField) int64 {
return int64(v)
}
-func int128ToAuxInt(x int128) int64 {
- if x != 0 {
- panic("nonzero int128 not allowed")
- }
- return 0
-}
func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x)
}
@@ -838,23 +779,6 @@ func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
}
-// loadLSymOffset simulates reading a word at an offset into a
-// read-only symbol's runtime memory. If it would read a pointer to
-// another symbol, that symbol is returned. Otherwise, it returns nil.
-func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
- if lsym.Type != objabi.SRODATA {
- return nil
- }
-
- for _, r := range lsym.R {
- if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
- return r.Sym
- }
- }
-
- return nil
-}
-
func devirtLECall(v *Value, sym *obj.LSym) *Value {
v.Op = OpStaticLECall
auxcall := v.Aux.(*AuxCall)
@@ -1576,10 +1500,6 @@ func GetPPC64Shiftmb(auxint int64) int64 {
return int64(int8(auxint >> 8))
}
-func GetPPC64Shiftme(auxint int64) int64 {
- return int64(int8(auxint))
-}
-
// Test if this value can be encoded as a mask for an rlwinm-like
// operation. Masks can also extend from the msb and wrap around to
// the lsb. That is, the valid masks are 32-bit strings
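The deleted int64-based helpers are subsumed by the generic isUnsignedPowerOfTwo shown earlier in this file; call sites now convert to the exact unsigned width first, which makes the truncation explicit. A short sketch of the new call pattern; the log64u definition below is an assumption about the companion helper the AMD64 rules reference:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func isUnsignedPowerOfTwo[T uint8 | uint16 | uint32 | uint64](n T) bool {
        return n != 0 && n&(n-1) == 0
    }

    // Assumed shape of log64u: floor(log2(n)) for n > 0.
    func log64u(n uint64) int64 { return int64(bits.Len64(n)) - 1 }

    func main() {
        c := int64(1) << 33 // a constant as it appears in an AuxInt
        fmt.Println(isUnsignedPowerOfTwo(uint64(c))) // true
        fmt.Println(log64u(uint64(c)))               // 33, the bit index for BTxQconst
    }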
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 3532d42b0cda6e..c83890aee69fe1 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -3108,8 +3108,8 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
break
}
// match: (ANDQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
- // result: (BTRQconst [int8(log64(^c))] x)
+ // cond: isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31
+ // result: (BTRQconst [int8(log64u(uint64(^c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
@@ -3117,11 +3117,11 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
+ if !(isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTRQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(^c)))
+ v.AuxInt = int8ToAuxInt(int8(log64u(uint64(^c))))
v.AddArg(x)
return true
}
@@ -14431,8 +14431,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
break
}
// match: (ORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
- // result: (BTSQconst [int8(log64(c))] x)
+ // cond: isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31
+ // result: (BTSQconst [int8(log64u(uint64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
@@ -14440,11 +14440,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
+ if !(isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTSQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v.AddArg(x)
return true
}
@@ -17398,46 +17398,46 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
break
}
// match: (SETEQ (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETAE (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETAE (BTLconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTLconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETEQ (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETAE (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAE (BTQconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTQconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETEQ (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (SETAE (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAE (BTQconst [int8(log64u(uint64(c)))] x))
for {
if v_0.Op != OpAMD64TESTQ {
break
@@ -17451,12 +17451,12 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -17875,8 +17875,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
break
}
// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -17887,21 +17887,21 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -17912,21 +17912,21 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
- // cond: isUint64PowerOfTwo(c)
- // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -17944,14 +17944,14 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
c := auxIntToInt64(v_1_0.AuxInt)
x := v_1_1
mem := v_2
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
@@ -19444,46 +19444,46 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
break
}
// match: (SETNE (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETB (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETB (BTLconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTLconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETNE (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETB (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETB (BTQconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTQconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETNE (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (SETB (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETB (BTQconst [int8(log64u(uint64(c)))] x))
for {
if v_0.Op != OpAMD64TESTQ {
break
@@ -19497,12 +19497,12 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -19921,8 +19921,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
break
}
// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -19933,21 +19933,21 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -19958,21 +19958,21 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
- // cond: isUint64PowerOfTwo(c)
- // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -19990,14 +19990,14 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
c := auxIntToInt64(v_1_0.AuxInt)
x := v_1_1
mem := v_2
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
@@ -23495,8 +23495,8 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
break
}
// match: (XORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
- // result: (BTCQconst [int8(log64(c))] x)
+ // cond: isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31
+ // result: (BTCQconst [int8(log64u(uint64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
@@ -23504,11 +23504,11 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
+ if !(isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTCQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v.AddArg(x)
return true
}
@@ -30148,40 +30148,40 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
// match: (EQ (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (UGE (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (UGE (BTLconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTLconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64UGE, v0)
return true
}
// match: (EQ (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (UGE (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (UGE (BTQconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64UGE, v0)
return true
}
// match: (EQ (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (UGE (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (UGE (BTQconst [int8(log64u(uint64(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQ {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -30193,11 +30193,11 @@ func rewriteBlockAMD64(b *Block) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64UGE, v0)
return true
@@ -31128,40 +31128,40 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
// match: (NE (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (ULT (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (ULT (BTLconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTLconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64ULT, v0)
return true
}
// match: (NE (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (ULT (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (ULT (BTQconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64ULT, v0)
return true
}
// match: (NE (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (ULT (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (ULT (BTQconst [int8(log64u(uint64(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQ {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -31173,11 +31173,11 @@ func rewriteBlockAMD64(b *Block) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64ULT, v0)
return true
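Every rewrite in these AMD64 hunks relies on one identity: for a power-of-two constant c, x&c != 0 holds exactly when bit log2(c) of x is set, so a TEST plus an equality condition becomes BT plus a carry-flag condition (SETB/SETAE, ULT/UGE). A plain-Go spot check of the identity, for illustration only:

    package main

    import "fmt"

    func main() {
        const c = uint64(1) << 40 // power-of-two test constant, bit index 40
        for _, x := range []uint64{0, 1 << 40, 1<<40 - 1, ^uint64(0)} {
            viaTest := x&c != 0           // what TESTQ + SETNE computes
            viaBT := (x>>40)&1 == 1       // what BTQconst [40] + SETB computes
            fmt.Println(viaTest == viaBT) // true for every x
        }
    }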
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
index 83242413f06ab7..3065e2c315b50b 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -5539,6 +5539,7 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (MULV _ (MOVVconst [0]))
// result: (MOVVconst [0])
for {
@@ -5583,6 +5584,44 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
}
break
}
+ // match: (MULV (NEGV x) (MOVVconst [c]))
+ // result: (MULV x (MOVVconst [-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLOONG64NEGV {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64MULV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (MULV (NEGV x) (NEGV y))
+ // result: (MULV x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLOONG64NEGV {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpLOONG64NEGV {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64MULV)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (MULV (MOVVconst [c]) (MOVVconst [d]))
// result: (MOVVconst [c*d])
for {
@@ -11440,8 +11479,124 @@ func rewriteValueLOONG64_OpZero(v *Value) bool {
func rewriteBlockLOONG64(b *Block) bool {
typ := &b.Func.Config.Types
switch b.Kind {
- case BlockLOONG64EQ:
- // match: (EQ (FPFlagTrue cmp) yes no)
+ case BlockLOONG64BEQ:
+ // match: (BEQ (MOVVconst [0]) cond yes no)
+ // result: (EQZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64EQZ, cond)
+ return true
+ }
+ // match: (BEQ cond (MOVVconst [0]) yes no)
+ // result: (EQZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQZ, cond)
+ return true
+ }
+ case BlockLOONG64BGE:
+ // match: (BGE (MOVVconst [0]) cond yes no)
+ // result: (LEZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64LEZ, cond)
+ return true
+ }
+ // match: (BGE cond (MOVVconst [0]) yes no)
+ // result: (GEZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64GEZ, cond)
+ return true
+ }
+ case BlockLOONG64BGEU:
+ // match: (BGEU (MOVVconst [0]) cond yes no)
+ // result: (EQZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64EQZ, cond)
+ return true
+ }
+ case BlockLOONG64BLT:
+ // match: (BLT (MOVVconst [0]) cond yes no)
+ // result: (GTZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64GTZ, cond)
+ return true
+ }
+ // match: (BLT cond (MOVVconst [0]) yes no)
+ // result: (LTZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64LTZ, cond)
+ return true
+ }
+ case BlockLOONG64BLTU:
+ // match: (BLTU (MOVVconst [0]) cond yes no)
+ // result: (NEZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64NEZ, cond)
+ return true
+ }
+ case BlockLOONG64BNE:
+ // match: (BNE (MOVVconst [0]) cond yes no)
+ // result: (NEZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64NEZ, cond)
+ return true
+ }
+ // match: (BNE cond (MOVVconst [0]) yes no)
+ // result: (NEZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NEZ, cond)
+ return true
+ }
+ case BlockLOONG64EQZ:
+ // match: (EQZ (FPFlagTrue cmp) yes no)
// result: (FPF cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagTrue {
v_0 := b.Controls[0]
@@ -11449,7 +11604,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPF, cmp)
return true
}
- // match: (EQ (FPFlagFalse cmp) yes no)
+ // match: (EQZ (FPFlagFalse cmp) yes no)
// result: (FPT cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagFalse {
v_0 := b.Controls[0]
@@ -11457,8 +11612,8 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPT, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11468,11 +11623,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGT {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11482,11 +11637,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTU {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11496,11 +11651,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTconst {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11510,22 +11665,22 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTUconst {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (SGTUconst [1] x) yes no)
- // result: (NE x yes no)
+ // match: (EQZ (SGTUconst [1] x) yes no)
+ // result: (NEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTUconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- b.resetWithControl(BlockLOONG64NE, x)
+ b.resetWithControl(BlockLOONG64NEZ, x)
return true
}
- // match: (EQ (SGTU x (MOVVconst [0])) yes no)
- // result: (EQ x yes no)
+ // match: (EQZ (SGTU x (MOVVconst [0])) yes no)
+ // result: (EQZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -11534,10 +11689,10 @@ func rewriteBlockLOONG64(b *Block) bool {
if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
- b.resetWithControl(BlockLOONG64EQ, x)
+ b.resetWithControl(BlockLOONG64EQZ, x)
return true
}
- // match: (EQ (SGTconst [0] x) yes no)
+ // match: (EQZ (SGTconst [0] x) yes no)
// result: (GEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTconst {
v_0 := b.Controls[0]
@@ -11548,7 +11703,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64GEZ, x)
return true
}
- // match: (EQ (SGT x (MOVVconst [0])) yes no)
+ // match: (EQZ (SGT x (MOVVconst [0])) yes no)
// result: (LEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11561,9 +11716,9 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64LEZ, x)
return true
}
- // match: (EQ (SGTU (MOVVconst [c]) y) yes no)
+ // match: (EQZ (SGTU (MOVVconst [c]) y) yes no)
// cond: c >= -2048 && c <= 2047
- // result: (EQ (SGTUconst [c] y) yes no)
+ // result: (EQZ (SGTUconst [c] y) yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
y := v_0.Args[1]
@@ -11578,10 +11733,10 @@ func rewriteBlockLOONG64(b *Block) bool {
v0 := b.NewValue0(v_0.Pos, OpLOONG64SGTUconst, typ.Bool)
v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(y)
- b.resetWithControl(BlockLOONG64EQ, v0)
+ b.resetWithControl(BlockLOONG64EQZ, v0)
return true
}
- // match: (EQ (SUBV x y) yes no)
+ // match: (EQZ (SUBV x y) yes no)
// result: (BEQ x y yes no)
for b.Controls[0].Op == OpLOONG64SUBV {
v_0 := b.Controls[0]
@@ -11590,7 +11745,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BEQ, x, y)
return true
}
- // match: (EQ (SGT x y) yes no)
+ // match: (EQZ (SGT x y) yes no)
// result: (BGE y x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11599,7 +11754,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BGE, y, x)
return true
}
- // match: (EQ (SGTU x y) yes no)
+ // match: (EQZ (SGTU x y) yes no)
// result: (BGEU y x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
@@ -11608,7 +11763,29 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BGEU, y, x)
return true
}
- // match: (EQ (MOVVconst [0]) yes no)
+ // match: (EQZ (SGTconst [c] y) yes no)
+ // result: (BGE y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BGE, y, v0)
+ return true
+ }
+ // match: (EQZ (SGTUconst [c] y) yes no)
+ // result: (BGEU y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTUconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BGEU, y, v0)
+ return true
+ }
+ // match: (EQZ (MOVVconst [0]) yes no)
// result: (First yes no)
for b.Controls[0].Op == OpLOONG64MOVVconst {
v_0 := b.Controls[0]
@@ -11618,7 +11795,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.Reset(BlockFirst)
return true
}
- // match: (EQ (MOVVconst [c]) yes no)
+ // match: (EQZ (MOVVconst [c]) yes no)
// cond: c != 0
// result: (First no yes)
for b.Controls[0].Op == OpLOONG64MOVVconst {
@@ -11685,12 +11862,12 @@ func rewriteBlockLOONG64(b *Block) bool {
}
case BlockIf:
// match: (If cond yes no)
- // result: (NE (MOVBUreg cond) yes no)
+ // result: (NEZ (MOVBUreg cond) yes no)
for {
cond := b.Controls[0]
v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64)
v0.AddArg(cond)
- b.resetWithControl(BlockLOONG64NE, v0)
+ b.resetWithControl(BlockLOONG64NEZ, v0)
return true
}
case BlockLOONG64LEZ:
@@ -11745,8 +11922,8 @@ func rewriteBlockLOONG64(b *Block) bool {
b.swapSuccessors()
return true
}
- case BlockLOONG64NE:
- // match: (NE (FPFlagTrue cmp) yes no)
+ case BlockLOONG64NEZ:
+ // match: (NEZ (FPFlagTrue cmp) yes no)
// result: (FPT cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagTrue {
v_0 := b.Controls[0]
@@ -11754,7 +11931,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPT, cmp)
return true
}
- // match: (NE (FPFlagFalse cmp) yes no)
+ // match: (NEZ (FPFlagFalse cmp) yes no)
// result: (FPF cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagFalse {
v_0 := b.Controls[0]
@@ -11762,8 +11939,8 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPF, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11773,11 +11950,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGT {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11787,11 +11964,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTU {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11801,11 +11978,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTconst {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11815,22 +11992,22 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTUconst {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (SGTUconst [1] x) yes no)
- // result: (EQ x yes no)
+ // match: (NEZ (SGTUconst [1] x) yes no)
+ // result: (EQZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTUconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- b.resetWithControl(BlockLOONG64EQ, x)
+ b.resetWithControl(BlockLOONG64EQZ, x)
return true
}
- // match: (NE (SGTU x (MOVVconst [0])) yes no)
- // result: (NE x yes no)
+ // match: (NEZ (SGTU x (MOVVconst [0])) yes no)
+ // result: (NEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -11839,10 +12016,10 @@ func rewriteBlockLOONG64(b *Block) bool {
if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
- b.resetWithControl(BlockLOONG64NE, x)
+ b.resetWithControl(BlockLOONG64NEZ, x)
return true
}
- // match: (NE (SGTconst [0] x) yes no)
+ // match: (NEZ (SGTconst [0] x) yes no)
// result: (LTZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTconst {
v_0 := b.Controls[0]
@@ -11853,7 +12030,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64LTZ, x)
return true
}
- // match: (NE (SGT x (MOVVconst [0])) yes no)
+ // match: (NEZ (SGT x (MOVVconst [0])) yes no)
// result: (GTZ x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11866,9 +12043,9 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64GTZ, x)
return true
}
- // match: (NE (SGTU (MOVVconst [c]) y) yes no)
+ // match: (NEZ (SGTU (MOVVconst [c]) y) yes no)
// cond: c >= -2048 && c <= 2047
- // result: (NE (SGTUconst [c] y) yes no)
+ // result: (NEZ (SGTUconst [c] y) yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
y := v_0.Args[1]
@@ -11883,10 +12060,10 @@ func rewriteBlockLOONG64(b *Block) bool {
v0 := b.NewValue0(v_0.Pos, OpLOONG64SGTUconst, typ.Bool)
v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(y)
- b.resetWithControl(BlockLOONG64NE, v0)
+ b.resetWithControl(BlockLOONG64NEZ, v0)
return true
}
- // match: (NE (SUBV x y) yes no)
+ // match: (NEZ (SUBV x y) yes no)
// result: (BNE x y yes no)
for b.Controls[0].Op == OpLOONG64SUBV {
v_0 := b.Controls[0]
@@ -11895,7 +12072,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BNE, x, y)
return true
}
- // match: (NE (SGT x y) yes no)
+ // match: (NEZ (SGT x y) yes no)
// result: (BLT y x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11904,7 +12081,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BLT, y, x)
return true
}
- // match: (NE (SGTU x y) yes no)
+ // match: (NEZ (SGTU x y) yes no)
// result: (BLTU y x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
@@ -11913,7 +12090,29 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BLTU, y, x)
return true
}
- // match: (NE (MOVVconst [0]) yes no)
+ // match: (NEZ (SGTconst [c] y) yes no)
+ // result: (BLT y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BLT, y, v0)
+ return true
+ }
+ // match: (NEZ (SGTUconst [c] y) yes no)
+ // result: (BLTU y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTUconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BLTU, y, v0)
+ return true
+ }
+ // match: (NEZ (MOVVconst [0]) yes no)
// result: (First no yes)
for b.Controls[0].Op == OpLOONG64MOVVconst {
v_0 := b.Controls[0]
@@ -11924,7 +12123,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.swapSuccessors()
return true
}
- // match: (NE (MOVVconst [c]) yes no)
+ // match: (NEZ (MOVVconst [c]) yes no)
// cond: c != 0
// result: (First yes no)
for b.Controls[0].Op == OpLOONG64MOVVconst {
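
The NEZ rules above, including the two new SGTconst/SGTUconst cases, fold a materialized comparison directly into a fused compare-and-branch (BNE/BLT/BLTU) rather than testing a boolean for nonzero. A rough sketch of the Go-level shape the new constant cases target (the function is illustrative, not from this CL):

package main

import "fmt"

// "x < 10" compiles to SGTconst [10] x; branching on that result being
// nonzero (NEZ) can now lower to BLT against a register holding the
// constant, skipping the intermediate boolean entirely.
func classify(x int64) string {
	if x < 10 {
		return "small"
	}
	return "big"
}

func main() { fmt.Println(classify(3), classify(30)) }
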
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index e987ae9662884b..050ace83dea2d7 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -540,6 +540,12 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64LessEqual(v)
case OpPPC64LessThan:
return rewriteValuePPC64_OpPPC64LessThan(v)
+ case OpPPC64LoweredPanicBoundsCR:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsCR(v)
+ case OpPPC64LoweredPanicBoundsRC:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsRC(v)
+ case OpPPC64LoweredPanicBoundsRR:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsRR(v)
case OpPPC64MFVSRD:
return rewriteValuePPC64_OpPPC64MFVSRD(v)
case OpPPC64MOVBZload:
@@ -667,7 +673,8 @@ func rewriteValuePPC64(v *Value) bool {
case OpPPC64XORconst:
return rewriteValuePPC64_OpPPC64XORconst(v)
case OpPanicBounds:
- return rewriteValuePPC64_OpPanicBounds(v)
+ v.Op = OpPPC64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValuePPC64_OpPopCount16(v)
case OpPopCount32:
@@ -6826,6 +6833,86 @@ func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpPPC64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpPPC64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpPPC64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -12981,60 +13068,6 @@ func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
}
return false
}
-func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
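
The PPC64 changes replace the old three-way boundsABI dispatch (LoweredPanicBoundsA/B/C) with a register/constant op family: OpPanicBounds now lowers unconditionally to LoweredPanicBoundsRR, and the rules above then migrate each constant index out of the argument list into the op's aux payload (RR to RC or CR, and on to CC once both operands are constant). A hand-written sketch of the net folding, with illustrative names rather than the compiler's own types:

package main

import "fmt"

// operand is either a register value (reg) or a known constant (c).
type operand struct {
	isConst bool
	c       int64
	reg     string
}

// panicBoundsOp mirrors the RR/RC/CR/CC family: each constant operand
// moves from the argument list into the aux payload (cx/cy here).
type panicBoundsOp struct {
	form   string // "RR", "RC", "CR", or "CC"
	cx, cy int64
	args   []string
}

// lower composes the rewrites above: RR with a constant x becomes CR,
// RR with a constant y becomes RC, and either becomes CC when the
// remaining operand is also constant.
func lower(x, y operand) panicBoundsOp {
	switch {
	case x.isConst && y.isConst:
		return panicBoundsOp{form: "CC", cx: x.c, cy: y.c}
	case x.isConst:
		return panicBoundsOp{form: "CR", cx: x.c, args: []string{y.reg}}
	case y.isConst:
		return panicBoundsOp{form: "RC", cy: y.c, args: []string{x.reg}}
	}
	return panicBoundsOp{form: "RR", args: []string{x.reg, y.reg}}
}

func main() {
	fmt.Printf("%+v\n", lower(operand{isConst: true, c: 3}, operand{reg: "R5"}))
}

The same RR/RC/CR/CC rewrites appear below for RISCV64 and S390X.
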
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 95c6489a511be8..0dd952f5120b0d 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -486,7 +486,8 @@ func rewriteValueRISCV64(v *Value) bool {
v.Op = OpRISCV64OR
return true
case OpPanicBounds:
- return rewriteValueRISCV64_OpPanicBounds(v)
+ v.Op = OpRISCV64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueRISCV64_OpPopCount16(v)
case OpPopCount32:
@@ -516,6 +517,14 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64FMADDD(v)
case OpRISCV64FMADDS:
return rewriteValueRISCV64_OpRISCV64FMADDS(v)
+ case OpRISCV64FMOVDload:
+ return rewriteValueRISCV64_OpRISCV64FMOVDload(v)
+ case OpRISCV64FMOVDstore:
+ return rewriteValueRISCV64_OpRISCV64FMOVDstore(v)
+ case OpRISCV64FMOVWload:
+ return rewriteValueRISCV64_OpRISCV64FMOVWload(v)
+ case OpRISCV64FMOVWstore:
+ return rewriteValueRISCV64_OpRISCV64FMOVWstore(v)
case OpRISCV64FMSUBD:
return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
case OpRISCV64FMSUBS:
@@ -532,6 +541,12 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64FSUBD(v)
case OpRISCV64FSUBS:
return rewriteValueRISCV64_OpRISCV64FSUBS(v)
+ case OpRISCV64LoweredPanicBoundsCR:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsCR(v)
+ case OpRISCV64LoweredPanicBoundsRC:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRC(v)
+ case OpRISCV64LoweredPanicBoundsRR:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRR(v)
case OpRISCV64MOVBUload:
return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
case OpRISCV64MOVBUreg:
@@ -3416,60 +3431,6 @@ func rewriteValueRISCV64_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueRISCV64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -3891,6 +3852,250 @@ func rewriteValueRISCV64_OpRISCV64FMADDS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVDload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVDX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVDX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVWload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (FMOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVWstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -4239,6 +4444,86 @@ func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpRISCV64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpRISCV64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpRISCV64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -4944,6 +5229,25 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVXD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVXD)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool {
@@ -5625,6 +5929,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
@@ -5668,6 +5973,27 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWUreg (FMVXS x))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUreg)
+ v0 := b.NewValue0(v_1.Pos, OpRISCV64FMVXS, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
@@ -5858,6 +6184,25 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVXS x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVXS)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {
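
Besides the shared panic-bounds lowering, the RISCV64 rules above add store-to-load forwarding across the integer and floating-point register files: a load that reads back a value just stored from the other register file is replaced by a single FMVDX/FMVXD/FMVSX/FMVXS move (with a MOVWUreg zero-extension for the unsigned 32-bit case). Bit-reinterpretation helpers are presumably the main beneficiary; a sketch under that assumption:

package main

import (
	"fmt"
	"math"
)

// math.Float64bits stores its float argument and reloads the same bytes
// as an integer. Without the new rules that is an FMOVDstore followed by
// a MOVDload; with them, the reload can become a single FMVXD register
// move. (Illustrative; actual codegen depends on the surrounding code.)
func bits(f float64) uint64 {
	return math.Float64bits(f)
}

func main() {
	fmt.Printf("%#x\n", bits(1.0)) // 0x3ff0000000000000
}
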
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index a7fde81c4789b2..07dbe7bf7a697c 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -477,7 +477,8 @@ func rewriteValueS390X(v *Value) bool {
v.Op = OpS390XORW
return true
case OpPanicBounds:
- return rewriteValueS390X_OpPanicBounds(v)
+ v.Op = OpS390XLoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueS390X_OpPopCount16(v)
case OpPopCount32:
@@ -644,6 +645,12 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpS390XLTDBR(v)
case OpS390XLTEBR:
return rewriteValueS390X_OpS390XLTEBR(v)
+ case OpS390XLoweredPanicBoundsCR:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsCR(v)
+ case OpS390XLoweredPanicBoundsRC:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsRC(v)
+ case OpS390XLoweredPanicBoundsRR:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsRR(v)
case OpS390XLoweredRound32F:
return rewriteValueS390X_OpS390XLoweredRound32F(v)
case OpS390XLoweredRound64F:
@@ -3971,60 +3978,6 @@ func rewriteValueS390X_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueS390X_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueS390X_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -8147,6 +8100,86 @@ func rewriteValueS390X_OpS390XLTEBR(v *Value) bool {
}
return false
}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpS390XLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpS390XLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpS390XLoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
v_0 := v.Args[0]
// match: (LoweredRound32F x:(FMOVSconst))
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index bce94d35f94841..1e2159579dfbf2 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -184,42 +184,6 @@ func InitConfig() {
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
- } else {
- BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
- BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
- BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
- BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
- BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
- BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
- BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
- BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
- BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
- BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
- BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
- BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
- BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
- BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
- BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
- BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
- BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
- }
- if Arch.LinkArch.PtrSize == 4 {
- ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
- ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
- ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
- ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
- ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
- ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
- ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
- ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
- ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
- ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
- ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
- ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
- ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
- ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
- ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
- ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
}
// Wasm (all asm funcs with special ABIs)
@@ -1358,9 +1322,6 @@ func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
}
return s.constInt32(t, int32(c))
}
-func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
- return s.f.ConstOffPtrSP(t, c, s.sp)
-}
// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
@@ -5418,26 +5379,6 @@ func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
return a
}
-func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
- pt := types.NewPtr(t)
- var addr *ssa.Value
- if base == s.sp {
- // Use special routine that avoids allocation on duplicate offsets.
- addr = s.constOffPtrSP(pt, off)
- } else {
- addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
- }
-
- if !ssa.CanSSA(t) {
- a := s.addr(n)
- s.move(t, addr, a)
- return
- }
-
- a := s.expr(n)
- s.storeType(t, addr, a, 0, false)
-}
-
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// v may be a slice, string or pointer to an array.
@@ -7758,7 +7699,4 @@ func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
}
}
-var (
- BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
- ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
-)
+var BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index ce2e921771ed94..5e39bb512f45f4 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -622,12 +622,6 @@ func (s *Schedule) staticAssignInlinedCall(l *ir.Name, loff int64, call *ir.Inli
// INLCALL-ReturnVars
// . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13
//
- // In non-unified IR, the tree is slightly different:
- // - if there are no arguments to the inlined function,
- // the INLCALL-init omits the AS2.
- // - the DCL inside BLOCK is on the AS2's init list,
- // not its own statement in the top level of the BLOCK.
- //
// If the init values are side-effect-free and each either only
// appears once in the function body or is safely repeatable,
// then we inline the value expressions into the return argument
@@ -647,39 +641,26 @@ func (s *Schedule) staticAssignInlinedCall(l *ir.Name, loff int64, call *ir.Inli
// is the most important case for us to get right.
init := call.Init()
- var as2init *ir.AssignListStmt
- if len(init) == 2 && init[0].Op() == ir.OAS2 && init[1].Op() == ir.OINLMARK {
- as2init = init[0].(*ir.AssignListStmt)
- } else if len(init) == 1 && init[0].Op() == ir.OINLMARK {
- as2init = new(ir.AssignListStmt)
- } else {
+ if len(init) != 2 || init[0].Op() != ir.OAS2 || init[1].Op() != ir.OINLMARK {
return false
}
+ as2init := init[0].(*ir.AssignListStmt)
+
if len(call.Body) != 2 || call.Body[0].Op() != ir.OBLOCK || call.Body[1].Op() != ir.OLABEL {
return false
}
label := call.Body[1].(*ir.LabelStmt).Label
block := call.Body[0].(*ir.BlockStmt)
list := block.List
- var dcl *ir.Decl
- if len(list) == 3 && list[0].Op() == ir.ODCL {
- dcl = list[0].(*ir.Decl)
- list = list[1:]
- }
- if len(list) != 2 ||
- list[0].Op() != ir.OAS2 ||
- list[1].Op() != ir.OGOTO ||
- list[1].(*ir.BranchStmt).Label != label {
+ if len(list) != 3 ||
+ list[0].Op() != ir.ODCL ||
+ list[1].Op() != ir.OAS2 ||
+ list[2].Op() != ir.OGOTO ||
+ list[2].(*ir.BranchStmt).Label != label {
return false
}
- as2body := list[0].(*ir.AssignListStmt)
- if dcl == nil {
- ainit := as2body.Init()
- if len(ainit) != 1 || ainit[0].Op() != ir.ODCL {
- return false
- }
- dcl = ainit[0].(*ir.Decl)
- }
+ dcl := list[0].(*ir.Decl)
+ as2body := list[1].(*ir.AssignListStmt)
if len(as2body.Lhs) != 1 || as2body.Lhs[0] != dcl.X {
return false
}
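
With the non-unified-IR shapes dropped, staticAssignInlinedCall now accepts exactly one tree: an OAS2 plus OINLMARK in the init list, and a block of DCL, AS2, GOTO. A sketch of the kind of package-level initializer that produces this shape (types and names are illustrative):

package main

import "fmt"

type T struct{ a, b int }

// newT is small enough to inline, so "var sink = newT()" reaches the
// compiler as an INLCALL tree of the shape matched above.
func newT() *T { return &T{a: 1, b: 2} }

// When the matcher succeeds, sink is initialized statically instead of
// in a generated init function.
var sink = newT()

func main() { fmt.Println(*sink) }
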
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
index 1c0bfc190ecf29..d86d77e73f70ec 100644
--- a/src/cmd/compile/internal/syntax/printer.go
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -138,10 +138,6 @@ func impliesSemi(tok token) bool {
// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion
-func lineComment(text string) bool {
- return strings.HasPrefix(text, "//")
-}
-
func (p *printer) addWhitespace(kind ctrlSymbol, text string) {
p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/})
switch kind {
diff --git a/src/cmd/compile/internal/test/bench_test.go b/src/cmd/compile/internal/test/bench_test.go
index 472460009170e2..7303f672fee2a2 100644
--- a/src/cmd/compile/internal/test/bench_test.go
+++ b/src/cmd/compile/internal/test/bench_test.go
@@ -122,3 +122,26 @@ func BenchmarkBitToggleConst(b *testing.B) {
}
}
}
+
+func BenchmarkMulNeg(b *testing.B) {
+ x := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s = (-x[i]) * 11
+ }
+ globl = s
+ }
+}
+
+func BenchmarkMul2Neg(b *testing.B) {
+ x := make([]int64, 1024)
+ y := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s = (-x[i]) * (-y[i])
+ }
+ globl = s
+ }
+}
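
The new benchmarks measure multiplications with negated operands, where the compiler can cancel or fold the negations. The identities involved are exact under Go's wrapping two's-complement arithmetic, as this standalone check illustrates:

package main

import "fmt"

// (-x) * c is the same value as x * (-c), so the negation can be folded
// into the constant; (-x) * (-y) is the same value as x * y, so both
// negations cancel. Both identities hold even when the multiply overflows.
func mulNegConst(x int64) int64 { return (-x) * 11 }

func mulNegNeg(x, y int64) int64 { return (-x) * (-y) }

func main() {
	fmt.Println(mulNegConst(3) == 3*-11) // true
	fmt.Println(mulNegNeg(3, 4) == 3*4)  // true
}
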
diff --git a/src/cmd/compile/internal/test/mulconst_test.go b/src/cmd/compile/internal/test/mulconst_test.go
index c4aed84432de50..1d1b351af19640 100644
--- a/src/cmd/compile/internal/test/mulconst_test.go
+++ b/src/cmd/compile/internal/test/mulconst_test.go
@@ -143,7 +143,7 @@ func BenchmarkMulconstI32(b *testing.B) {
}
mulSinkI32 = x
})
- // -120x = 8x - 120x
+ // -120x = 8x - 128x
b.Run("-120", func(b *testing.B) {
x := int32(1)
for i := 0; i < b.N; i++ {
@@ -202,7 +202,7 @@ func BenchmarkMulconstI64(b *testing.B) {
}
mulSinkI64 = x
})
- // -120x = 8x - 120x
+ // -120x = 8x - 128x
b.Run("-120", func(b *testing.B) {
x := int64(1)
for i := 0; i < b.N; i++ {
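
The comment change in mulconst_test.go is purely an arithmetic correction: the shift-based strategy for multiplying by -120 is 8x - 128x (two shifts), not 8x - 120x. A quick standalone check:

package main

import "fmt"

// -120*x decomposed into two shifts: (x<<3) - (x<<7) = 8x - 128x = -120x.
func mulMinus120(x int64) int64 { return (x << 3) - (x << 7) }

func main() {
	fmt.Println(mulMinus120(5) == 5*-120) // true
}
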
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 29d6b2cc2dcb55..f3498f60090118 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -235,27 +235,7 @@
package typecheck
-import (
- "strings"
-)
-
const blankMarker = "$"
-// TparamName returns the real name of a type parameter, after stripping its
-// qualifying prefix and reverting blank-name encoding. See TparamExportName
-// for details.
-func TparamName(exportName string) string {
- // Remove the "path" from the type param name that makes it unique.
- ix := strings.LastIndex(exportName, ".")
- if ix < 0 {
- return ""
- }
- name := exportName[ix+1:]
- if strings.HasPrefix(name, blankMarker) {
- return "_"
- }
- return name
-}
-
// The name used for dictionary parameters or local variables.
const LocalDictName = ".dict"
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index ac49f251bb93a4..2ca8e7fb861626 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -19,9 +19,6 @@ func RangeExprType(t *types.Type) *types.Type {
return t
}
-func typecheckrangeExpr(n *ir.RangeStmt) {
-}
-
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 1c7f0a19e9158d..6a3e9b512e72d5 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1694,13 +1694,6 @@ func fieldsHasShape(fields []*Field) bool {
return false
}
-// newBasic returns a new basic type of the given kind.
-func newBasic(kind Kind, obj Object) *Type {
- t := newType(kind)
- t.obj = obj
- return t
-}
-
// NewInterface returns a new interface for the given methods and
// embedded types. Embedded types are specified as fields with no Sym.
func NewInterface(methods []*Field) *Type {
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 49cc0e54ecf750..8752eff99212e6 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -187,10 +187,6 @@ type Config struct {
EnableAlias bool
}
-func srcimporter_setUsesCgo(conf *Config) {
- conf.go115UsesCgo = true
-}
-
// Info holds result type information for a type-checked package.
// Only the information for which a map is provided is collected.
// If the package has type errors, the collected information may
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 31a1aa2abe9290..411a1719ce4bb5 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -22,7 +22,7 @@ var nopos syntax.Pos
const debug = false // leave on during development
// position tracing for panics during type checking
-const tracePos = false // TODO(markfreeman): check performance implications
+const tracePos = true
// _aliasAny changes the behavior of [Scope.Lookup] for "any" in the
// [Universe] scope.
diff --git a/src/cmd/compile/internal/types2/compilersupport.go b/src/cmd/compile/internal/types2/compilersupport.go
index 20a13642887a7f..d29241a2ed70b9 100644
--- a/src/cmd/compile/internal/types2/compilersupport.go
+++ b/src/cmd/compile/internal/types2/compilersupport.go
@@ -13,12 +13,6 @@ func AsPointer(t Type) *Pointer {
return u
}
-// If t is a signature, AsSignature returns that type, otherwise it returns nil.
-func AsSignature(t Type) *Signature {
- u, _ := t.Underlying().(*Signature)
- return u
-}
-
// If typ is a type parameter, CoreType returns the single underlying
// type of all types in the corresponding type constraint if it exists, or
// nil otherwise. If the type set contains only unrestricted and restricted
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index fbfc56a39c3071..b9e226b20741fc 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -131,6 +131,14 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.BinaryExpr)
n.X = walkExpr(n.X, init)
n.Y = walkExpr(n.Y, init)
+ if n.Op() == ir.OUNSAFEADD && ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ // For unsafe.Add(p, n), just walk "unsafe.Pointer(uintptr(p)+uintptr(n))"
+ // for the side effects of validating unsafe.Pointer rules.
+ x := typecheck.ConvNop(n.X, types.Types[types.TUINTPTR])
+ y := typecheck.Conv(n.Y, types.Types[types.TUINTPTR])
+ conv := typecheck.ConvNop(ir.NewBinaryExpr(n.Pos(), ir.OADD, x, y), types.Types[types.TUNSAFEPTR])
+ walkExpr(conv, init)
+ }
return n
case ir.OUNSAFESLICE:
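
The new OUNSAFEADD case only has an effect under checkptr instrumentation (enabled by -d=checkptr, and implied by the race detector on most platforms): walking the equivalent uintptr addition lets the existing unsafe.Pointer validation see the result of unsafe.Add. A sketch of code this should now catch at run time, assuming standard checkptr behavior:

package main

import "unsafe"

func main() {
	var buf [8]byte
	p := unsafe.Pointer(&buf[0])
	// With checkptr enabled (e.g. go run -race, or -gcflags=all=-d=checkptr),
	// this addition is now also validated as
	// unsafe.Pointer(uintptr(p) + 16); the result points past the end of
	// buf's allocation, so it should be reported as a fatal checkptr error.
	bad := unsafe.Add(p, 16)
	_ = bad
}
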
diff --git a/src/cmd/dist/README b/src/cmd/dist/README
index 78a6d42d0a80be..d6005200407dfd 100644
--- a/src/cmd/dist/README
+++ b/src/cmd/dist/README
@@ -4,18 +4,17 @@ As of Go 1.5, dist and other parts of the compiler toolchain are written
in Go, making bootstrapping a little more involved than in the past.
The approach is to build the current release of Go with an earlier one.
-The process to install Go 1.x, for x ≥ 24, is:
+The process to install Go 1.x, for x ≥ 26, is:
-1. Build cmd/dist with Go 1.22.6.
-2. Using dist, build Go 1.x compiler toolchain with Go 1.22.6.
+1. Build cmd/dist with Go 1.24.6.
+2. Using dist, build Go 1.x compiler toolchain with Go 1.24.6.
3. Using dist, rebuild Go 1.x compiler toolchain with itself.
4. Using dist, build Go 1.x cmd/go (as go_bootstrap) with Go 1.x compiler toolchain.
5. Using go_bootstrap, build the remaining Go 1.x standard library and commands.
-Because of backward compatibility, although the steps above say Go 1.22.6,
-in practice any release ≥ Go 1.22.6 but < Go 1.x will work as the bootstrap base.
+Because of backward compatibility, although the steps above say Go 1.24.6,
+in practice any release ≥ Go 1.24.6 but < Go 1.x will work as the bootstrap base.
Releases ≥ Go 1.x are very likely to work as well.
-See https://go.dev/s/go15bootstrap for more details about the original bootstrap
-and https://go.dev/issue/54265 for details about later bootstrap version bumps.
-
+See go.dev/s/go15bootstrap for more details about the original bootstrap
+and go.dev/issue/54265 for details about later bootstrap version bumps.
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 024050c2dd70c4..fb70047dd0e81c 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -1819,7 +1819,6 @@ var cgoEnabled = map[string]bool{
"solaris/amd64": true,
"windows/386": true,
"windows/amd64": true,
- "windows/arm": false,
"windows/arm64": true,
}
@@ -1828,9 +1827,9 @@ var cgoEnabled = map[string]bool{
// get filtered out of cgoEnabled for 'dist list'.
// See go.dev/issue/56679.
var broken = map[string]bool{
- "linux/sparc64": true, // An incomplete port. See CL 132155.
- "openbsd/mips64": true, // Broken: go.dev/issue/58110.
- "windows/arm": true, // Broken: go.dev/issue/68552.
+ "freebsd/riscv64": true, // Broken: go.dev/issue/73568.
+ "linux/sparc64": true, // An incomplete port. See CL 132155.
+ "openbsd/mips64": true, // Broken: go.dev/issue/58110.
}
// List of platforms which are first class ports. See go.dev/issue/38874.
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 013b769b90f0e2..b7e58919815c8a 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -121,7 +121,7 @@ var ignoreSuffixes = []string{
"~",
}
-const minBootstrap = "go1.22.6"
+const minBootstrap = "go1.24.6"
var tryDirs = []string{
"sdk/" + minBootstrap,
diff --git a/src/cmd/dist/imports.go b/src/cmd/dist/imports.go
index 05dd84d0f12a0a..0ec2b06c4f6e7a 100644
--- a/src/cmd/dist/imports.go
+++ b/src/cmd/dist/imports.go
@@ -205,18 +205,6 @@ func (r *importReader) readImport(imports *[]string) {
r.readString(imports)
}
-// readComments is like ioutil.ReadAll, except that it only reads the leading
-// block of comments in the file.
-func readComments(f io.Reader) ([]byte, error) {
- r := &importReader{b: bufio.NewReader(f)}
- r.peekByte(true)
- if r.err == nil && !r.eof {
- // Didn't reach EOF, so must have found a non-space byte. Remove it.
- r.buf = r.buf[:len(r.buf)-1]
- }
- return r.buf, r.err
-}
-
// readimports returns the imports found in the named file.
func readimports(file string) []string {
var imports []string
diff --git a/src/cmd/dist/notgo122.go b/src/cmd/dist/notgo124.go
similarity index 62%
rename from src/cmd/dist/notgo122.go
rename to src/cmd/dist/notgo124.go
index 229a26e757b20c..dc6ef4d8bd7fe1 100644
--- a/src/cmd/dist/notgo122.go
+++ b/src/cmd/dist/notgo124.go
@@ -2,20 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Go 1.24 and later requires Go 1.22.6 as the bootstrap toolchain.
+// Go 1.26 and later requires Go 1.24.6 as the minimum bootstrap toolchain.
// If cmd/dist is built using an earlier Go version, this file will be
// included in the build and cause an error like:
//
// % GOROOT_BOOTSTRAP=$HOME/sdk/go1.16 ./make.bash
// Building Go cmd/dist using /Users/rsc/sdk/go1.16. (go1.16 darwin/amd64)
-// found packages main (build.go) and building_Go_requires_Go_1_22_6_or_later (notgo122.go) in /Users/rsc/go/src/cmd/dist
+// found packages main (build.go) and building_Go_requires_Go_1_24_6_or_later (notgo124.go) in /Users/rsc/go/src/cmd/dist
// %
//
// which is the best we can do under the circumstances.
//
-// See go.dev/issue/44505 for more background on
-// why Go moved on from Go 1.4 for bootstrap.
+// See go.dev/issue/44505 and go.dev/issue/54265 for more
+// background on why Go moved on from Go 1.4 for bootstrap.
-//go:build !go1.22
+//go:build !go1.24
-package building_Go_requires_Go_1_22_6_or_later
+package building_Go_requires_Go_1_24_6_or_later
diff --git a/src/cmd/dist/sys_windows.go b/src/cmd/dist/sys_windows.go
index 37dffb8541447e..fbcbf6fb887048 100644
--- a/src/cmd/dist/sys_windows.go
+++ b/src/cmd/dist/sys_windows.go
@@ -33,7 +33,6 @@ type systeminfo struct {
const (
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_INTEL = 0
- PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_ARM64 = 12
PROCESSOR_ARCHITECTURE_IA64 = 6
)
@@ -47,8 +46,6 @@ func sysinit() {
gohostarch = "amd64"
case PROCESSOR_ARCHITECTURE_INTEL:
gohostarch = "386"
- case PROCESSOR_ARCHITECTURE_ARM:
- gohostarch = "arm"
case PROCESSOR_ARCHITECTURE_ARM64:
gohostarch = "arm64"
default:
diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go
index 4d5e3589dc27d3..7db06f86ea7297 100644
--- a/src/cmd/dist/util.go
+++ b/src/cmd/dist/util.go
@@ -362,16 +362,6 @@ func errprintf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format, args...)
}
-// xsamefile reports whether f1 and f2 are the same file (or dir).
-func xsamefile(f1, f2 string) bool {
- fi1, err1 := os.Stat(f1)
- fi2, err2 := os.Stat(f2)
- if err1 != nil || err2 != nil {
- return f1 == f2
- }
- return os.SameFile(fi1, fi2)
-}
-
func xgetgoarm() string {
// If we're building on an actual arm system, and not building
// a cross-compiling toolchain, try to exec ourselves
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 7403b92cd14a3c..e472f25eea8322 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -2527,7 +2527,7 @@
// The default is GOFIPS140=off, which makes no FIPS-140 changes at all.
// Other values enable FIPS-140 compliance measures and select alternate
// versions of the cryptography source code.
-// See https://go.dev/security/fips140 for details.
+// See https://go.dev/doc/security/fips140 for details.
// GO_EXTLINK_ENABLED
// Whether the linker should use external linking mode
// when using -linkmode=auto with code that uses cgo.
diff --git a/src/cmd/go/internal/gover/toolchain.go b/src/cmd/go/internal/gover/toolchain.go
index 43b117edcf0023..a24df98168056b 100644
--- a/src/cmd/go/internal/gover/toolchain.go
+++ b/src/cmd/go/internal/gover/toolchain.go
@@ -52,16 +52,6 @@ func maybeToolchainVersion(name string) string {
return FromToolchain(name)
}
-// ToolchainMax returns the maximum of x and y interpreted as toolchain names,
-// compared using Compare(FromToolchain(x), FromToolchain(y)).
-// If x and y compare equal, Max returns x.
-func ToolchainMax(x, y string) string {
- if Compare(FromToolchain(x), FromToolchain(y)) < 0 {
- return y
- }
- return x
-}
-
// Startup records the information that went into the startup-time version switch.
// It is initialized by switchGoToolchain.
var Startup struct {
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index 7f8565a3cbab82..9d44b862eec3e5 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -695,7 +695,7 @@ Special-purpose environment variables:
The default is GOFIPS140=off, which makes no FIPS-140 changes at all.
Other values enable FIPS-140 compliance measures and select alternate
versions of the cryptography source code.
- See https://go.dev/security/fips140 for details.
+ See https://go.dev/doc/security/fips140 for details.
GO_EXTLINK_ENABLED
Whether the linker should use external linking mode
when using -linkmode=auto with code that uses cgo.
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
index d37331892d1efb..f0452f014777f9 100644
--- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
@@ -8,7 +8,6 @@
package filelock
import (
- "errors"
"io/fs"
)
@@ -74,10 +73,3 @@ func (lt lockType) String() string {
return "Unlock"
}
}
-
-// IsNotSupported returns a boolean indicating whether the error is known to
-// report that a function is not supported (possibly for a specific input).
-// It is satisfied by errors.ErrUnsupported as well as some syscall errors.
-func IsNotSupported(err error) bool {
- return errors.Is(err, errors.ErrUnsupported)
-}
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index b445ac24862202..8a1c12b90a5c07 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -387,23 +387,6 @@ func (r *gitRepo) Latest(ctx context.Context) (*RevInfo, error) {
return info, nil
}
-// findRef finds some ref name for the given hash,
-// for use when the server requires giving a ref instead of a hash.
-// There may be multiple ref names for a given hash,
-// in which case this returns some name - it doesn't matter which.
-func (r *gitRepo) findRef(ctx context.Context, hash string) (ref string, ok bool) {
- refs, err := r.loadRefs(ctx)
- if err != nil {
- return "", false
- }
- for ref, h := range refs {
- if h == hash {
- return ref, true
- }
- }
- return "", false
-}
-
func (r *gitRepo) checkConfigSHA256(ctx context.Context) bool {
if hashType, sha256CfgErr := r.runGit(ctx, "git", "config", "extensions.objectformat"); sha256CfgErr == nil {
return "sha256" == strings.TrimSpace(string(hashType))
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index afed35c970975e..3df469d28525fa 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -1009,10 +1009,6 @@ func LegacyGoMod(modPath string) []byte {
return fmt.Appendf(nil, "module %s\n", modfile.AutoQuote(modPath))
}
-func (r *codeRepo) modPrefix(rev string) string {
- return r.modPath + "@" + rev
-}
-
func (r *codeRepo) retractedVersions(ctx context.Context) (func(string) bool, error) {
vs, err := r.Versions(ctx, "")
if err != nil {
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 31e9244e2dce54..25dbf3972fd465 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -453,7 +453,7 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts)
if queries[i].version == "none" {
opts.DropTools = append(opts.DropTools, m.Pkgs...)
} else {
- opts.AddTools = append(opts.DropTools, m.Pkgs...)
+ opts.AddTools = append(opts.AddTools, m.Pkgs...)
}
}
}
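
The one-character modget fix corrects a copy-paste slip: tool additions were appended onto the drop list's backing slice, so the add list wrongly inherited any pending drops. Distilled (the struct here mirrors just the two fields used above):

package main

import "fmt"

type writeOpts struct {
	DropTools []string
	AddTools  []string
}

func main() {
	o := writeOpts{DropTools: []string{"old/tool"}}
	// Buggy form, as fixed above: appends the new entry to the wrong base
	// slice, so the "added" list starts with the drop list's contents.
	buggy := append(o.DropTools, "new/tool")
	// Fixed form:
	o.AddTools = append(o.AddTools, "new/tool")
	fmt.Println(buggy)      // [old/tool new/tool]
	fmt.Println(o.AddTools) // [new/tool]
}
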
diff --git a/src/cmd/go/internal/modindex/build.go b/src/cmd/go/internal/modindex/build.go
index d7e09fed25f43a..761bda8d39b158 100644
--- a/src/cmd/go/internal/modindex/build.go
+++ b/src/cmd/go/internal/modindex/build.go
@@ -10,7 +10,6 @@ package modindex
import (
"bytes"
"cmd/go/internal/fsys"
- "cmd/go/internal/str"
"errors"
"fmt"
"go/ast"
@@ -118,96 +117,12 @@ func (ctxt *Context) joinPath(elem ...string) string {
return filepath.Join(elem...)
}
-// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
-func (ctxt *Context) splitPathList(s string) []string {
- if f := ctxt.SplitPathList; f != nil {
- return f(s)
- }
- return filepath.SplitList(s)
-}
-
-// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
-func (ctxt *Context) isAbsPath(path string) bool {
- if f := ctxt.IsAbsPath; f != nil {
- return f(path)
- }
- return filepath.IsAbs(path)
-}
-
// isDir reports whether path is a directory.
func isDir(path string) bool {
fi, err := fsys.Stat(path)
return err == nil && fi.IsDir()
}
-// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
-// the local file system to answer the question.
-func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
- if f := ctxt.HasSubdir; f != nil {
- return f(root, dir)
- }
-
- // Try using paths we received.
- if rel, ok = hasSubdir(root, dir); ok {
- return
- }
-
- // Try expanding symlinks and comparing
- // expanded against unexpanded and
- // expanded against expanded.
- rootSym, _ := filepath.EvalSymlinks(root)
- dirSym, _ := filepath.EvalSymlinks(dir)
-
- if rel, ok = hasSubdir(rootSym, dir); ok {
- return
- }
- if rel, ok = hasSubdir(root, dirSym); ok {
- return
- }
- return hasSubdir(rootSym, dirSym)
-}
-
-// hasSubdir reports if dir is within root by performing lexical analysis only.
-func hasSubdir(root, dir string) (rel string, ok bool) {
- root = str.WithFilePathSeparator(filepath.Clean(root))
- dir = filepath.Clean(dir)
- if !strings.HasPrefix(dir, root) {
- return "", false
- }
- return filepath.ToSlash(dir[len(root):]), true
-}
-
-// gopath returns the list of Go path directories.
-func (ctxt *Context) gopath() []string {
- var all []string
- for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
- if p == "" || p == ctxt.GOROOT {
- // Empty paths are uninteresting.
- // If the path is the GOROOT, ignore it.
- // People sometimes set GOPATH=$GOROOT.
- // Do not get confused by this common mistake.
- continue
- }
- if strings.HasPrefix(p, "~") {
- // Path segments starting with ~ on Unix are almost always
- // users who have incorrectly quoted ~ while setting GOPATH,
- // preventing it from expanding to $HOME.
- // The situation is made more confusing by the fact that
- // bash allows quoted ~ in $PATH (most shells do not).
- // Do not get confused by this, and do not try to use the path.
- // It does not exist, and printing errors about it confuses
- // those users even more, because they think "sure ~ exists!".
- // The go command diagnoses this situation and prints a
- // useful error.
- // On Windows, ~ is used in short names, such as c:\progra~1
- // for c:\program files.
- continue
- }
- all = append(all, p)
- }
- return all
-}
-
var defaultToolTags, defaultReleaseTags []string
// NoGoError is the error used by Import to describe a directory
@@ -266,114 +181,12 @@ func fileListForExt(p *build.Package, ext string) *[]string {
return nil
}
-var errNoModules = errors.New("not using modules")
-
-func findImportComment(data []byte) (s string, line int) {
- // expect keyword package
- word, data := parseWord(data)
- if string(word) != "package" {
- return "", 0
- }
-
- // expect package name
- _, data = parseWord(data)
-
- // now ready for import comment, a // or /* */ comment
- // beginning and ending on the current line.
- for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
- data = data[1:]
- }
-
- var comment []byte
- switch {
- case bytes.HasPrefix(data, slashSlash):
- comment, _, _ = bytes.Cut(data[2:], newline)
- case bytes.HasPrefix(data, slashStar):
- var ok bool
- comment, _, ok = bytes.Cut(data[2:], starSlash)
- if !ok {
- // malformed comment
- return "", 0
- }
- if bytes.Contains(comment, newline) {
- return "", 0
- }
- }
- comment = bytes.TrimSpace(comment)
-
- // split comment into `import`, `"pkg"`
- word, arg := parseWord(comment)
- if string(word) != "import" {
- return "", 0
- }
-
- line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
- return strings.TrimSpace(string(arg)), line
-}
-
var (
slashSlash = []byte("//")
slashStar = []byte("/*")
starSlash = []byte("*/")
- newline = []byte("\n")
)
-// skipSpaceOrComment returns data with any leading spaces or comments removed.
-func skipSpaceOrComment(data []byte) []byte {
- for len(data) > 0 {
- switch data[0] {
- case ' ', '\t', '\r', '\n':
- data = data[1:]
- continue
- case '/':
- if bytes.HasPrefix(data, slashSlash) {
- i := bytes.Index(data, newline)
- if i < 0 {
- return nil
- }
- data = data[i+1:]
- continue
- }
- if bytes.HasPrefix(data, slashStar) {
- data = data[2:]
- i := bytes.Index(data, starSlash)
- if i < 0 {
- return nil
- }
- data = data[i+2:]
- continue
- }
- }
- break
- }
- return data
-}
-
-// parseWord skips any leading spaces or comments in data
-// and then parses the beginning of data as an identifier or keyword,
-// returning that word and what remains after the word.
-func parseWord(data []byte) (word, rest []byte) {
- data = skipSpaceOrComment(data)
-
- // Parse past leading word characters.
- rest = data
- for {
- r, size := utf8.DecodeRune(rest)
- if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
- rest = rest[size:]
- continue
- }
- break
- }
-
- word = data[:len(data)-len(rest)]
- if len(word) == 0 {
- return nil, nil
- }
-
- return word, rest
-}
-
var dummyPkg build.Package
// fileInfo records information learned about a file included in a build.
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
index d87fb06b57e173..399e89eca3cf47 100644
--- a/src/cmd/go/internal/modindex/read.go
+++ b/src/cmd/go/internal/modindex/read.go
@@ -1039,11 +1039,6 @@ func (r *reader) string() string {
return r.d.stringTableAt(r.int())
}
-// bool reads the next bool.
-func (r *reader) bool() bool {
- return r.int() != 0
-}
-
// tokpos reads the next token.Position.
func (r *reader) tokpos() token.Position {
return token.Position{
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index eefc0083c93ddb..cd3ec4f102473b 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -658,11 +658,6 @@ func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (chang
return changed, nil
}
-// OverrideRoots edits the global requirement roots by replacing the specific module versions.
-func OverrideRoots(ctx context.Context, replace []module.Version) {
- requirements = overrideRoots(ctx, requirements, replace)
-}
-
func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements {
drop := make(map[string]bool)
for _, m := range replace {
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index cb9d74df68cb5b..25151103edb579 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -305,30 +305,6 @@ func (mms *MainModuleSet) Godebugs() []*modfile.Godebug {
return nil
}
-// Toolchain returns the toolchain set on the single module, in module mode,
-// or the go.work file in workspace mode.
-func (mms *MainModuleSet) Toolchain() string {
- if inWorkspaceMode() {
- if mms.workFile != nil && mms.workFile.Toolchain != nil {
- return mms.workFile.Toolchain.Name
- }
- return "go" + mms.GoVersion()
- }
- if mms != nil && len(mms.versions) == 1 {
- f := mms.ModFile(mms.mustGetSingleMainModule())
- if f == nil {
- // Special case: we are outside a module, like 'go run x.go'.
- // Assume the local Go version.
- // TODO(#49228): Clean this up; see loadModFile.
- return gover.LocalToolchain()
- }
- if f.Toolchain != nil {
- return f.Toolchain.Name
- }
- }
- return "go" + mms.GoVersion()
-}
-
func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version {
return mms.workFileReplaceMap
}
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index 39a1f5f74c228c..71c36e80cbaaab 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -6,7 +6,6 @@ package work
import (
"bufio"
- "bytes"
"fmt"
"internal/buildcfg"
"internal/platform"
@@ -438,32 +437,6 @@ func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, erro
return symabis, nil
}
-// toolVerify checks that the command line args writes the same output file
-// if run using newTool instead.
-// Unused now but kept around for future use.
-func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []any) error {
- newArgs := make([]any, len(args))
- copy(newArgs, args)
- newArgs[1] = base.Tool(newTool)
- newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
- if err := b.Shell(a).run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
- return err
- }
- data1, err := os.ReadFile(ofile)
- if err != nil {
- return err
- }
- data2, err := os.ReadFile(ofile + ".new")
- if err != nil {
- return err
- }
- if !bytes.Equal(data1, data2) {
- return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " "))
- }
- os.Remove(ofile + ".new")
- return nil
-}
-
func (gcToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
absOfiles := make([]string, 0, len(ofiles))
for _, f := range ofiles {
diff --git a/src/cmd/go/testdata/script/mod_get_tool_issue74035.txt b/src/cmd/go/testdata/script/mod_get_tool_issue74035.txt
new file mode 100644
index 00000000000000..d6fa592c7b2726
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_get_tool_issue74035.txt
@@ -0,0 +1,25 @@
+# Regression test for https://go.dev/issue/74035.
+go get -tool example.com/foo/cmd/a example.com/foo/cmd/b
+cmp go.mod go.mod.want
+
+-- go.mod --
+module example.com/foo
+go 1.24
+-- go.mod.want --
+module example.com/foo
+
+go 1.24
+
+tool (
+ example.com/foo/cmd/a
+ example.com/foo/cmd/b
+)
+-- cmd/a/a.go --
+package a
+
+func main() {}
+
+-- cmd/b/b.go --
+package b
+
+func main() {}
diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go
index 393034d7769f2d..b8abc0d4f648fd 100644
--- a/src/cmd/internal/archive/archive.go
+++ b/src/cmd/internal/archive/archive.go
@@ -498,20 +498,6 @@ func exactly16Bytes(s string) string {
// architecture-independent object file output
const HeaderSize = 60
-func ReadHeader(b *bufio.Reader, name string) int {
- var buf [HeaderSize]byte
- if _, err := io.ReadFull(b, buf[:]); err != nil {
- return -1
- }
- aname := strings.Trim(string(buf[0:16]), " ")
- if !strings.HasPrefix(aname, name) {
- return -1
- }
- asize := strings.Trim(string(buf[48:58]), " ")
- i, _ := strconv.Atoi(asize)
- return i
-}
-
func FormatHeader(arhdr []byte, name string, size int64) {
copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
}
diff --git a/src/cmd/internal/gcprog/gcprog.go b/src/cmd/internal/gcprog/gcprog.go
index eeea53daf4a5e9..52505f3b20243d 100644
--- a/src/cmd/internal/gcprog/gcprog.go
+++ b/src/cmd/internal/gcprog/gcprog.go
@@ -56,11 +56,6 @@ func (w *Writer) Debug(out io.Writer) {
w.debug = out
}
-// BitIndex returns the number of bits written to the bit stream so far.
-func (w *Writer) BitIndex() int64 {
- return w.index
-}
-
// byte writes the byte x to the output.
func (w *Writer) byte(x byte) {
if w.debug != nil {
@@ -98,20 +93,6 @@ func (w *Writer) Ptr(index int64) {
w.lit(1)
}
-// ShouldRepeat reports whether it would be worthwhile to
-// use a Repeat to describe c elements of n bits each,
-// compared to just emitting c copies of the n-bit description.
-func (w *Writer) ShouldRepeat(n, c int64) bool {
- // Should we lay out the bits directly instead of
- // encoding them as a repetition? Certainly if count==1,
- // since there's nothing to repeat, but also if the total
- // size of the plain pointer bits for the type will fit in
- // 4 or fewer bytes, since using a repetition will require
- // flushing the current bits plus at least one byte for
- // the repeat size and one for the repeat count.
- return c > 1 && c*n > 4*8
-}
-
// Repeat emits an instruction to repeat the description
// of the last n words c times (including the initial description, c+1 times in total).
func (w *Writer) Repeat(n, c int64) {
@@ -163,36 +144,6 @@ func (w *Writer) ZeroUntil(index int64) {
w.Repeat(1, skip-1)
}
-// Append emits the given GC program into the current output.
-// The caller asserts that the program emits n bits (describes n words),
-// and Append panics if that is not true.
-func (w *Writer) Append(prog []byte, n int64) {
- w.flushlit()
- if w.debug != nil {
- fmt.Fprintf(w.debug, "gcprog: append prog for %d ptrs\n", n)
- fmt.Fprintf(w.debug, "\t")
- }
- n1 := progbits(prog)
- if n1 != n {
- panic("gcprog: wrong bit count in append")
- }
- // The last byte of the prog terminates the program.
- // Don't emit that, or else our own program will end.
- for i, x := range prog[:len(prog)-1] {
- if w.debug != nil {
- if i > 0 {
- fmt.Fprintf(w.debug, " ")
- }
- fmt.Fprintf(w.debug, "%02x", x)
- }
- w.byte(x)
- }
- if w.debug != nil {
- fmt.Fprintf(w.debug, "\n")
- }
- w.index += n
-}
-
// progbits returns the length of the bit stream encoded by the program p.
func progbits(p []byte) int64 {
var n int64
diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go
index a9342427efbc09..38da67076d5bb2 100644
--- a/src/cmd/internal/goobj/objfile.go
+++ b/src/cmd/internal/goobj/objfile.go
@@ -635,29 +635,11 @@ func (r *Reader) uint64At(off uint32) uint64 {
return binary.LittleEndian.Uint64(b)
}
-func (r *Reader) int64At(off uint32) int64 {
- return int64(r.uint64At(off))
-}
-
func (r *Reader) uint32At(off uint32) uint32 {
b := r.BytesAt(off, 4)
return binary.LittleEndian.Uint32(b)
}
-func (r *Reader) int32At(off uint32) int32 {
- return int32(r.uint32At(off))
-}
-
-func (r *Reader) uint16At(off uint32) uint16 {
- b := r.BytesAt(off, 2)
- return binary.LittleEndian.Uint16(b)
-}
-
-func (r *Reader) uint8At(off uint32) uint8 {
- b := r.BytesAt(off, 1)
- return b[0]
-}
-
func (r *Reader) StringAt(off uint32, len uint32) string {
b := r.b[off : off+len]
if r.readonly {
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 344b73e658f11e..0c9c70aa89aee1 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -1054,15 +1054,6 @@ var sysInstFields = map[SpecialOperand]struct {
// Used for padding NOOP instruction
const OP_NOOP = 0xd503201f
-// pcAlignPadLength returns the number of bytes required to align pc to alignedValue,
-// reporting an error if alignedValue is not a power of two or is out of range.
-func pcAlignPadLength(ctxt *obj.Link, pc int64, alignedValue int64) int {
- if !((alignedValue&(alignedValue-1) == 0) && 8 <= alignedValue && alignedValue <= 2048) {
- ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", alignedValue)
- }
- return int(-pc & (alignedValue - 1))
-}
-
// size returns the size of the sequence of machine instructions when p is encoded with o.
// Usually it just returns o.size directly; in some cases it checks whether the optimization
// conditions are met, and if so returns the size of the optimized instruction sequence.
@@ -1209,10 +1200,6 @@ type codeBuffer struct {
data *[]byte
}
-func (cb *codeBuffer) pc() int64 {
- return int64(len(*cb.data))
-}
-
// Write a sequence of opcodes into the code buffer.
func (cb *codeBuffer) emit(op ...uint32) {
for _, o := range op {
diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go
index 193993ec4d9b4c..f5d20cfabe76d5 100644
--- a/src/cmd/internal/obj/loong64/a.out.go
+++ b/src/cmd/internal/obj/loong64/a.out.go
@@ -567,6 +567,11 @@ const (
AMOVVF
AMOVVD
+ // 2.2.1.3
+ AALSLW
+ AALSLWU
+ AALSLV
+
// 2.2.1.8
AORN
AANDN
@@ -743,6 +748,9 @@ const (
AFTINTRNEVF
AFTINTRNEVD
+ // 3.2.4.2
+ AFSEL
+
// LSX and LASX memory access instructions
AVMOVQ
AXVMOVQ
@@ -816,6 +824,31 @@ const (
AXVPCNTW
AXVPCNTV
+ AVBITCLRB
+ AVBITCLRH
+ AVBITCLRW
+ AVBITCLRV
+ AVBITSETB
+ AVBITSETH
+ AVBITSETW
+ AVBITSETV
+ AVBITREVB
+ AVBITREVH
+ AVBITREVW
+ AVBITREVV
+ AXVBITCLRB
+ AXVBITCLRH
+ AXVBITCLRW
+ AXVBITCLRV
+ AXVBITSETB
+ AXVBITSETH
+ AXVBITSETW
+ AXVBITSETV
+ AXVBITREVB
+ AXVBITREVH
+ AXVBITREVW
+ AXVBITREVV
+
// LSX and LASX integer comparison instruction
AVSEQB
AXVSEQB
diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go
index bf9b0722cc39d7..67b5f2fc809927 100644
--- a/src/cmd/internal/obj/loong64/anames.go
+++ b/src/cmd/internal/obj/loong64/anames.go
@@ -125,6 +125,9 @@ var Anames = []string{
"MOVDV",
"MOVVF",
"MOVVD",
+ "ALSLW",
+ "ALSLWU",
+ "ALSLV",
"ORN",
"ANDN",
"AMSWAPB",
@@ -261,6 +264,7 @@ var Anames = []string{
"FTINTRNEWD",
"FTINTRNEVF",
"FTINTRNEVD",
+ "FSEL",
"VMOVQ",
"XVMOVQ",
"VADDB",
@@ -327,6 +331,30 @@ var Anames = []string{
"XVPCNTH",
"XVPCNTW",
"XVPCNTV",
+ "VBITCLRB",
+ "VBITCLRH",
+ "VBITCLRW",
+ "VBITCLRV",
+ "VBITSETB",
+ "VBITSETH",
+ "VBITSETW",
+ "VBITSETV",
+ "VBITREVB",
+ "VBITREVH",
+ "VBITREVW",
+ "VBITREVV",
+ "XVBITCLRB",
+ "XVBITCLRH",
+ "XVBITCLRW",
+ "XVBITCLRV",
+ "XVBITSETB",
+ "XVBITSETH",
+ "XVBITSETW",
+ "XVBITSETV",
+ "XVBITREVB",
+ "XVBITREVH",
+ "XVBITREVW",
+ "XVBITREVV",
"VSEQB",
"XVSEQB",
"VSEQH",
diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go
index 6e09930183383c..76ad8e877935e1 100644
--- a/src/cmd/internal/obj/loong64/asm.go
+++ b/src/cmd/internal/obj/loong64/asm.go
@@ -154,6 +154,9 @@ var optab = []Optab{
{AFMADDF, C_FREG, C_FREG, C_NONE, C_FREG, C_NONE, 37, 4, 0, 0},
{AFMADDF, C_FREG, C_FREG, C_FREG, C_FREG, C_NONE, 37, 4, 0, 0},
+ {AFSEL, C_FCCREG, C_FREG, C_FREG, C_FREG, C_NONE, 33, 4, 0, 0},
+ {AFSEL, C_FCCREG, C_FREG, C_NONE, C_FREG, C_NONE, 33, 4, 0, 0},
+
{AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
{AMOVWU, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
{AMOVV, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
@@ -416,8 +419,13 @@ var optab = []Optab{
{AVMOVQ, C_ELEM, C_NONE, C_NONE, C_ARNG, C_NONE, 45, 4, 0, 0},
- {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 46, 4, 0, 0},
- {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 47, 20, 0, 0},
+ {AVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0},
+ {AXVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0},
+
+ {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 47, 4, 0, 0},
+ {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 48, 20, 0, 0},
+
+ {AALSLV, C_U3CON, C_REG, C_REG, C_REG, C_NONE, 64, 4, 0, 0},
{obj.APCALIGN, C_U12CON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
{obj.APCDATA, C_32CON, C_NONE, C_NONE, C_32CON, C_NONE, 0, 0, 0, 0},
@@ -726,10 +734,6 @@ func isint32(v int64) bool {
return int64(int32(v)) == v
}
-func isuint32(v uint64) bool {
- return uint64(uint32(v)) == v
-}
-
func (c *ctxt0) aclass(a *obj.Addr) int {
switch a.Type {
case obj.TYPE_NONE:
@@ -1493,6 +1497,10 @@ func buildop(ctxt *obj.Link) {
case ABFPT:
opset(ABFPF, r0)
+ case AALSLV:
+ opset(AALSLW, r0)
+ opset(AALSLWU, r0)
+
case AMOVW,
AMOVD,
AMOVF,
@@ -1512,6 +1520,7 @@ func buildop(ctxt *obj.Link) {
AWORD,
APRELD,
APRELDX,
+ AFSEL,
obj.ANOP,
obj.ATEXT,
obj.AFUNCDATA,
@@ -1830,21 +1839,33 @@ func buildop(ctxt *obj.Link) {
opset(AVSRLB, r0)
opset(AVSRAB, r0)
opset(AVROTRB, r0)
+ opset(AVBITCLRB, r0)
+ opset(AVBITSETB, r0)
+ opset(AVBITREVB, r0)
case AXVSLLB:
opset(AXVSRLB, r0)
opset(AXVSRAB, r0)
opset(AXVROTRB, r0)
+ opset(AXVBITCLRB, r0)
+ opset(AXVBITSETB, r0)
+ opset(AXVBITREVB, r0)
case AVSLLH:
opset(AVSRLH, r0)
opset(AVSRAH, r0)
opset(AVROTRH, r0)
+ opset(AVBITCLRH, r0)
+ opset(AVBITSETH, r0)
+ opset(AVBITREVH, r0)
case AXVSLLH:
opset(AXVSRLH, r0)
opset(AXVSRAH, r0)
opset(AXVROTRH, r0)
+ opset(AXVBITCLRH, r0)
+ opset(AXVBITSETH, r0)
+ opset(AXVBITREVH, r0)
case AVSLLW:
opset(AVSRLW, r0)
@@ -1858,6 +1879,9 @@ func buildop(ctxt *obj.Link) {
opset(AVSUBHU, r0)
opset(AVSUBWU, r0)
opset(AVSUBVU, r0)
+ opset(AVBITCLRW, r0)
+ opset(AVBITSETW, r0)
+ opset(AVBITREVW, r0)
case AXVSLLW:
opset(AXVSRLW, r0)
@@ -1871,16 +1895,25 @@ func buildop(ctxt *obj.Link) {
opset(AXVSUBHU, r0)
opset(AXVSUBWU, r0)
opset(AXVSUBVU, r0)
+ opset(AXVBITCLRW, r0)
+ opset(AXVBITSETW, r0)
+ opset(AXVBITREVW, r0)
case AVSLLV:
opset(AVSRLV, r0)
opset(AVSRAV, r0)
opset(AVROTRV, r0)
+ opset(AVBITCLRV, r0)
+ opset(AVBITSETV, r0)
+ opset(AVBITREVV, r0)
case AXVSLLV:
opset(AXVSRLV, r0)
opset(AXVSRAV, r0)
opset(AXVROTRV, r0)
+ opset(AXVBITCLRV, r0)
+ opset(AXVBITSETV, r0)
+ opset(AXVBITREVV, r0)
case AVSETEQV:
opset(AVSETNEV, r0)
@@ -1925,6 +1958,10 @@ func OP_RR(op uint32, r2 uint32, r3 uint32) uint32 {
return op | (r2&0x1F)<<5 | (r3&0x1F)<<0
}
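+// OP_2IRRR assembles a 2-bit immediate and three registers into op:
+// i in bits 16:15, r2 in bits 14:10, r3 in bits 9:5, r4 in bits 4:0.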
+func OP_2IRRR(op uint32, i uint32, r2 uint32, r3 uint32, r4 uint32) uint32 {
+ return op | (i&0x3)<<15 | (r2&0x1F)<<10 | (r3&0x1F)<<5 | (r4&0x1F)<<0
+}
+
func OP_16IR_5I(op uint32, i uint32, r2 uint32) uint32 {
return op | (i&0xFFFF)<<10 | (r2&0x1F)<<5 | ((i >> 16) & 0x1F)
}
@@ -2354,6 +2391,16 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
}
o1 = OP_6IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+ case 33: // fsel ca, fk, [fj], fd
+ ca := uint32(p.From.Reg)
+ fk := uint32(p.Reg)
+ fd := uint32(p.To.Reg)
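+ // The three-operand Go form omits fj; in that case fd doubles as fj.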
+ fj := fd
+ if len(p.RestArgs) > 0 {
+ fj = uint32(p.GetFrom3().Reg)
+ }
+ o1 = 0x340<<18 | (ca&0x7)<<15 | (fk&0x1F)<<10 | (fj&0x1F)<<5 | (fd & 0x1F)
+
case 34: // mov $con,fr
v := c.regoff(&p.From)
a := AADDU
@@ -2395,7 +2442,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = uint32(c.regoff(&p.From))
case 39: // vmov Rn, Vd.<T>[index]
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2407,7 +2454,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Rj << 5) | Vd
case 40: // vmov Vd.<T>[index], Rn
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2419,7 +2466,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Vj << 5) | Rd
case 41: // vmov Rn, Vd.<T>
- v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2429,7 +2476,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (Rj << 5) | Vd
case 42: // vmov xj, xd.<T>
- v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2439,7 +2486,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (Xj << 5) | Xd
case 43: // vmov xj, xd.<T>[index]
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2451,7 +2498,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Xj << 5) | Xd
case 44: // vmov xj.<T>[index], xd
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2463,7 +2510,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Xj << 5) | Xd
case 45: // vmov vj.<T>[index], vd.<T>
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2474,12 +2521,23 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
c.checkindex(p, index, m)
o1 = v | (index << 10) | (vj << 5) | vd
- case 46: // preld offset(Rbase), $hint
+ case 46: // vmov offset(rj), vd.<T>
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, true)
+ if v == 0 {
+ c.ctxt.Diag("illegal arng type combination: %v\n", p)
+ }
+
+ si := c.regoff(&p.From)
+ Rj := uint32(p.From.Reg & EXT_REG_MASK)
+ Vd := uint32(p.To.Reg & EXT_REG_MASK)
+ o1 = v | uint32(si<<10) | (Rj << 5) | Vd
+
+ case 47: // preld offset(Rbase), $hint
offs := c.regoff(&p.From)
hint := p.GetFrom3().Offset
o1 = OP_12IR_5I(c.opiir(p.As), uint32(offs), uint32(p.From.Reg), uint32(hint))
- case 47: // preldx offset(Rbase), $n, $hint
+ case 48: // preldx offset(Rbase), $n, $hint
offs := c.regoff(&p.From)
hint := p.RestArgs[1].Offset
n := uint64(p.GetFrom3().Offset)
@@ -2683,6 +2741,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
case 62: // rdtimex rd, rj
o1 = OP_RR(c.oprr(p.As), uint32(p.To.Reg), uint32(p.RegTo2))
+ case 64: // alsl rd, rj, rk, sa2
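+ // The immediate written in assembly is the shift amount sa2+1; the encoding stores sa2.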
+ sa := p.From.Offset - 1
+ if sa < 0 || sa > 3 {
+ c.ctxt.Diag("shift amount must be in the range [1, 4], got %d", p.From.Offset)
+ }
+ r := p.GetFrom3().Reg
+ o1 = OP_2IRRR(c.opirrr(p.As), uint32(sa), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
+
case 65: // mov sym@GOT, r ==> pcalau12i + ld.d
o1 = OP_IR(c.opir(APCALAU12I), uint32(0), uint32(p.To.Reg))
c.cursym.AddRel(c.ctxt, obj.Reloc{
@@ -3504,6 +3570,54 @@ func (c *ctxt0) oprrr(a obj.As) uint32 {
return 0xea75 << 15 // xvfdiv.s
case AXVDIVD:
return 0xea76 << 15 // xvfdiv.d
+ case AVBITCLRB:
+ return 0xe218 << 15 // vbitclr.b
+ case AVBITCLRH:
+ return 0xe219 << 15 // vbitclr.h
+ case AVBITCLRW:
+ return 0xe21a << 15 // vbitclr.w
+ case AVBITCLRV:
+ return 0xe21b << 15 // vbitclr.d
+ case AVBITSETB:
+ return 0xe21c << 15 // vbitset.b
+ case AVBITSETH:
+ return 0xe21d << 15 // vbitset.h
+ case AVBITSETW:
+ return 0xe21e << 15 // vbitset.w
+ case AVBITSETV:
+ return 0xe21f << 15 // vbitset.d
+ case AVBITREVB:
+ return 0xe220 << 15 // vbitrev.b
+ case AVBITREVH:
+ return 0xe221 << 15 // vbitrev.h
+ case AVBITREVW:
+ return 0xe222 << 15 // vbitrev.w
+ case AVBITREVV:
+ return 0xe223 << 15 // vbitrev.d
+ case AXVBITCLRB:
+ return 0xea18 << 15 // xvbitclr.b
+ case AXVBITCLRH:
+ return 0xea19 << 15 // xvbitclr.h
+ case AXVBITCLRW:
+ return 0xea1a << 15 // xvbitclr.w
+ case AXVBITCLRV:
+ return 0xea1b << 15 // xvbitclr.d
+ case AXVBITSETB:
+ return 0xea1c << 15 // xvbitset.b
+ case AXVBITSETH:
+ return 0xea1d << 15 // xvbitset.h
+ case AXVBITSETW:
+ return 0xea1e << 15 // xvbitset.w
+ case AXVBITSETV:
+ return 0xea1f << 15 // xvbitset.d
+ case AXVBITREVB:
+ return 0xea20 << 15 // xvbitrev.b
+ case AXVBITREVH:
+ return 0xea21 << 15 // xvbitrev.h
+ case AXVBITREVW:
+ return 0xea22 << 15 // xvbitrev.w
+ case AXVBITREVV:
+ return 0xea23 << 15 // xvbitrev.d
}
if a < 0 {
@@ -4104,6 +4218,54 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
return 0x1de6 << 18 // xvshuf4i.w
case AXVSHUF4IV:
return 0x1de7 << 18 // xvshuf4i.d
+ case AVBITCLRB:
+ return 0x1CC4<<18 | 0x1<<13 // vbitclri.b
+ case AVBITCLRH:
+ return 0x1CC4<<18 | 0x1<<14 // vbitclri.h
+ case AVBITCLRW:
+ return 0x1CC4<<18 | 0x1<<15 // vbitclri.w
+ case AVBITCLRV:
+ return 0x1CC4<<18 | 0x1<<16 // vbitclri.d
+ case AVBITSETB:
+ return 0x1CC5<<18 | 0x1<<13 // vbitseti.b
+ case AVBITSETH:
+ return 0x1CC5<<18 | 0x1<<14 // vbitseti.h
+ case AVBITSETW:
+ return 0x1CC5<<18 | 0x1<<15 // vbitseti.w
+ case AVBITSETV:
+ return 0x1CC5<<18 | 0x1<<16 // vbitseti.d
+ case AVBITREVB:
+ return 0x1CC6<<18 | 0x1<<13 // vbitrevi.b
+ case AVBITREVH:
+ return 0x1CC6<<18 | 0x1<<14 // vbitrevi.h
+ case AVBITREVW:
+ return 0x1CC6<<18 | 0x1<<15 // vbitrevi.w
+ case AVBITREVV:
+ return 0x1CC6<<18 | 0x1<<16 // vbitrevi.d
+ case AXVBITCLRB:
+ return 0x1DC4<<18 | 0x1<<13 // xvbitclri.b
+ case AXVBITCLRH:
+ return 0x1DC4<<18 | 0x1<<14 // xvbitclri.h
+ case AXVBITCLRW:
+ return 0x1DC4<<18 | 0x1<<15 // xvbitclri.w
+ case AXVBITCLRV:
+ return 0x1DC4<<18 | 0x1<<16 // xvbitclri.d
+ case AXVBITSETB:
+ return 0x1DC5<<18 | 0x1<<13 // xvbitseti.b
+ case AXVBITSETH:
+ return 0x1DC5<<18 | 0x1<<14 // xvbitseti.h
+ case AXVBITSETW:
+ return 0x1DC5<<18 | 0x1<<15 // xvbitseti.w
+ case AXVBITSETV:
+ return 0x1DC5<<18 | 0x1<<16 // xvbitseti.d
+ case AXVBITREVB:
+ return 0x1DC6<<18 | 0x1<<13 // xvbitrevi.b
+ case AXVBITREVH:
+ return 0x1DC6<<18 | 0x1<<14 // xvbitrevi.h
+ case AXVBITREVW:
+ return 0x1DC6<<18 | 0x1<<15 // xvbitrevi.w
+ case AXVBITREVV:
+ return 0x1DC6<<18 | 0x1<<16 // xvbitrevi.d
}
if a < 0 {
@@ -4114,6 +4276,19 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
return 0
}
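+// opirrr returns the opcode bits for the 2-bit-immediate, three-register
+// (alsl family) instructions, or 0 if a has no such encoding.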
+func (c *ctxt0) opirrr(a obj.As) uint32 {
+ switch a {
+ case AALSLW:
+ return 0x2 << 17 // alsl.w
+ case AALSLWU:
+ return 0x3 << 17 // alsl.wu
+ case AALSLV:
+ return 0x16 << 17 // alsl.d
+ }
+
+ return 0
+}
+
func (c *ctxt0) opirir(a obj.As) uint32 {
switch a {
case ABSTRINSW:
@@ -4192,7 +4367,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 {
return 0
}
-func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16) (op_code, index_mask uint32) {
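+// specialLsxMovInst returns the base encoding and index mask for an LSX/LASX
+// move form; when offset_flag is true, the load-and-broadcast
+// (vldrepl/xvldrepl) encodings are selected.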
+func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16, offset_flag bool) (op_code, index_mask uint32) {
farng := (fReg >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK
tarng := (tReg >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK
fclass := c.rclass(fReg)
@@ -4258,29 +4433,58 @@ func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16) (op_code, index_ma
}
case C_REG | (C_ARNG << 16):
- // vmov Rn, Vd.<T>
- switch a {
- case AVMOVQ:
- switch tarng {
- case ARNG_16B:
- return (0x1CA7C0 << 10), 0x0 // vreplgr2vr.b
- case ARNG_8H:
- return (0x1CA7C1 << 10), 0x0 // vreplgr2vr.h
- case ARNG_4W:
- return (0x1CA7C2 << 10), 0x0 // vreplgr2vr.w
- case ARNG_2V:
- return (0x1CA7C3 << 10), 0x0 // vreplgr2vr.d
+ switch {
+ case offset_flag:
+ // vmov offset(rj), vd.<T>
+ switch a {
+ case AVMOVQ:
+ switch tarng {
+ case ARNG_16B:
+ return (0xC2 << 22), 0x0 // vldrepl.b
+ case ARNG_8H:
+ return (0x182 << 21), 0x0 // vldrepl.h
+ case ARNG_4W:
+ return (0x302 << 20), 0x0 // vldrepl.w
+ case ARNG_2V:
+ return (0x602 << 19), 0x0 // vldrepl.d
+ }
+ case AXVMOVQ:
+ switch tarng {
+ case ARNG_32B:
+ return (0xCA << 22), 0x0 // xvldrepl.b
+ case ARNG_16H:
+ return (0x192 << 21), 0x0 // xvldrepl.h
+ case ARNG_8W:
+ return (0x322 << 20), 0x0 // xvldrepl.w
+ case ARNG_4V:
+ return (0x642 << 19), 0x0 // xvldrepl.d
+ }
}
- case AXVMOVQ:
- switch tarng {
- case ARNG_32B:
- return (0x1DA7C0 << 10), 0x0 // xvreplgr2vr.b
- case ARNG_16H:
- return (0x1DA7C1 << 10), 0x0 // xvreplgr2vr.h
- case ARNG_8W:
- return (0x1DA7C2 << 10), 0x0 // xvreplgr2vr.w
- case ARNG_4V:
- return (0x1DA7C3 << 10), 0x0 // xvreplgr2vr.d
+ default:
+ // vmov Rn, Vd.<T>
+ switch a {
+ case AVMOVQ:
+ switch tarng {
+ case ARNG_16B:
+ return (0x1CA7C0 << 10), 0x0 // vreplgr2vr.b
+ case ARNG_8H:
+ return (0x1CA7C1 << 10), 0x0 // vreplgr2vr.h
+ case ARNG_4W:
+ return (0x1CA7C2 << 10), 0x0 // vreplgr2vr.w
+ case ARNG_2V:
+ return (0x1CA7C3 << 10), 0x0 // vreplgr2vr.d
+ }
+ case AXVMOVQ:
+ switch tarng {
+ case ARNG_32B:
+ return (0x1DA7C0 << 10), 0x0 // xvreplgr2vr.b
+ case ARNG_16H:
+ return (0x1DA7C1 << 10), 0x0 // xvreplgr2vr.h
+ case ARNG_8W:
+ return (0x1DA7C2 << 10), 0x0 // xvreplgr2vr.w
+ case ARNG_4V:
+ return (0x1DA7C3 << 10), 0x0 // xvreplgr2vr.d
+ }
}
}
diff --git a/src/cmd/internal/obj/loong64/doc.go b/src/cmd/internal/obj/loong64/doc.go
index 0818389c8d9366..64bb41ae5a2219 100644
--- a/src/cmd/internal/obj/loong64/doc.go
+++ b/src/cmd/internal/obj/loong64/doc.go
@@ -203,6 +203,23 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
VMOVQ Vj.W[index], Vd.W4 | vreplvei.w vd, vj, ui2 | for i in range(4) : VR[vd].w[i] = VR[vj].w[ui2]
VMOVQ Vj.V[index], Vd.V2 | vreplvei.d vd, vj, ui1 | for i in range(2) : VR[vd].d[i] = VR[vj].d[ui1]
+3.7 Load data from memory and broadcast to each element of a vector register.
+
+ Instruction format:
+ VMOVQ offset(Rj), <Vd>.<T>
+
+ Mapping between Go and platform assembly:
+ Go assembly | platform assembly | semantics
+ -------------------------------------------------------------------------------------------------------------------------------------------------------
+ VMOVQ offset(Rj), Vd.B16 | vldrepl.b Vd, Rj, si12 | for i in range(16): VR[vd].b[i] = load 8 bit memory data from (GR[rj]+SignExtend(si12))
+ VMOVQ offset(Rj), Vd.H8 | vldrepl.h Vd, Rj, si11 | for i in range(8) : VR[vd].h[i] = load 16 bit memory data from (GR[rj]+SignExtend(si11<<1))
+ VMOVQ offset(Rj), Vd.W4 | vldrepl.w Vd, Rj, si10 | for i in range(4) : VR[vd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
+ VMOVQ offset(Rj), Vd.V2 | vldrepl.d Vd, Rj, si9 | for i in range(2) : VR[vd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+ XVMOVQ offset(Rj), Xd.B32 | xvldrepl.b Xd, Rj, si12 | for i in range(32): XR[xd].b[i] = load 8 bit memory data from (GR[rj]+SignExtend(si12))
+ XVMOVQ offset(Rj), Xd.H16 | xvldrepl.h Xd, Rj, si11 | for i in range(16): XR[xd].h[i] = load 16 bit memory data from (GR[rj]+SignExtend(si11<<1))
+ XVMOVQ offset(Rj), Xd.W8 | xvldrepl.w Xd, Rj, si10 | for i in range(8) : XR[xd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
+ XVMOVQ offset(Rj), Xd.V4 | xvldrepl.d Xd, Rj, si9 | for i in range(4) : XR[xd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+
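+ For example, a hypothetical fragment (registers and offset chosen arbitrarily for illustration)
+ that loads the byte at 8(R4) and broadcasts it to every element of V0 and X0:
+
+ VMOVQ 8(R4), V0.B16 // vldrepl.b v0, r4, 8
+ XVMOVQ 8(R4), X0.B32 // xvldrepl.b x0, r4, 8
+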
# Special instruction encoding definition and description on LoongArch
1. DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased
@@ -251,6 +268,27 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
bits[11:1]: block size, the value range is [16, 1024], and it must be an integer multiple of 16
bits[20:12]: block num, the value range is [1, 256]
bits[36:21]: stride, the value range is [0, 0xffff]
+
+4. ShiftAdd instructions
+ Mapping between Go and platform assembly:
+ Go assembly | platform assembly
+ ALSLW/ALSLWU/ALSLV $imm, Rj, Rk, Rd | alsl.w/wu/d rd, rj, rk, $imm
+
+ Instruction encoding format is as follows:
+
+ | 31 ~ 17 | 16 ~ 15 | 14 ~ 10 | 9 ~ 5 | 4 ~ 0 |
+ | opcode | sa2 | rk | rj | rd |
+
+ The alsl.w/wu/d series of instructions shift the value in rj left by sa2+1 bits, add the value
+ in rk, and write the result to rd.
+
+ To let programmers write the desired shift amount directly in assembly code, the immediate
+ written in assembly is sa2+1, and the assembler encodes sa2 (that value minus one) in the instruction.
+
+ For example:
+
+ Go assembly | instruction encoding
+ ALSLV $4, R4, R5, R6 | 002d9486
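+
+ Here the encoded sa2 is 3, so the hardware shifts by sa2+1 = 4 and the instruction computes
+ R6 = (R4 << 4) + R5.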
*/
package loong64
diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go
index 79fbb23fef92e1..a1eb786da31067 100644
--- a/src/cmd/internal/obj/loong64/obj.go
+++ b/src/cmd/internal/obj/loong64/obj.go
@@ -771,14 +771,6 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
return end
}
-func (c *ctxt0) addnop(p *obj.Prog) {
- q := c.newprog()
- q.As = ANOOP
- q.Pos = p.Pos
- q.Link = p.Link
- p.Link = q
-}
-
var Linkloong64 = obj.LinkArch{
Arch: sys.ArchLoong64,
Init: buildop,
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 9cba8c33ced087..dcd3aa59a4690a 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -2137,10 +2137,6 @@ func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
return o<<26 | xo<<1 | oe<<10 | rc&1
}
-func OPCC(o uint32, xo uint32, rc uint32) uint32 {
- return OPVCC(o, xo, 0, rc)
-}
-
/* Generate MD-form opcode */
func OPMD(o, xo, rc uint32) uint32 {
return o<<26 | xo<<2 | rc&1
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 078e81a2f7a317..44edb8d841837a 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -1072,24 +1072,6 @@ func regV(r uint32) uint32 {
return regVal(r, REG_V0, REG_V31)
}
-// regAddr extracts a register from an Addr.
-func regAddr(a obj.Addr, min, max uint32) uint32 {
- if a.Type != obj.TYPE_REG {
- panic(fmt.Sprintf("ill typed: %+v", a))
- }
- return regVal(uint32(a.Reg), min, max)
-}
-
-// regIAddr extracts the integer register from an Addr.
-func regIAddr(a obj.Addr) uint32 {
- return regAddr(a, REG_X0, REG_X31)
-}
-
-// regFAddr extracts the float register from an Addr.
-func regFAddr(a obj.Addr) uint32 {
- return regAddr(a, REG_F0, REG_F31)
-}
-
// immEven checks that the immediate is a multiple of two. If it
// is not, an error is returned.
func immEven(x int64) error {
diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go
index 957222a1559388..97de5a4a0896d5 100644
--- a/src/cmd/internal/obj/s390x/asmz.go
+++ b/src/cmd/internal/obj/s390x/asmz.go
@@ -2677,20 +2677,6 @@ func (c *ctxtz) addrilreloc(sym *obj.LSym, add int64) {
})
}
-func (c *ctxtz) addrilrelocoffset(sym *obj.LSym, add, offset int64) {
- if sym == nil {
- c.ctxt.Diag("require symbol to apply relocation")
- }
- offset += int64(2) // relocation offset from start of instruction
- c.cursym.AddRel(c.ctxt, obj.Reloc{
- Type: objabi.R_PCRELDBL,
- Off: int32(c.pc + offset),
- Siz: 4,
- Sym: sym,
- Add: add + offset + 4,
- })
-}
-
// Add a CALL relocation for the immediate in a RIL style instruction.
// The addend will be adjusted as required.
func (c *ctxtz) addcallreloc(sym *obj.LSym, add int64) {
@@ -4745,16 +4731,6 @@ func zI(op, i1 uint32, asm *[]byte) {
*asm = append(*asm, uint8(op>>8), uint8(i1))
}
-func zMII(op, m1, ri2, ri3 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(m1)<<4)|uint8((ri2>>8)&0x0F),
- uint8(ri2),
- uint8(ri3>>16),
- uint8(ri3>>8),
- uint8(ri3))
-}
-
func zRI(op, r1_m1, i2_ri2 uint32, asm *[]byte) {
*asm = append(*asm,
uint8(op>>8),
@@ -4807,16 +4783,6 @@ func zRIL(f form, op, r1_m1, i2_ri2 uint32, asm *[]byte) {
uint8(i2_ri2))
}
-func zRIS(op, r1, m3, b4, d4, i2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r1)<<4)|uint8(m3&0x0F),
- (uint8(b4)<<4)|(uint8(d4>>8)&0x0F),
- uint8(d4),
- uint8(i2),
- uint8(op))
-}
-
func zRR(op, r1, r2 uint32, asm *[]byte) {
*asm = append(*asm, uint8(op>>8), (uint8(r1)<<4)|uint8(r2&0x0F))
}
@@ -4845,16 +4811,6 @@ func zRRF(op, r3_m3, m4, r1, r2 uint32, asm *[]byte) {
(uint8(r1)<<4)|uint8(r2&0x0F))
}
-func zRRS(op, r1, r2, b4, d4, m3 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r1)<<4)|uint8(r2&0x0F),
- (uint8(b4)<<4)|uint8((d4>>8)&0x0F),
- uint8(d4),
- uint8(m3)<<4,
- uint8(op))
-}
-
func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
*asm = append(*asm,
uint8(op>>8),
@@ -4863,23 +4819,6 @@ func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
uint8(d2))
}
-func zRSI(op, r1, r3, ri2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r1)<<4)|uint8(r3&0x0F),
- uint8(ri2>>8),
- uint8(ri2))
-}
-
-func zRSL(op, l1, b2, d2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- uint8(l1),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2),
- uint8(op))
-}
-
func zRSY(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
dl2 := uint16(d2) & 0x0FFF
*asm = append(*asm,
@@ -4909,16 +4848,6 @@ func zRXE(op, r1, x2, b2, d2, m3 uint32, asm *[]byte) {
uint8(op))
}
-func zRXF(op, r3, x2, b2, d2, m1 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r3)<<4)|uint8(x2&0x0F),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2),
- uint8(m1)<<4,
- uint8(op))
-}
-
func zRXY(op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
dl2 := uint16(d2) & 0x0FFF
*asm = append(*asm,
@@ -4967,16 +4896,6 @@ func zSIY(op, i2, b1, d1 uint32, asm *[]byte) {
uint8(op))
}
-func zSMI(op, m1, b3, d3, ri2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- uint8(m1)<<4,
- (uint8(b3)<<4)|uint8((d3>>8)&0x0F),
- uint8(d3),
- uint8(ri2>>8),
- uint8(ri2))
-}
-
// Expected argument values for the instruction formats.
//
// Format a1 a2 a3 a4 a5 a6
@@ -5006,26 +4925,6 @@ func zSS(f form, op, l1_r1, l2_i3_r3, b1_b2, d1_d2, b2_b4, d2_d4 uint32, asm *[]
uint8(d2_d4))
}
-func zSSE(op, b1, d1, b2, d2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- uint8(op),
- (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
- uint8(d1),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2))
-}
-
-func zSSF(op, r3, b1, d1, b2, d2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r3)<<4)|(uint8(op)&0x0F),
- (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
- uint8(d1),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2))
-}
-
func rxb(va, vb, vc, vd uint32) uint8 {
mask := uint8(0)
if va >= REG_V16 && va <= REG_V31 {
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 33321341417764..0906f16eaae750 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2037,23 +2037,6 @@ type nopPad struct {
n int32 // Size of the pad
}
-// requireAlignment ensures that the function alignment is at
-// least as high as a, which should be a power of two
-// and between 8 and 2048, inclusive.
-//
-// the boolean result indicates whether the alignment meets those constraints
-func requireAlignment(a int64, ctxt *obj.Link, cursym *obj.LSym) bool {
- if !((a&(a-1) == 0) && 8 <= a && a <= 2048) {
- ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", a)
- return false
- }
- // By default function alignment is 32 bytes for amd64
- if cursym.Func().Align < int32(a) {
- cursym.Func().Align = int32(a)
- }
- return true
-}
-
func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
if ctxt.Retpoline && ctxt.Arch.Family == sys.I386 {
ctxt.Diag("-spectre=ret not supported on 386")
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 7f308686c113eb..48287546b38769 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -852,11 +852,6 @@ func isZeroArgRuntimeCall(s *obj.LSym) bool {
return false
}
-func indir_cx(ctxt *obj.Link, a *obj.Addr) {
- a.Type = obj.TYPE_MEM
- a.Reg = REG_CX
-}
-
// loadG ensures the G is loaded into a register (either CX or REGG),
// appending instructions to p if necessary. It returns the new last
// instruction and the G register.
diff --git a/src/cmd/internal/objfile/pe.go b/src/cmd/internal/objfile/pe.go
index 774760829c3d0a..c5c08264a9cdb0 100644
--- a/src/cmd/internal/objfile/pe.go
+++ b/src/cmd/internal/objfile/pe.go
@@ -174,8 +174,6 @@ func (f *peFile) goarch() string {
return "386"
case pe.IMAGE_FILE_MACHINE_AMD64:
return "amd64"
- case pe.IMAGE_FILE_MACHINE_ARMNT:
- return "arm"
case pe.IMAGE_FILE_MACHINE_ARM64:
return "arm64"
default:
diff --git a/src/cmd/internal/robustio/robustio.go b/src/cmd/internal/robustio/robustio.go
index 15b33773cf5f5b..73f88dcdd040af 100644
--- a/src/cmd/internal/robustio/robustio.go
+++ b/src/cmd/internal/robustio/robustio.go
@@ -37,17 +37,3 @@ func ReadFile(filename string) ([]byte, error) {
func RemoveAll(path string) error {
return removeAll(path)
}
-
-// IsEphemeralError reports whether err is one of the errors that the functions
-// in this package attempt to mitigate.
-//
-// Errors considered ephemeral include:
-// - syscall.ERROR_ACCESS_DENIED
-// - syscall.ERROR_FILE_NOT_FOUND
-// - internal/syscall/windows.ERROR_SHARING_VIOLATION
-//
-// This set may be expanded in the future; programs must not rely on the
-// non-ephemerality of any given error.
-func IsEphemeralError(err error) bool {
- return isEphemeralError(err)
-}
diff --git a/src/cmd/internal/script/engine.go b/src/cmd/internal/script/engine.go
index ba821712e5ec69..eb9344f6e2a1eb 100644
--- a/src/cmd/internal/script/engine.go
+++ b/src/cmd/internal/script/engine.go
@@ -72,14 +72,6 @@ type Engine struct {
Quiet bool
}
-// NewEngine returns an Engine configured with a basic set of commands and conditions.
-func NewEngine() *Engine {
- return &Engine{
- Cmds: DefaultCmds(),
- Conds: DefaultConds(),
- }
-}
-
// A Cmd is a command that is available to a script.
type Cmd interface {
// Run begins running the command.
diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go
index 666645873bbc70..5781276afadba7 100644
--- a/src/cmd/internal/testdir/testdir_test.go
+++ b/src/cmd/internal/testdir/testdir_test.go
@@ -233,19 +233,23 @@ var stdlibImportcfgFile = sync.OnceValue(func() string {
return filename
})
-func linkFile(runcmd runCmd, goname string, importcfg string, ldflags []string) (err error) {
+// linkFile links infile with the given importcfg and ldflags, writing the result to outfile.
+// infile can be the name of an object file, an archive, or a Go source file.
+func linkFile(runcmd runCmd, outfile, infile string, importcfg string, ldflags []string) (err error) {
if importcfg == "" {
importcfg = stdlibImportcfgFile()
}
- pfile := strings.ReplaceAll(goname, ".go", ".o")
- cmd := []string{goTool, "tool", "link", "-w", "-o", "a.exe", "-importcfg=" + importcfg}
+ if strings.HasSuffix(infile, ".go") {
+ infile = infile[:len(infile)-3] + ".o"
+ }
+ cmd := []string{goTool, "tool", "link", "-s", "-w", "-buildid=test", "-o", outfile, "-importcfg=" + importcfg}
if *linkshared {
cmd = append(cmd, "-linkshared", "-installsuffix=dynlink")
}
if ldflags != nil {
cmd = append(cmd, ldflags...)
}
- cmd = append(cmd, pfile)
+ cmd = append(cmd, infile)
_, err = runcmd(cmd...)
return
}
@@ -853,7 +857,7 @@ func (t test) run() error {
}
if i == len(pkgs)-1 {
- err = linkFile(runcmd, pkg.files[0], importcfgfile, ldflags)
+ err = linkFile(runcmd, "a.exe", pkg.files[0], importcfgfile, ldflags)
if err != nil {
return err
}
@@ -974,8 +978,7 @@ func (t test) run() error {
if err != nil {
return err
}
- cmd = []string{goTool, "tool", "link", "-importcfg=" + stdlibImportcfgFile(), "-o", "a.exe", "all.a"}
- _, err = runcmd(cmd...)
+ err = linkFile(runcmd, "a.exe", "all.a", stdlibImportcfgFile(), nil)
if err != nil {
return err
}
@@ -1033,9 +1036,7 @@ func (t test) run() error {
return err
}
exe := filepath.Join(tempDir, "test.exe")
- cmd := []string{goTool, "tool", "link", "-s", "-w", "-importcfg=" + stdlibImportcfgFile()}
- cmd = append(cmd, "-o", exe, pkg)
- if _, err := runcmd(cmd...); err != nil {
+ if err := linkFile(runcmd, exe, pkg, stdlibImportcfgFile(), nil); err != nil {
return err
}
out, err = runcmd(append([]string{exe}, args...)...)
diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go
index 7754cf9bfa58bb..b8127a2538ea18 100644
--- a/src/cmd/link/internal/amd64/asm.go
+++ b/src/cmd/link/internal/amd64/asm.go
@@ -40,10 +40,6 @@ import (
"log"
)
-func PADDR(x uint32) uint32 {
- return x &^ 0x80000000
-}
-
func gentext(ctxt *ld.Link, ldr *loader.Loader) {
initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt)
if initfunc == nil {
diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go
index 3a1830ce10e117..c17dca40b30eab 100644
--- a/src/cmd/link/internal/arm/obj.go
+++ b/src/cmd/link/internal/arm/obj.go
@@ -105,9 +105,5 @@ func archinit(ctxt *ld.Link) {
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
}
-
- case objabi.Hwindows: /* PE executable */
- // ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
- return
}
}
diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go
index b2d4ad7cb0e7f6..802fb35aee4e65 100644
--- a/src/cmd/link/internal/ld/config.go
+++ b/src/cmd/link/internal/ld/config.go
@@ -34,7 +34,7 @@ func (mode *BuildMode) Set(s string) error {
return fmt.Errorf("invalid buildmode: %q", s)
case "exe":
switch buildcfg.GOOS + "/" + buildcfg.GOARCH {
- case "darwin/arm64", "windows/arm", "windows/arm64": // On these platforms, everything is PIE
+ case "darwin/arm64", "windows/arm64": // On these platforms, everything is PIE
*mode = BuildModePIE
default:
*mode = BuildModeExe
diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go
index 8c9fa8efab5a1e..949ea5590589ae 100644
--- a/src/cmd/link/internal/ld/decodesym.go
+++ b/src/cmd/link/internal/ld/decodesym.go
@@ -244,35 +244,6 @@ func decodetypeGcmask(ctxt *Link, s loader.Sym) []byte {
return ctxt.loader.Data(mask)
}
-// Type.commonType.gc
-func decodetypeGcprog(ctxt *Link, s loader.Sym) []byte {
- if ctxt.loader.SymType(s) == sym.SDYNIMPORT {
- symData := ctxt.loader.Data(s)
- addr := decodetypeGcprogShlib(ctxt, symData)
- sect := findShlibSection(ctxt, ctxt.loader.SymPkg(s), addr)
- if sect != nil {
- // A gcprog is a 4-byte uint32 indicating length, followed by
- // the actual program.
- progsize := make([]byte, 4)
- _, err := sect.ReadAt(progsize, int64(addr-sect.Addr))
- if err != nil {
- log.Fatal(err)
- }
- progbytes := make([]byte, ctxt.Arch.ByteOrder.Uint32(progsize))
- _, err = sect.ReadAt(progbytes, int64(addr-sect.Addr+4))
- if err != nil {
- log.Fatal(err)
- }
- return append(progsize, progbytes...)
- }
- Exitf("cannot find gcprog for %s", ctxt.loader.SymName(s))
- return nil
- }
- relocs := ctxt.loader.Relocs(s)
- rs := decodeRelocSym(ctxt.loader, s, &relocs, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize))
- return ctxt.loader.Data(rs)
-}
-
// Find the elf.Section of a given shared library that contains a given address.
func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section {
for _, shlib := range ctxt.Shlibs {
diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go
index d95fec788a616b..163ffc26e8406a 100644
--- a/src/cmd/link/internal/ld/fallocate_test.go
+++ b/src/cmd/link/internal/ld/fallocate_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build darwin || (freebsd && go1.21) || linux
+//go:build darwin || (freebsd && go1.21) || linux || (netbsd && go1.25)
package ld
diff --git a/src/cmd/link/internal/ld/outbuf_freebsd.go b/src/cmd/link/internal/ld/outbuf_bsd.go
similarity index 90%
rename from src/cmd/link/internal/ld/outbuf_freebsd.go
rename to src/cmd/link/internal/ld/outbuf_bsd.go
index 7e718c1408e730..5dce83fefd2d7c 100644
--- a/src/cmd/link/internal/ld/outbuf_freebsd.go
+++ b/src/cmd/link/internal/ld/outbuf_bsd.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build freebsd && go1.21
+//go:build (freebsd && go1.21) || (netbsd && go1.25)
package ld
diff --git a/src/cmd/link/internal/ld/outbuf_mmap.go b/src/cmd/link/internal/ld/outbuf_mmap.go
index b8b8dc5158e918..e92a06dcb25f80 100644
--- a/src/cmd/link/internal/ld/outbuf_mmap.go
+++ b/src/cmd/link/internal/ld/outbuf_mmap.go
@@ -28,7 +28,7 @@ func (out *OutBuf) Mmap(filesize uint64) (err error) {
// Some file systems do not support fallocate. We ignore that error as linking
// can still take place, but you might SIGBUS when you write to the mmapped
// area.
- if err != syscall.ENOTSUP && err != syscall.EPERM && err != errNoFallocate {
+ if err != syscall.ENOTSUP && err != syscall.EOPNOTSUPP && err != syscall.EPERM && err != errNoFallocate {
return err
}
}
diff --git a/src/cmd/link/internal/ld/outbuf_nofallocate.go b/src/cmd/link/internal/ld/outbuf_nofallocate.go
index 435be5e09fe5b9..9169379e23897b 100644
--- a/src/cmd/link/internal/ld/outbuf_nofallocate.go
+++ b/src/cmd/link/internal/ld/outbuf_nofallocate.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !darwin && !(freebsd && go1.21) && !linux
+//go:build !darwin && !(freebsd && go1.21) && !linux && !(netbsd && go1.25)
package ld
diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go
index fbfd928e873be5..c290410b0ecb74 100644
--- a/src/cmd/link/internal/ld/pe.go
+++ b/src/cmd/link/internal/ld/pe.go
@@ -913,8 +913,6 @@ func (f *peFile) writeFileHeader(ctxt *Link) {
fh.Machine = pe.IMAGE_FILE_MACHINE_AMD64
case sys.I386:
fh.Machine = pe.IMAGE_FILE_MACHINE_I386
- case sys.ARM:
- fh.Machine = pe.IMAGE_FILE_MACHINE_ARMNT
case sys.ARM64:
fh.Machine = pe.IMAGE_FILE_MACHINE_ARM64
}
diff --git a/src/cmd/pprof/doc.go b/src/cmd/pprof/doc.go
index 59e1a47cd19ff2..f55a50a60f196e 100644
--- a/src/cmd/pprof/doc.go
+++ b/src/cmd/pprof/doc.go
@@ -12,5 +12,5 @@
//
// go tool pprof -h
//
-// For an example, see https://blog.golang.org/profiling-go-programs.
+// For an example, see https://go.dev/blog/pprof.
package main
diff --git a/src/cmd/trace/viewer.go b/src/cmd/trace/viewer.go
index da83e81ab9327e..f8abcec2cb7df1 100644
--- a/src/cmd/trace/viewer.go
+++ b/src/cmd/trace/viewer.go
@@ -9,7 +9,6 @@ import (
"internal/trace"
"internal/trace/traceviewer"
"slices"
- "time"
)
// viewerFrames returns the frames of the stack of ev. The given frame slice is
@@ -40,7 +39,3 @@ func viewerGState(state trace.GoState, inMarkAssist bool) traceviewer.GState {
panic(fmt.Sprintf("unknown GoState: %s", state.String()))
}
}
-
-func viewerTime(t time.Duration) float64 {
- return float64(t) / float64(time.Microsecond)
-}
diff --git a/src/context/context.go b/src/context/context.go
index 4f150f6a1d6c7e..4fb537e23387ab 100644
--- a/src/context/context.go
+++ b/src/context/context.go
@@ -103,7 +103,7 @@ type Context interface {
// }
// }
//
- // See https://blog.golang.org/pipelines for more examples of how to use
+ // See https://go.dev/blog/pipelines for more examples of how to use
// a Done channel for cancellation.
Done() <-chan struct{}
diff --git a/src/crypto/internal/fips140/sha256/sha256block_loong64.s b/src/crypto/internal/fips140/sha256/sha256block_loong64.s
index e171d93e0ba5f0..ad03cd9931b7f5 100644
--- a/src/crypto/internal/fips140/sha256/sha256block_loong64.s
+++ b/src/crypto/internal/fips140/sha256/sha256block_loong64.s
@@ -52,6 +52,7 @@
#define REGTMP3 R18
#define REGTMP4 R7
#define REGTMP5 R6
+#define REG_KT R19
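+// REG_KT holds the base address of the round constant table ·_K; each round
+// loads its constant from the table instead of adding a 32-bit immediate.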
// W[i] = M[i]; for 0 <= i <= 15
#define LOAD0(index) \
@@ -89,8 +90,9 @@
// Ch(x, y, z) = (x AND y) XOR (NOT x AND z)
// = ((y XOR z) AND x) XOR z
// Calculate T1 in REGTMP4
-#define SHA256T1(const, e, f, g, h) \
- ADDV $const, h; \
+#define SHA256T1(index, e, f, g, h) \
+ MOVW (index*4)(REG_KT), REGTMP5; \
+ ADDV REGTMP5, h; \
ADD REGTMP4, h; \
ROTR $6, e, REGTMP5; \
ROTR $11, e, REGTMP; \
@@ -122,19 +124,19 @@
// Calculate T1 and T2, then e = d + T1 and a = T1 + T2.
// The values for e and a are stored in d and h, ready for rotation.
-#define SHA256ROUND(const, a, b, c, d, e, f, g, h) \
- SHA256T1(const, e, f, g, h); \
+#define SHA256ROUND(index, a, b, c, d, e, f, g, h) \
+ SHA256T1(index, e, f, g, h); \
SHA256T2(a, b, c); \
ADD REGTMP4, d; \
ADD REGTMP1, REGTMP4, h
-#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \
+#define SHA256ROUND0(index, a, b, c, d, e, f, g, h) \
LOAD0(index); \
- SHA256ROUND(const, a, b, c, d, e, f, g, h)
+ SHA256ROUND(index, a, b, c, d, e, f, g, h)
-#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \
+#define SHA256ROUND1(index, a, b, c, d, e, f, g, h) \
LOAD1(index); \
- SHA256ROUND(const, a, b, c, d, e, f, g, h)
+ SHA256ROUND(index, a, b, c, d, e, f, g, h)
// A stack frame size of 64 bytes is required here, because
// the frame size used for data expansion is 64 bytes.
@@ -147,6 +149,8 @@ TEXT ·block(SB),NOSPLIT,$64-32
AND $~63, R6
BEQ R6, end
+ MOVV $·_K(SB), REG_KT // const table
+
// p_len >= 64
MOVV dig+0(FP), R4
ADDV R5, R6, R25
@@ -160,71 +164,71 @@ TEXT ·block(SB),NOSPLIT,$64-32
MOVW (7*4)(R4), R15 // h = H7
loop:
- SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND0(0, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND0(1, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND0(2, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND0(3, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND0(4, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND0(5, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND0(6, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND0(7, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND0(8, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND0(9, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND0(10, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND0(11, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND0(12, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND0(13, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND0(14, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND0(15, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(16, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(17, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(18, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(19, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(20, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(21, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(22, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(23, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(24, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(25, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(26, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(27, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(28, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(29, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(30, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(31, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(32, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(33, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(34, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(35, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(36, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(37, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(38, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(39, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(40, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(41, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(42, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(43, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(44, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(45, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(46, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(47, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(48, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(49, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(50, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(51, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(52, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(53, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(54, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(55, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(56, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(57, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(58, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(59, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(60, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(61, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(62, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(63, R9, R10, R11, R12, R13, R14, R15, R8)
MOVW (0*4)(R4), REGTMP
MOVW (1*4)(R4), REGTMP1
diff --git a/src/crypto/internal/fips140/sha512/sha512block_loong64.s b/src/crypto/internal/fips140/sha512/sha512block_loong64.s
index f65d563ca34d82..751ab4e4f696e7 100644
--- a/src/crypto/internal/fips140/sha512/sha512block_loong64.s
+++ b/src/crypto/internal/fips140/sha512/sha512block_loong64.s
@@ -14,6 +14,7 @@
#define REGTMP3 R18
#define REGTMP4 R7
#define REGTMP5 R6
+#define REG_KT R19
// W[i] = M[i]; for 0 <= i <= 15
#define LOAD0(index) \
@@ -52,8 +53,9 @@
// Ch(x, y, z) = (x AND y) XOR (NOT x AND z)
// = ((y XOR z) AND x) XOR z
// Calculate T1 in REGTMP4
-#define SHA512T1(const, e, f, g, h) \
- ADDV $const, h; \
+#define SHA512T1(index, e, f, g, h) \
+ MOVV (index*8)(REG_KT), REGTMP5; \
+ ADDV REGTMP5, h; \
ADDV REGTMP4, h; \
ROTRV $14, e, REGTMP5; \
ROTRV $18, e, REGTMP; \
@@ -85,19 +87,19 @@
// Calculate T1 and T2, then e = d + T1 and a = T1 + T2.
// The values for e and a are stored in d and h, ready for rotation.
-#define SHA512ROUND(const, a, b, c, d, e, f, g, h) \
- SHA512T1(const, e, f, g, h); \
+#define SHA512ROUND(index, a, b, c, d, e, f, g, h) \
+ SHA512T1(index, e, f, g, h); \
SHA512T2(a, b, c); \
ADDV REGTMP4, d; \
ADDV REGTMP1, REGTMP4, h
-#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \
+#define SHA512ROUND0(index, a, b, c, d, e, f, g, h) \
LOAD0(index); \
- SHA512ROUND(const, a, b, c, d, e, f, g, h)
+ SHA512ROUND(index, a, b, c, d, e, f, g, h)
-#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \
+#define SHA512ROUND1(index, a, b, c, d, e, f, g, h) \
LOAD1(index); \
- SHA512ROUND(const, a, b, c, d, e, f, g, h)
+ SHA512ROUND(index, a, b, c, d, e, f, g, h)
// A stack frame size of 128 bytes is required here, because
// the frame size used for data expansion is 128 bytes.
@@ -110,6 +112,8 @@ TEXT ·block(SB),NOSPLIT,$128-32
AND $~127, R6
BEQ R6, end
+ MOVV $·_K(SB), REG_KT // const table
+
// p_len >= 128
MOVV dig+0(FP), R4
ADDV R5, R6, R25
@@ -123,87 +127,87 @@ TEXT ·block(SB),NOSPLIT,$128-32
MOVV (7*8)(R4), R15 // h = H7
loop:
- SHA512ROUND0( 0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND0( 1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND0( 2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND0( 3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND0( 4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND0( 5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND0( 6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND0( 7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND0( 8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND0( 9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8)
-
- SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND0( 0, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND0( 1, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND0( 2, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND0( 3, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND0( 4, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND0( 5, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND0( 6, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND0( 7, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND0( 8, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND0( 9, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND0(10, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND0(11, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND0(12, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND0(13, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND0(14, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND0(15, R9, R10, R11, R12, R13, R14, R15, R8)
+
+ SHA512ROUND1(16, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(17, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(18, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(19, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(20, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(21, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(22, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(23, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(24, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(25, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(26, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(27, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(28, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(29, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(30, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(31, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(32, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(33, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(34, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(35, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(36, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(37, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(38, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(39, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(40, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(41, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(42, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(43, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(44, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(45, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(46, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(47, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(48, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(49, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(50, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(51, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(52, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(53, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(54, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(55, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(56, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(57, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(58, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(59, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(60, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(61, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(62, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(63, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(64, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(65, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(66, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(67, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(68, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(69, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(70, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(71, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(72, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(73, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(74, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(75, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(76, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(77, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(78, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(79, R9, R10, R11, R12, R13, R14, R15, R8)
MOVV (0*8)(R4), REGTMP
MOVV (1*8)(R4), REGTMP1
diff --git a/src/crypto/tls/quic.go b/src/crypto/tls/quic.go
index ba8a235d84ad93..ed70100d11f0e7 100644
--- a/src/crypto/tls/quic.go
+++ b/src/crypto/tls/quic.go
@@ -302,6 +302,9 @@ type QUICSessionTicketOptions struct {
// Currently, it can only be called once.
func (q *QUICConn) SendSessionTicket(opts QUICSessionTicketOptions) error {
c := q.conn
+ if c.config.SessionTicketsDisabled {
+ return nil
+ }
if !c.isHandshakeComplete.Load() {
return quicError(errors.New("tls: SendSessionTicket called before handshake completed"))
}
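For context, a minimal sketch of the calling pattern this change permits; the helper name is invented for illustration and assumes an established connection:

	package main

	import "crypto/tls"

	// sendTicketIfEnabled relies on the new early return: when the
	// connection's Config has SessionTicketsDisabled set, SendSessionTicket
	// is a silent no-op instead of an error path the caller must avoid.
	func sendTicketIfEnabled(q *tls.QUICConn) error {
		return q.SendSessionTicket(tls.QUICSessionTicketOptions{})
	}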
diff --git a/src/crypto/tls/quic_test.go b/src/crypto/tls/quic_test.go
index 51cd4ef765dd6c..f6e8c55d9d63e4 100644
--- a/src/crypto/tls/quic_test.go
+++ b/src/crypto/tls/quic_test.go
@@ -231,6 +231,18 @@ func TestQUICSessionResumption(t *testing.T) {
if !cli2.conn.ConnectionState().DidResume {
t.Errorf("second connection did not use session resumption")
}
+
+ clientConfig.TLSConfig.SessionTicketsDisabled = true
+ cli3 := newTestQUICClient(t, clientConfig)
+ cli3.conn.SetTransportParameters(nil)
+ srv3 := newTestQUICServer(t, serverConfig)
+ srv3.conn.SetTransportParameters(nil)
+ if err := runTestQUICConnection(context.Background(), cli3, srv3, nil); err != nil {
+ t.Fatalf("error during third connection handshake: %v", err)
+ }
+ if cli3.conn.ConnectionState().DidResume {
+ t.Errorf("third connection unexpectedly used session resumption")
+ }
}
func TestQUICFragmentaryData(t *testing.T) {
diff --git a/src/database/sql/convert.go b/src/database/sql/convert.go
index 65fdfe6fa8c3ad..26b139ababd178 100644
--- a/src/database/sql/convert.go
+++ b/src/database/sql/convert.go
@@ -335,7 +335,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
if rows == nil {
return errors.New("invalid context to convert cursor rows, missing parent *Rows")
}
- rows.closemu.Lock()
*d = Rows{
dc: rows.dc,
releaseConn: func(error) {},
@@ -351,7 +350,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
parentCancel()
}
}
- rows.closemu.Unlock()
return nil
}
}
diff --git a/src/database/sql/driver/driver.go b/src/database/sql/driver/driver.go
index d0892e80fc28d5..487870be63209e 100644
--- a/src/database/sql/driver/driver.go
+++ b/src/database/sql/driver/driver.go
@@ -515,6 +515,18 @@ type RowsColumnTypePrecisionScale interface {
ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool)
}
+// RowsColumnScanner may be implemented by [Rows]. It allows the driver to completely
+// take responsibility for how values are scanned and replace the normal [database/sql]
+// scanning path. This allows drivers to directly support types that do not implement
+// [database/sql.Scanner].
+type RowsColumnScanner interface {
+ Rows
+
+ // ScanColumn copies the column in the current row into the value pointed at by
+ // dest. It returns [ErrSkip] to fall back to the normal [database/sql] scanning path.
+ ScanColumn(dest any, index int) error
+}
+
// Tx is a transaction.
type Tx interface {
Commit() error
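To make the new contract concrete, here is a hedged sketch of a driver-side implementation; the package name, the stub bodies, and the int64 special case are invented for illustration:

	package mydriver

	import "database/sql/driver"

	// rows stubs out the embedded driver.Rows methods; only ScanColumn
	// matters for this sketch.
	type rows struct{}

	func (r *rows) Columns() []string              { return []string{"n"} }
	func (r *rows) Close() error                   { return nil }
	func (r *rows) Next(dest []driver.Value) error { return nil }

	// ScanColumn handles destinations the driver understands natively and
	// returns driver.ErrSkip for everything else, so database/sql falls
	// back to its usual conversion path for those columns.
	func (r *rows) ScanColumn(dest any, index int) error {
		if d, ok := dest.(*int64); ok {
			*d = 42 // stand-in for a value decoded by the driver
			return nil
		}
		return driver.ErrSkip
	}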
diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go
index 3dfcd447b52bca..003e6c62986f31 100644
--- a/src/database/sql/fakedb_test.go
+++ b/src/database/sql/fakedb_test.go
@@ -5,6 +5,7 @@
package sql
import (
+ "bytes"
"context"
"database/sql/driver"
"errors"
@@ -15,7 +16,6 @@ import (
"strconv"
"strings"
"sync"
- "sync/atomic"
"testing"
"time"
)
@@ -91,8 +91,6 @@ func (cc *fakeDriverCtx) OpenConnector(name string) (driver.Connector, error) {
type fakeDB struct {
name string
- useRawBytes atomic.Bool
-
mu sync.Mutex
tables map[string]*table
badConn bool
@@ -684,8 +682,6 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm
switch cmd {
case "WIPE":
// Nothing
- case "USE_RAWBYTES":
- c.db.useRawBytes.Store(true)
case "SELECT":
stmt, err = c.prepareSelect(stmt, parts)
case "CREATE":
@@ -789,9 +785,6 @@ func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (d
case "WIPE":
db.wipe()
return driver.ResultNoRows, nil
- case "USE_RAWBYTES":
- s.c.db.useRawBytes.Store(true)
- return driver.ResultNoRows, nil
case "CREATE":
if err := db.createTable(s.table, s.colName, s.colType); err != nil {
return nil, err
@@ -1076,10 +1069,9 @@ type rowsCursor struct {
errPos int
err error
- // a clone of slices to give out to clients, indexed by the
- // original slice's first byte address. we clone them
- // just so we're able to corrupt them on close.
- bytesClone map[*byte][]byte
+ // Data returned to clients.
+ // We clone and stash it here so it can be invalidated by Close and Next.
+ driverOwnedMemory [][]byte
// Every operation writes to line to enable the race detector
// check for data races.
@@ -1096,9 +1088,19 @@ func (rc *rowsCursor) touchMem() {
rc.line++
}
+func (rc *rowsCursor) invalidateDriverOwnedMemory() {
+ for _, buf := range rc.driverOwnedMemory {
+ for i := range buf {
+ buf[i] = 'x'
+ }
+ }
+ rc.driverOwnedMemory = nil
+}
+
func (rc *rowsCursor) Close() error {
rc.touchMem()
rc.parentMem.touchMem()
+ rc.invalidateDriverOwnedMemory()
rc.closed = true
return rc.closeErr
}
@@ -1129,6 +1131,8 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
if rc.posRow >= len(rc.rows[rc.posSet]) {
return io.EOF // per interface spec
}
+ // Corrupt any previously returned bytes.
+ rc.invalidateDriverOwnedMemory()
for i, v := range rc.rows[rc.posSet][rc.posRow].cols {
// TODO(bradfitz): convert to subset types? naah, I
// think the subset types should only be input to
@@ -1136,20 +1140,13 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
// a wider range of types coming out of drivers. all
// for ease of drivers, and to prevent drivers from
// messing up conversions or doing them differently.
- dest[i] = v
-
- if bs, ok := v.([]byte); ok && !rc.db.useRawBytes.Load() {
- if rc.bytesClone == nil {
- rc.bytesClone = make(map[*byte][]byte)
- }
- clone, ok := rc.bytesClone[&bs[0]]
- if !ok {
- clone = make([]byte, len(bs))
- copy(clone, bs)
- rc.bytesClone[&bs[0]] = clone
- }
- dest[i] = clone
+ if bs, ok := v.([]byte); ok {
+			// Clone []byte values and stash them for later invalidation.
+ bs = bytes.Clone(bs)
+ rc.driverOwnedMemory = append(rc.driverOwnedMemory, bs)
+ v = bs
}
+ dest[i] = v
}
return nil
}
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index b0abcf7fcd408b..85b9ffc37d9445 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -3368,38 +3368,45 @@ func (rs *Rows) Scan(dest ...any) error {
// without calling Next.
return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)")
}
+
rs.closemu.RLock()
+ rs.raw = rs.raw[:0]
+ err := rs.scanLocked(dest...)
+ if err == nil && scanArgsContainRawBytes(dest) {
+ rs.closemuScanHold = true
+ } else {
+ rs.closemu.RUnlock()
+ }
+ return err
+}
+
+func (rs *Rows) scanLocked(dest ...any) error {
if rs.lasterr != nil && rs.lasterr != io.EOF {
- rs.closemu.RUnlock()
return rs.lasterr
}
if rs.closed {
- err := rs.lasterrOrErrLocked(errRowsClosed)
- rs.closemu.RUnlock()
- return err
- }
-
- if scanArgsContainRawBytes(dest) {
- rs.closemuScanHold = true
- rs.raw = rs.raw[:0]
- } else {
- rs.closemu.RUnlock()
+ return rs.lasterrOrErrLocked(errRowsClosed)
}
if rs.lastcols == nil {
- rs.closemuRUnlockIfHeldByScan()
return errors.New("sql: Scan called without calling Next")
}
if len(dest) != len(rs.lastcols) {
- rs.closemuRUnlockIfHeldByScan()
return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
}
for i, sv := range rs.lastcols {
- err := convertAssignRows(dest[i], sv, rs)
+ err := driver.ErrSkip
+
+ if rcs, ok := rs.rowsi.(driver.RowsColumnScanner); ok {
+ err = rcs.ScanColumn(dest[i], i)
+ }
+
+ if err == driver.ErrSkip {
+ err = convertAssignRows(dest[i], sv, rs)
+ }
+
if err != nil {
- rs.closemuRUnlockIfHeldByScan()
return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err)
}
}
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 74b9bf550249c7..f706610b87e85b 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -5,6 +5,7 @@
package sql
import (
+ "bytes"
"context"
"database/sql/driver"
"errors"
@@ -4200,6 +4201,102 @@ func TestNamedValueCheckerSkip(t *testing.T) {
}
}
+type rcsDriver struct {
+ fakeDriver
+}
+
+func (d *rcsDriver) Open(dsn string) (driver.Conn, error) {
+ c, err := d.fakeDriver.Open(dsn)
+ fc := c.(*fakeConn)
+ fc.db.allowAny = true
+ return &rcsConn{fc}, err
+}
+
+type rcsConn struct {
+ *fakeConn
+}
+
+func (c *rcsConn) PrepareContext(ctx context.Context, q string) (driver.Stmt, error) {
+ stmt, err := c.fakeConn.PrepareContext(ctx, q)
+ if err != nil {
+ return stmt, err
+ }
+ return &rcsStmt{stmt.(*fakeStmt)}, nil
+}
+
+type rcsStmt struct {
+ *fakeStmt
+}
+
+func (s *rcsStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ rows, err := s.fakeStmt.QueryContext(ctx, args)
+ if err != nil {
+ return rows, err
+ }
+ return &rcsRows{rows.(*rowsCursor)}, nil
+}
+
+type rcsRows struct {
+ *rowsCursor
+}
+
+func (r *rcsRows) ScanColumn(dest any, index int) error {
+ switch d := dest.(type) {
+ case *int64:
+ *d = 42
+ return nil
+ }
+
+ return driver.ErrSkip
+}
+
+func TestRowsColumnScanner(t *testing.T) {
+ Register("RowsColumnScanner", &rcsDriver{})
+ db, err := Open("RowsColumnScanner", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ _, err = db.ExecContext(ctx, "CREATE|t|str=string,n=int64")
+ if err != nil {
+ t.Fatal("exec create", err)
+ }
+
+ _, err = db.ExecContext(ctx, "INSERT|t|str=?,n=?", "foo", int64(1))
+ if err != nil {
+ t.Fatal("exec insert", err)
+ }
+ var (
+ str string
+ i64 int64
+ i int
+ f64 float64
+ ui uint
+ )
+ err = db.QueryRowContext(ctx, "SELECT|t|str,n,n,n,n|").Scan(&str, &i64, &i, &f64, &ui)
+ if err != nil {
+ t.Fatal("select", err)
+ }
+
+ list := []struct{ got, want any }{
+ {str, "foo"},
+ {i64, int64(42)},
+ {i, int(1)},
+ {f64, float64(1)},
+ {ui, uint(1)},
+ }
+
+ for index, item := range list {
+ if !reflect.DeepEqual(item.got, item.want) {
+			t.Errorf("got %#v, want %#v for index %d", item.got, item.want, index)
+ }
+ }
+}
+
func TestOpenConnector(t *testing.T) {
Register("testctx", &fakeDriverCtx{})
db, err := Open("testctx", "people")
@@ -4434,10 +4531,6 @@ func testContextCancelDuringRawBytesScan(t *testing.T, mode string) {
db := newTestDB(t, "people")
defer closeDB(t, db)
- if _, err := db.Exec("USE_RAWBYTES"); err != nil {
- t.Fatal(err)
- }
-
// cancel used to call close asynchronously.
// This test checks that it waits so as not to interfere with RawBytes.
ctx, cancel := context.WithCancel(context.Background())
@@ -4529,6 +4622,61 @@ func TestContextCancelBetweenNextAndErr(t *testing.T) {
}
}
+type testScanner struct {
+ scanf func(src any) error
+}
+
+func (ts testScanner) Scan(src any) error { return ts.scanf(src) }
+
+func TestContextCancelDuringScan(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ scanStart := make(chan any)
+ scanEnd := make(chan error)
+ scanner := &testScanner{
+ scanf: func(src any) error {
+ scanStart <- src
+ return <-scanEnd
+ },
+ }
+
+ // Start a query, and pause it mid-scan.
+ want := []byte("Alice")
+ r, err := db.QueryContext(ctx, "SELECT|people|name|name=?", string(want))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !r.Next() {
+ t.Fatalf("r.Next() = false, want true")
+ }
+ go func() {
+ r.Scan(scanner)
+ }()
+ got := <-scanStart
+ defer close(scanEnd)
+ gotBytes, ok := got.([]byte)
+ if !ok {
+ t.Fatalf("r.Scan returned %T, want []byte", got)
+ }
+ if !bytes.Equal(gotBytes, want) {
+ t.Fatalf("before cancel: r.Scan returned %q, want %q", gotBytes, want)
+ }
+
+ // Cancel the query.
+ // Sleep to give it a chance to finish canceling.
+ cancel()
+ time.Sleep(10 * time.Millisecond)
+
+ // Cancelling the query should not have changed the result.
+ if !bytes.Equal(gotBytes, want) {
+ t.Fatalf("after cancel: r.Scan result is now %q, want %q", gotBytes, want)
+ }
+}
+
func TestNilErrorAfterClose(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
@@ -4562,10 +4710,6 @@ func TestRawBytesReuse(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
- if _, err := db.Exec("USE_RAWBYTES"); err != nil {
- t.Fatal(err)
- }
-
var raw RawBytes
// The RawBytes in this query aliases driver-owned memory.
diff --git a/src/debug/macho/file.go b/src/debug/macho/file.go
index fcf28c4b25edaf..52ff81750cc4c1 100644
--- a/src/debug/macho/file.go
+++ b/src/debug/macho/file.go
@@ -719,15 +719,28 @@ func (f *File) DWARF() (*dwarf.Data, error) {
// referred to by the binary f that are expected to be
// satisfied by other libraries at dynamic load time.
func (f *File) ImportedSymbols() ([]string, error) {
- if f.Dysymtab == nil || f.Symtab == nil {
+ if f.Symtab == nil {
return nil, &FormatError{0, "missing symbol table", nil}
}
st := f.Symtab
dt := f.Dysymtab
var all []string
- for _, s := range st.Syms[dt.Iundefsym : dt.Iundefsym+dt.Nundefsym] {
- all = append(all, s.Name)
+ if dt != nil {
+ for _, s := range st.Syms[dt.Iundefsym : dt.Iundefsym+dt.Nundefsym] {
+ all = append(all, s.Name)
+ }
+ } else {
+ // From Darwin's include/mach-o/nlist.h
+ const (
+ N_TYPE = 0x0e
+ N_UNDF = 0x0
+ )
+ for _, s := range st.Syms {
+ if s.Type&N_TYPE == N_UNDF && s.Sect == 0 {
+ all = append(all, s.Name)
+ }
+ }
}
return all, nil
}
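A short usage sketch of the case the new branch covers; the object file path is hypothetical, and with this change a relocatable object lacking a dynamic symbol table no longer returns a FormatError:

	package main

	import (
		"debug/macho"
		"fmt"
		"log"
	)

	func main() {
		// An `ld -r` style object typically carries a symbol table but no
		// LC_DYSYMTAB; ImportedSymbols now scans Symtab for undefined symbols.
		f, err := macho.Open("clang-amd64-darwin-ld-r.o")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		syms, err := f.ImportedSymbols()
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range syms {
			fmt.Println(s) // e.g. "_printf"
		}
	}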
diff --git a/src/debug/macho/file_test.go b/src/debug/macho/file_test.go
index 313c376c54a27f..fbcc7bdcb01e96 100644
--- a/src/debug/macho/file_test.go
+++ b/src/debug/macho/file_test.go
@@ -9,15 +9,17 @@ import (
"internal/obscuretestdata"
"io"
"reflect"
+ "slices"
"testing"
)
type fileTest struct {
- file string
- hdr FileHeader
- loads []any
- sections []*SectionHeader
- relocations map[string][]Reloc
+ file string
+ hdr FileHeader
+ loads []any
+ sections []*SectionHeader
+ relocations map[string][]Reloc
+ importedSyms []string
}
var fileTests = []fileTest{
@@ -46,6 +48,7 @@ var fileTests = []fileTest{
{"__jump_table", "__IMPORT", 0x3000, 0xa, 0x2000, 0x6, 0x0, 0x0, 0x4000008},
},
nil,
+ nil,
},
{
"testdata/gcc-amd64-darwin-exec.base64",
@@ -74,6 +77,7 @@ var fileTests = []fileTest{
{"__la_symbol_ptr", "__DATA", 0x100001058, 0x10, 0x1058, 0x2, 0x0, 0x0, 0x7},
},
nil,
+ nil,
},
{
"testdata/gcc-amd64-darwin-exec-debug.base64",
@@ -102,6 +106,7 @@ var fileTests = []fileTest{
{"__debug_str", "__DWARF", 0x10000215c, 0x60, 0x115c, 0x0, 0x0, 0x0, 0x0},
},
nil,
+ nil,
},
{
"testdata/clang-386-darwin-exec-with-rpath.base64",
@@ -126,6 +131,7 @@ var fileTests = []fileTest{
},
nil,
nil,
+ nil,
},
{
"testdata/clang-amd64-darwin-exec-with-rpath.base64",
@@ -150,6 +156,7 @@ var fileTests = []fileTest{
},
nil,
nil,
+ nil,
},
{
"testdata/clang-386-darwin.obj.base64",
@@ -185,6 +192,7 @@ var fileTests = []fileTest{
},
},
},
+ nil,
},
{
"testdata/clang-amd64-darwin.obj.base64",
@@ -221,6 +229,15 @@ var fileTests = []fileTest{
},
},
},
+ []string{"_printf"},
+ },
+ {
+ "testdata/clang-amd64-darwin-ld-r.obj.base64",
+ FileHeader{0xfeedfacf, CpuAmd64, 0x3, 0x1, 0x4, 0x1c0, 0x2000},
+ nil,
+ nil,
+ nil,
+ []string{"_printf"},
},
}
@@ -345,6 +362,17 @@ func TestOpen(t *testing.T) {
}
}
}
+
+ if tt.importedSyms != nil {
+ ss, err := f.ImportedSymbols()
+ if err != nil {
+			t.Errorf("open %s: failed to read imported symbols: %v", tt.file, err)
+ }
+ want := tt.importedSyms
+ if !slices.Equal(ss, want) {
+ t.Errorf("open %s: imported symbols differ:\n\thave %v\n\twant %v", tt.file, ss, want)
+ }
+ }
}
}
diff --git a/src/debug/macho/testdata/clang-amd64-darwin-ld-r.obj.base64 b/src/debug/macho/testdata/clang-amd64-darwin-ld-r.obj.base64
new file mode 100644
index 00000000000000..036b5746abe351
--- /dev/null
+++ b/src/debug/macho/testdata/clang-amd64-darwin-ld-r.obj.base64
@@ -0,0 +1 @@
+z/rt/gcAAAEDAAAAAQAAAAQAAADAAQAAACAAAAAAAAAZAAAAiAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJgAAAAAAAAAAAIAAAAAAACYAAAAAAAAAAcAAAAHAAAABAAAAAAAAABfX3RleHQAAAAAAAAAAAAAX19URVhUAAAAAAAAAAAAAAAAAAAAAAAAKgAAAAAAAAAAAgAABAAAAJgCAAACAAAAAAQAgAAAAAAAAAAAAAAAAF9fY3N0cmluZwAAAAAAAABfX1RFWFQAAAAAAAAAAAAAKgAAAAAAAAAOAAAAAAAAACoCAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAX19laF9mcmFtZQAAAAAAAF9fVEVYVAAAAAAAAAAAAAA4AAAAAAAAAEAAAAAAAAAAOAIAAAMAAACoAgAABAAAAAAAAAAAAAAAAAAAAAAAAABfX2NvbXBhY3RfdW53aW5kX19MRAAAAAAAAAAAAAAAAHgAAAAAAAAAIAAAAAAAAAB4AgAAAwAAAMgCAAABAAAAAAAAAgAAAAAAAAAAAAAAAAIAAAAYAAAA0AIAAAUAAAAgAwAAKAAAACQAAAAQAAAAAAwKAAAAAAApAAAAEAAAANACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVSInlSIPsEEiNPQAAAADHRfwAAAAAsADoAAAAADHJiUX4ichIg8QQXcNoZWxsbywgd29ybGQKABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACQAAAAEAAAA+P////////8qAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAAAAAAAAAAACoAAAAAAAABAAAAAAAAAAAAAAAAAAAAABkAAAAEAAAtCwAAAAAAAB0cAAAAAQAAXBwAAAACAAAMIAAAAAIAAF4gAAAAAwAADgAAAAADAAAOEAAAAB4CAAAqAAAAAAAAABQAAAAOAwAAOAAAAAAAAAAeAAAADgMAAFAAAAAAAAAAAgAAAA8BAAAAAAAAAAAAAAgAAAABAAAAAAAAAAAAAAAgAF9tYWluAF9wcmludGYATEMxAEVIX0ZyYW1lMQBmdW5jLmVoAAAA
diff --git a/src/encoding/gob/doc.go b/src/encoding/gob/doc.go
index 0866ba1544666d..c746806887ab3d 100644
--- a/src/encoding/gob/doc.go
+++ b/src/encoding/gob/doc.go
@@ -274,7 +274,7 @@ released version, subject to issues such as security fixes. See the Go compatibi
document for background: https://golang.org/doc/go1compat
See "Gobs of data" for a design discussion of the gob wire format:
-https://blog.golang.org/gobs-of-data
+https://go.dev/blog/gob
# Security
diff --git a/src/encoding/json/scanner_test.go b/src/encoding/json/scanner_test.go
index fb64463599625e..a062e91243e477 100644
--- a/src/encoding/json/scanner_test.go
+++ b/src/encoding/json/scanner_test.go
@@ -74,6 +74,7 @@ func TestCompactAndIndent(t *testing.T) {
-5e+2
]`},
{Name(""), "{\"\":\"<>&\u2028\u2029\"}", "{\n\t\"\": \"<>&\u2028\u2029\"\n}"}, // See golang.org/issue/34070
+ {Name(""), `null`, "null \n\r\t"}, // See golang.org/issue/13520 and golang.org/issue/74806
}
var buf bytes.Buffer
for _, tt := range tests {
@@ -102,7 +103,7 @@ func TestCompactAndIndent(t *testing.T) {
buf.Reset()
if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
t.Errorf("%s: Indent error: %v", tt.Where, err)
- } else if got := buf.String(); got != tt.indent {
+ } else if got := buf.String(); got != strings.TrimRight(tt.indent, " \n\r\t") {
t.Errorf("%s: Compact:\n\tgot: %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.indent))
}
})
diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go
index e2ce778d5ad96c..6b4bcb0c74cf7c 100644
--- a/src/encoding/json/v2/arshal.go
+++ b/src/encoding/json/v2/arshal.go
@@ -470,7 +470,7 @@ func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct, last bo
// was validated before attempting to unmarshal it.
if uo.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
if err := export.Decoder(in).CheckNextValue(last); err != nil {
- if err == io.EOF {
+ if err == io.EOF && last {
offset := in.InputOffset() + int64(len(in.UnreadBuffer()))
return &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
}
@@ -487,7 +487,7 @@ func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct, last bo
if !uo.Flags.Get(jsonflags.AllowDuplicateNames) {
export.Decoder(in).Tokens.InvalidateDisabledNamespaces()
}
- if err == io.EOF {
+ if err == io.EOF && last {
offset := in.InputOffset() + int64(len(in.UnreadBuffer()))
return &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
}
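The effect of the added `last` condition is easiest to see from the stream-decoding loop it supports; a sketch, assuming the in-tree GOEXPERIMENT=jsonv2 import paths:

	package main

	import (
		"encoding/json/jsontext"
		json "encoding/json/v2"
		"io"
		"strings"
	)

	// decodeAll reads top-level JSON values until the input is exhausted.
	// With the fix above, running out of input between values surfaces as a
	// clean io.EOF, while truncation inside a value still reports an
	// unexpected-EOF syntactic error.
	func decodeAll(r io.Reader) ([]any, error) {
		d := jsontext.NewDecoder(r)
		var vals []any
		for {
			var v any
			if err := json.UnmarshalDecode(d, &v); err != nil {
				if err == io.EOF {
					return vals, nil
				}
				return vals, err
			}
			vals = append(vals, v)
		}
	}

	func main() {
		vals, _ := decodeAll(strings.NewReader(`false 0.0 [] null`))
		_ = vals // false, 0, [], nil
	}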
diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go
index f1ee2e2e3a7365..764ce690078a87 100644
--- a/src/encoding/json/v2/arshal_test.go
+++ b/src/encoding/json/v2/arshal_test.go
@@ -9413,6 +9413,51 @@ func TestUnmarshalDecodeOptions(t *testing.T) {
}
}
+func TestUnmarshalDecodeStream(t *testing.T) {
+ tests := []struct {
+ in string
+ want []any
+ err error
+ }{
+ {in: ``, err: io.EOF},
+ {in: `{`, err: &jsontext.SyntacticError{ByteOffset: len64(`{`), Err: io.ErrUnexpectedEOF}},
+ {in: `{"`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"`), Err: io.ErrUnexpectedEOF}},
+ {in: `{"k"`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k"`), JSONPointer: "/k", Err: io.ErrUnexpectedEOF}},
+ {in: `{"k":`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k":`), JSONPointer: "/k", Err: io.ErrUnexpectedEOF}},
+ {in: `{"k",`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k"`), JSONPointer: "/k", Err: jsonwire.NewInvalidCharacterError(",", "after object name (expecting ':')")}},
+ {in: `{"k"}`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k"`), JSONPointer: "/k", Err: jsonwire.NewInvalidCharacterError("}", "after object name (expecting ':')")}},
+ {in: `[`, err: &jsontext.SyntacticError{ByteOffset: len64(`[`), Err: io.ErrUnexpectedEOF}},
+ {in: `[0`, err: &jsontext.SyntacticError{ByteOffset: len64(`[0`), Err: io.ErrUnexpectedEOF}},
+ {in: ` [0`, err: &jsontext.SyntacticError{ByteOffset: len64(` [0`), Err: io.ErrUnexpectedEOF}},
+ {in: `[0.`, err: &jsontext.SyntacticError{ByteOffset: len64(`[`), JSONPointer: "/0", Err: io.ErrUnexpectedEOF}},
+ {in: `[0. `, err: &jsontext.SyntacticError{ByteOffset: len64(`[0.`), JSONPointer: "/0", Err: jsonwire.NewInvalidCharacterError(" ", "in number (expecting digit)")}},
+ {in: `[0,`, err: &jsontext.SyntacticError{ByteOffset: len64(`[0,`), Err: io.ErrUnexpectedEOF}},
+ {in: `[0:`, err: &jsontext.SyntacticError{ByteOffset: len64(`[0`), Err: jsonwire.NewInvalidCharacterError(":", "after array element (expecting ',' or ']')")}},
+ {in: `n`, err: &jsontext.SyntacticError{ByteOffset: len64(`n`), Err: io.ErrUnexpectedEOF}},
+ {in: `nul`, err: &jsontext.SyntacticError{ByteOffset: len64(`nul`), Err: io.ErrUnexpectedEOF}},
+ {in: `fal `, err: &jsontext.SyntacticError{ByteOffset: len64(`fal`), Err: jsonwire.NewInvalidCharacterError(" ", "in literal false (expecting 's')")}},
+ {in: `false`, want: []any{false}, err: io.EOF},
+ {in: `false0.0[]null`, want: []any{false, 0.0, []any{}, nil}, err: io.EOF},
+ }
+ for _, tt := range tests {
+ d := jsontext.NewDecoder(strings.NewReader(tt.in))
+ var got []any
+ for {
+ var v any
+ if err := UnmarshalDecode(d, &v); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("`%s`: UnmarshalDecode error = %v, want %v", tt.in, err, tt.err)
+ }
+ break
+ }
+ got = append(got, v)
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("`%s`: UnmarshalDecode = %v, want %v", tt.in, got, tt.want)
+ }
+ }
+}
+
// BenchmarkUnmarshalDecodeOptions is a minimal decode operation to measure
// the overhead options setup before the unmarshal operation.
func BenchmarkUnmarshalDecodeOptions(b *testing.B) {
diff --git a/src/encoding/json/v2_indent.go b/src/encoding/json/v2_indent.go
index 2655942b128b32..b2e8518471ba67 100644
--- a/src/encoding/json/v2_indent.go
+++ b/src/encoding/json/v2_indent.go
@@ -88,17 +88,8 @@ func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
}
func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
- // In v2, trailing whitespace is discarded, while v1 preserved it.
- dstLen := len(dst)
- if n := len(src) - len(bytes.TrimRight(src, " \n\r\t")); n > 0 {
- // Append the trailing whitespace afterwards.
- defer func() {
- if len(dst) > dstLen {
- dst = append(dst, src[len(src)-n:]...)
- }
- }()
- }
// In v2, only spaces and tabs are allowed, while v1 allowed any character.
+ dstLen := len(dst)
if len(strings.Trim(prefix, " \t"))+len(strings.Trim(indent, " \t")) > 0 {
// Use placeholder spaces of correct length, and replace afterwards.
invalidPrefix, invalidIndent := prefix, indent
@@ -129,5 +120,10 @@ func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
if err != nil {
return dst[:dstLen], transformSyntacticError(err)
}
+
+	// In v2, trailing whitespace is discarded, while v1 preserved it,
+	// so re-append any trailing whitespace from src to retain v1 behavior.
+ if n := len(src) - len(bytes.TrimRight(src, " \n\r\t")); n > 0 {
+ dst = append(dst, src[len(src)-n:]...)
+ }
return dst, nil
}
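A quick demonstration of the v1-compatible behavior this rearrangement preserves; the expected output is a hedged reading of the code above, not a captured transcript:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
	)

	func main() {
		var buf bytes.Buffer
		// On success, trailing whitespace after the top-level value is
		// re-appended, as in v1; after a syntax error it is now omitted
		// along with the rest of the partial output.
		if err := json.Indent(&buf, []byte("null \n\r\t"), "", "\t"); err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", buf.String()) // expected: "null \n\r\t"
	}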
diff --git a/src/encoding/json/v2_scanner_test.go b/src/encoding/json/v2_scanner_test.go
index bec55212745b01..8885520e6d890a 100644
--- a/src/encoding/json/v2_scanner_test.go
+++ b/src/encoding/json/v2_scanner_test.go
@@ -74,6 +74,7 @@ func TestCompactAndIndent(t *testing.T) {
-5e+2
]`},
{Name(""), "{\"\":\"<>&\u2028\u2029\"}", "{\n\t\"\": \"<>&\u2028\u2029\"\n}"}, // See golang.org/issue/34070
+ {Name(""), `null`, "null \n\r\t"}, // See golang.org/issue/13520 and golang.org/issue/74806
}
var buf bytes.Buffer
for _, tt := range tests {
@@ -102,7 +103,7 @@ func TestCompactAndIndent(t *testing.T) {
buf.Reset()
if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
t.Errorf("%s: Indent error: %v", tt.Where, err)
- } else if got := buf.String(); got != tt.indent {
+ } else if got := buf.String(); got != strings.TrimRight(tt.indent, " \n\r\t") {
t.Errorf("%s: Compact:\n\tgot: %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.indent))
}
})
diff --git a/src/go/parser/error_test.go b/src/go/parser/error_test.go
index a4e17dd6dbff4f..252325659cb752 100644
--- a/src/go/parser/error_test.go
+++ b/src/go/parser/error_test.go
@@ -88,7 +88,7 @@ func expectedErrors(fset *token.FileSet, filename string, src []byte) map[token.
s := errRx.FindStringSubmatch(lit)
if len(s) == 3 {
if s[1] == "HERE" {
- pos = here // start of comment
+				pos = here // position right after the token preceding the comment
} else if s[1] == "AFTER" {
pos += token.Pos(len(lit)) // end of comment
} else {
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index 8a2f95976fc390..9ee1576a99e85d 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -455,25 +455,6 @@ var exprEnd = map[token.Token]bool{
token.RBRACE: true,
}
-// safePos returns a valid file position for a given position: If pos
-// is valid to begin with, safePos returns pos. If pos is out-of-range,
-// safePos returns the EOF position.
-//
-// This is hack to work around "artificial" end positions in the AST which
-// are computed by adding 1 to (presumably valid) token positions. If the
-// token positions are invalid due to parse errors, the resulting end position
-// may be past the file's EOF position, which would lead to panics if used
-// later on.
-func (p *parser) safePos(pos token.Pos) (res token.Pos) {
- defer func() {
- if recover() != nil {
- res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
- }
- }()
- _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
- return pos
-}
-
// ----------------------------------------------------------------------------
// Identifiers
@@ -2022,7 +2003,7 @@ func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
}
if _, isBad := x.(*ast.BadExpr); !isBad {
// only report error if it's a new one
- p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
+ p.error(x.End(), fmt.Sprintf("expression in %s must be function call", callType))
}
return nil
}
@@ -2100,7 +2081,7 @@ func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
found = "assignment"
}
p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
- return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
+ return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfHeader is an adjusted version of parser.header
@@ -2423,7 +2404,7 @@ func (p *parser) parseForStmt() ast.Stmt {
key, value = as.Lhs[0], as.Lhs[1]
default:
p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
- return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
+ return &ast.BadStmt{From: pos, To: body.End()}
}
// parseSimpleStmt returned a right-hand side that
// is a single unary expression of the form "range x"
diff --git a/src/go/types/check.go b/src/go/types/check.go
index e4e8e95c9974b9..c9753280bf8685 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -25,7 +25,7 @@ var noposn = atPos(nopos)
const debug = false // leave on during development
// position tracing for panics during type checking
-const tracePos = false // TODO(markfreeman): check performance implications
+const tracePos = true
// gotypesalias controls the use of Alias types.
// As of Apr 16 2024 they are used by default.
diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go
index 711df74baf14cb..319ea54ba3c77f 100644
--- a/src/internal/bytealg/bytealg.go
+++ b/src/internal/bytealg/bytealg.go
@@ -11,16 +11,18 @@ import (
// Offsets into internal/cpu records for use in assembly.
const (
- offsetX86HasSSE42 = unsafe.Offsetof(cpu.X86.HasSSE42)
- offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
- offsetX86HasPOPCNT = unsafe.Offsetof(cpu.X86.HasPOPCNT)
+ offsetPPC64HasPOWER9 = unsafe.Offsetof(cpu.PPC64.IsPOWER9)
+
+ offsetRISCV64HasV = unsafe.Offsetof(cpu.RISCV64.HasV)
offsetLOONG64HasLSX = unsafe.Offsetof(cpu.Loong64.HasLSX)
offsetLOONG64HasLASX = unsafe.Offsetof(cpu.Loong64.HasLASX)
offsetS390xHasVX = unsafe.Offsetof(cpu.S390X.HasVX)
- offsetPPC64HasPOWER9 = unsafe.Offsetof(cpu.PPC64.IsPOWER9)
+ offsetX86HasSSE42 = unsafe.Offsetof(cpu.X86.HasSSE42)
+ offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offsetX86HasPOPCNT = unsafe.Offsetof(cpu.X86.HasPOPCNT)
)
// MaxLen is the maximum length of the string to be searched for (argument b) in Index.
diff --git a/src/internal/bytealg/compare_riscv64.s b/src/internal/bytealg/compare_riscv64.s
index 6388fcd2095dda..3b1523dfbf7f3b 100644
--- a/src/internal/bytealg/compare_riscv64.s
+++ b/src/internal/bytealg/compare_riscv64.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include "asm_riscv64.h"
#include "go_asm.h"
#include "textflag.h"
@@ -35,6 +36,46 @@ TEXT compare<>(SB),NOSPLIT|NOFRAME,$0
MIN X11, X13, X5
BEQZ X5, cmp_len
+ MOV $16, X6
+ BLT X5, X6, check8_unaligned
+
+#ifndef hasV
+ MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X6
+ BEQZ X6, compare_scalar
+#endif
+
+ // Use vector if not 8 byte aligned.
+ OR X10, X12, X6
+ AND $7, X6
+ BNEZ X6, vector_loop
+
+ // Use scalar if 8 byte aligned and <= 128 bytes.
+ SUB $128, X5, X6
+ BLEZ X6, compare_scalar_aligned
+
+ PCALIGN $16
+vector_loop:
+ VSETVLI X5, E8, M8, TA, MA, X6
+ VLE8V (X10), V8
+ VLE8V (X12), V16
+ VMSNEVV V8, V16, V0
+ VFIRSTM V0, X7
+ BGEZ X7, vector_not_eq
+ ADD X6, X10
+ ADD X6, X12
+ SUB X6, X5
+ BNEZ X5, vector_loop
+ JMP cmp_len
+
+vector_not_eq:
+ // Load first differing bytes in X8/X9.
+ ADD X7, X10
+ ADD X7, X12
+ MOVBU (X10), X8
+ MOVBU (X12), X9
+ JMP cmp
+
+compare_scalar:
MOV $32, X6
BLT X5, X6, check8_unaligned
@@ -57,9 +98,9 @@ align:
ADD $1, X12
BNEZ X7, align
-check32:
- // X6 contains $32
- BLT X5, X6, compare16
+compare_scalar_aligned:
+ MOV $32, X6
+ BLT X5, X6, check16
compare32:
MOV 0(X10), X15
MOV 0(X12), X16
diff --git a/src/internal/bytealg/equal_riscv64.s b/src/internal/bytealg/equal_riscv64.s
index 87b2d79302dc6a..58e033f8479b69 100644
--- a/src/internal/bytealg/equal_riscv64.s
+++ b/src/internal/bytealg/equal_riscv64.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include "asm_riscv64.h"
#include "go_asm.h"
#include "textflag.h"
@@ -28,6 +29,35 @@ length_check:
MOV $32, X23
BLT X12, X23, loop4_check
+#ifndef hasV
+ MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X5
+ BEQZ X5, equal_scalar
+#endif
+
+ // Use vector if not 8 byte aligned.
+ OR X10, X11, X5
+ AND $7, X5
+ BNEZ X5, vector_loop
+
+ // Use scalar if 8 byte aligned and <= 64 bytes.
+ SUB $64, X12, X6
+ BLEZ X6, loop32_check
+
+ PCALIGN $16
+vector_loop:
+ VSETVLI X12, E8, M8, TA, MA, X5
+ VLE8V (X10), V8
+ VLE8V (X11), V16
+ VMSNEVV V8, V16, V0
+ VFIRSTM V0, X6
+ BGEZ X6, done
+ ADD X5, X10
+ ADD X5, X11
+ SUB X5, X12
+ BNEZ X12, vector_loop
+ JMP done
+
+equal_scalar:
// Check alignment - if alignment differs we have to do one byte at a time.
AND $7, X10, X9
AND $7, X11, X19
diff --git a/src/internal/bytealg/index_generic.go b/src/internal/bytealg/index_generic.go
index a59e32938e76ec..643bb59ab1edbb 100644
--- a/src/internal/bytealg/index_generic.go
+++ b/src/internal/bytealg/index_generic.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !amd64 && !arm64 && !s390x && !ppc64le && !ppc64
+//go:build !amd64 && !arm64 && !loong64 && !s390x && !ppc64le && !ppc64
package bytealg
diff --git a/src/internal/bytealg/index_loong64.go b/src/internal/bytealg/index_loong64.go
new file mode 100644
index 00000000000000..ad574d66faee18
--- /dev/null
+++ b/src/internal/bytealg/index_loong64.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+import "internal/cpu"
+
+// Empirical data shows that using Index yields better
+// performance when len(s) <= 16.
+const MaxBruteForce = 16
+
+func init() {
+	// If SIMD is supported, optimize the cases where the substring length is less than 64 bytes;
+	// otherwise, optimize the cases where the length is less than 32 bytes.
+ if cpu.Loong64.HasLASX || cpu.Loong64.HasLSX {
+ MaxLen = 64
+ } else {
+ MaxLen = 32
+ }
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ // 1 error per 8 characters, plus a few slop to start.
+ return (n + 16) / 8
+}
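Roughly how these knobs are consumed; a simplified, in-package sketch of the strategy bytes.Index applies, not the actual implementation:

	// indexSketch shows how MaxBruteForce, MaxLen and Cutover steer the
	// search: short haystacks go straight to the assembly Index, and longer
	// scans switch to Index once too many candidate positions have failed.
	func indexSketch(s, sep []byte) int {
		n := len(sep)
		if n <= MaxLen && len(s) <= MaxBruteForce {
			return Index(s, sep)
		}
		fails := 0
		for i := 0; i+n <= len(s); i++ {
			if s[i] == sep[0] && string(s[i:i+n]) == string(sep) {
				return i
			}
			fails++
			if fails > Cutover(i) && n <= MaxLen {
				if r := Index(s[i:], sep); r >= 0 {
					return i + r
				}
				return -1
			}
		}
		return -1
	}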
diff --git a/src/internal/bytealg/index_loong64.s b/src/internal/bytealg/index_loong64.s
new file mode 100644
index 00000000000000..1016db738dee1b
--- /dev/null
+++ b/src/internal/bytealg/index_loong64.s
@@ -0,0 +1,303 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Index(SB),NOSPLIT,$0-56
+ MOVV R7, R6 // R6 = separator pointer
+ MOVV R8, R7 // R7 = separator length
+ JMP indexbody<>(SB)
+
+TEXT ·IndexString(SB),NOSPLIT,$0-40
+ JMP indexbody<>(SB)
+
+// input:
+// R4 = string
+// R5 = length
+// R6 = separator pointer
+// R7 = separator length (2 <= len <= 64)
+TEXT indexbody<>(SB),NOSPLIT,$0
+	// The main idea is to load 'sep' into separate register(s)
+	// to avoid repeatedly reloading it for
+	// subsequent substring comparisons.
+ SUBV R7, R5, R8
+ ADDV R4, R8 // R8 contains the start of last substring for comparison
+ ADDV $1, R4, R9 // store base for later
+
+ MOVV $8, R5
+ BGE R7, R5, len_gt_or_eq_8
+len_2_7:
+ AND $0x4, R7, R5
+ BNE R5, len_4_7
+
+len_2_3:
+ AND $0x1, R7, R5
+ BNE R5, len_3
+
+len_2:
+ MOVHU (R6), R10
+loop_2:
+ BLT R8, R4, not_found
+ MOVHU (R4), R11
+ ADDV $1, R4
+ BNE R10, R11, loop_2
+ JMP found
+
+len_3:
+ MOVHU (R6), R10
+ MOVBU 2(R6), R11
+loop_3:
+ BLT R8, R4, not_found
+ MOVHU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_3
+ MOVBU 1(R4), R13
+ BNE R11, R13, loop_3
+ JMP found
+
+len_4_7:
+ AND $0x2, R7, R5
+ BNE R5, len_6_7
+ AND $0x1, R7, R5
+ BNE R5, len_5
+len_4:
+ MOVWU (R6), R10
+loop_4:
+ BLT R8, R4, not_found
+ MOVWU (R4), R11
+ ADDV $1, R4
+ BNE R10, R11, loop_4
+ JMP found
+
+len_5:
+ MOVWU (R6), R10
+ MOVBU 4(R6), R11
+loop_5:
+ BLT R8, R4, not_found
+ MOVWU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_5
+ MOVBU 3(R4), R13
+ BNE R11, R13, loop_5
+ JMP found
+
+len_6_7:
+ AND $0x1, R7, R5
+ BNE R5, len_7
+len_6:
+ MOVWU (R6), R10
+ MOVHU 4(R6), R11
+loop_6:
+ BLT R8, R4, not_found
+ MOVWU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_6
+ MOVHU 3(R4), R13
+ BNE R11, R13, loop_6
+ JMP found
+
+len_7:
+ MOVWU (R6), R10
+ MOVWU 3(R6), R11
+loop_7:
+ BLT R8, R4, not_found
+ MOVWU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_7
+ MOVWU 2(R4), R13
+ BNE R11, R13, loop_7
+ JMP found
+
+len_gt_or_eq_8:
+ BEQ R5, R7, len_8
+ MOVV $17, R5
+ BGE R7, R5, len_gt_or_eq_17
+ JMP len_9_16
+len_8:
+ MOVV (R6), R10
+loop_8:
+ BLT R8, R4, not_found
+ MOVV (R4), R11
+ ADDV $1, R4
+ BNE R10, R11, loop_8
+ JMP found
+
+len_9_16:
+ MOVV (R6), R10
+ SUBV $8, R7
+ MOVV (R6)(R7), R11
+ SUBV $1, R7
+loop_9_16:
+ BLT R8, R4, not_found
+ MOVV (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_9_16
+ MOVV (R4)(R7), R13
+ BNE R11, R13, loop_9_16
+ JMP found
+
+len_gt_or_eq_17:
+ MOVV $25, R5
+ BGE R7, R5, len_gt_or_eq_25
+len_17_24:
+ MOVV 0(R6), R10
+ MOVV 8(R6), R11
+ SUBV $8, R7
+ MOVV (R6)(R7), R12
+ SUBV $1, R7
+loop_17_24:
+ BLT R8, R4, not_found
+ MOVV (R4), R13
+ ADDV $1, R4
+ BNE R10, R13, loop_17_24
+ MOVV 7(R4), R14
+ BNE R11, R14, loop_17_24
+ MOVV (R4)(R7), R15
+ BNE R12, R15, loop_17_24
+ JMP found
+
+len_gt_or_eq_25:
+ MOVV $33, R5
+ BGE R7, R5, len_gt_or_eq_33
+ MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLSX(SB), R10
+ BNE R10, lsx_len_25_32
+len_25_32:
+ MOVV 0(R6), R10
+ MOVV 8(R6), R11
+ MOVV 16(R6), R12
+ SUBV $8, R7
+ MOVV (R6)(R7), R13
+ SUBV $1, R7
+loop_25_32:
+ BLT R8, R4, not_found
+ MOVV (R4), R14
+ ADDV $1, R4
+ BNE R10, R14, loop_25_32
+ MOVV 7(R4), R15
+ BNE R11, R15, loop_25_32
+ MOVV 15(R4), R16
+ BNE R12, R16, loop_25_32
+ MOVV (R4)(R7), R17
+ BNE R13, R17, loop_25_32
+ JMP found
+
+	// On loong64, LSX is always available when LASX is supported.
+lasx_len_25_32:
+lsx_len_25_32:
+ VMOVQ 0(R6), V0
+ SUBV $16, R7
+ VMOVQ (R6)(R7), V1
+ SUBV $1, R7
+lsx_loop_25_32:
+ BLT R8, R4, not_found
+ VMOVQ (R4), V2
+ ADDV $1, R4
+ VSEQV V0, V2, V2
+ VSETANYEQV V2, FCC0
+ BFPT FCC0, lsx_loop_25_32
+
+ VMOVQ (R4)(R7), V3
+ VSEQV V1, V3, V3
+ VSETANYEQV V3, FCC1
+ BFPT FCC1, lsx_loop_25_32
+ JMP found
+
+len_gt_or_eq_33:
+ MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLASX(SB), R10
+ MOVV $49, R5
+ BGE R7, R5, len_gt_or_eq_49
+len_33_48:
+ BNE R10, lasx_len_33_48
+ JMP lsx_len_33_48
+
+len_gt_or_eq_49:
+len_49_64:
+ BNE R10, lasx_len_49_64
+ JMP lsx_len_49_64
+
+lsx_len_33_48:
+ VMOVQ 0(R6), V0
+ VMOVQ 16(R6), V1
+ SUBV $16, R7
+ VMOVQ (R6)(R7), V2
+ SUBV $1, R7
+lsx_loop_33_48:
+ BLT R8, R4, not_found
+ VMOVQ 0(R4), V3
+ ADDV $1, R4
+ VSEQV V0, V3, V3
+ VSETANYEQV V3, FCC0
+ BFPT FCC0, lsx_loop_33_48
+
+ VMOVQ 15(R4), V4
+ VSEQV V1, V4, V4
+ VSETANYEQV V4, FCC1
+ BFPT FCC1, lsx_loop_33_48
+
+ VMOVQ (R4)(R7), V5
+ VSEQV V2, V5, V5
+ VSETANYEQV V5, FCC2
+ BFPT FCC2, lsx_loop_33_48
+ JMP found
+
+lsx_len_49_64:
+ VMOVQ 0(R6), V0
+ VMOVQ 16(R6), V1
+ VMOVQ 32(R6), V2
+ SUBV $16, R7
+ VMOVQ (R6)(R7), V3
+ SUBV $1, R7
+lsx_loop_49_64:
+ BLT R8, R4, not_found
+ VMOVQ 0(R4), V4
+ ADDV $1, R4
+ VSEQV V0, V4, V4
+ VSETANYEQV V4, FCC0
+ BFPT FCC0, lsx_loop_49_64
+
+ VMOVQ 15(R4), V5
+ VSEQV V1, V5, V5
+ VSETANYEQV V5, FCC1
+ BFPT FCC1, lsx_loop_49_64
+
+ VMOVQ 31(R4), V6
+ VSEQV V2, V6, V6
+ VSETANYEQV V6, FCC2
+ BFPT FCC2, lsx_loop_49_64
+
+ VMOVQ (R4)(R7), V7
+ VSEQV V3, V7, V7
+ VSETANYEQV V7, FCC3
+ BFPT FCC3, lsx_loop_49_64
+ JMP found
+
+lasx_len_33_48:
+lasx_len_49_64:
+lasx_len_33_64:
+ XVMOVQ (R6), X0
+ SUBV $32, R7
+ XVMOVQ (R6)(R7), X1
+ SUBV $1, R7
+lasx_loop_33_64:
+ BLT R8, R4, not_found
+ XVMOVQ (R4), X2
+ ADDV $1, R4
+ XVSEQV X0, X2, X3
+ XVSETANYEQV X3, FCC0
+ BFPT FCC0, lasx_loop_33_64
+
+ XVMOVQ (R4)(R7), X4
+ XVSEQV X1, X4, X5
+ XVSETANYEQV X5, FCC1
+ BFPT FCC1, lasx_loop_33_64
+ JMP found
+
+found:
+ SUBV R9, R4
+ RET
+
+not_found:
+ MOVV $-1, R4
+ RET
diff --git a/src/internal/bytealg/index_native.go b/src/internal/bytealg/index_native.go
index 59c93f9d126b90..f917c7a92adbf1 100644
--- a/src/internal/bytealg/index_native.go
+++ b/src/internal/bytealg/index_native.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build amd64 || arm64 || s390x || ppc64le || ppc64
+//go:build amd64 || arm64 || loong64 || s390x || ppc64le || ppc64
package bytealg
diff --git a/src/internal/bytealg/indexbyte_riscv64.s b/src/internal/bytealg/indexbyte_riscv64.s
index fde00da0eac7d9..527ae6d35ed55b 100644
--- a/src/internal/bytealg/indexbyte_riscv64.s
+++ b/src/internal/bytealg/indexbyte_riscv64.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include "asm_riscv64.h"
#include "go_asm.h"
#include "textflag.h"
@@ -11,12 +12,14 @@ TEXT ·IndexByte(SB),NOSPLIT,$0-40
// X12 = b_cap (unused)
// X13 = byte to find
AND $0xff, X13, X12 // x12 byte to look for
- MOV X10, X13 // store base for later
SLTI $24, X11, X14
- ADD X10, X11 // end
- BEQZ X14, bigBody
+ BNEZ X14, small
+ JMP indexByteBig<>(SB)
+small:
+ MOV X10, X13 // store base for later
+ ADD X10, X11 // end
SUB $1, X10
loop:
ADD $1, X10
@@ -31,21 +34,19 @@ notfound:
MOV $-1, X10
RET
-bigBody:
- JMP indexByteBig<>(SB)
-
TEXT ·IndexByteString(SB),NOSPLIT,$0-32
// X10 = b_base
// X11 = b_len
// X12 = byte to find
-
AND $0xff, X12 // x12 byte to look for
- MOV X10, X13 // store base for later
SLTI $24, X11, X14
- ADD X10, X11 // end
- BEQZ X14, bigBody
+ BNEZ X14, small
+ JMP indexByteBig<>(SB)
+small:
+ MOV X10, X13 // store base for later
+ ADD X10, X11 // end
SUB $1, X10
loop:
ADD $1, X10
@@ -60,20 +61,41 @@ notfound:
MOV $-1, X10
RET
-bigBody:
- JMP indexByteBig<>(SB)
-
TEXT indexByteBig<>(SB),NOSPLIT|NOFRAME,$0
- // On entry
+ // On entry:
// X10 = b_base
- // X11 = end
+ // X11 = b_len (at least 16 bytes)
// X12 = byte to find
- // X13 = b_base
- // X11 is at least 16 bytes > X10
-
- // On exit
+ // On exit:
// X10 = index of first instance of sought byte, if found, or -1 otherwise
+ MOV X10, X13 // store base for later
+
+#ifndef hasV
+ MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X5
+ BEQZ X5, indexbyte_scalar
+#endif
+
+ PCALIGN $16
+vector_loop:
+ VSETVLI X11, E8, M8, TA, MA, X5
+ VLE8V (X10), V8
+ VMSEQVX X12, V8, V0
+ VFIRSTM V0, X6
+ BGEZ X6, vector_found
+ ADD X5, X10
+ SUB X5, X11
+ BNEZ X11, vector_loop
+ JMP notfound
+
+vector_found:
+ SUB X13, X10
+ ADD X6, X10
+ RET
+
+indexbyte_scalar:
+ ADD X10, X11 // end
+
// Process the first few bytes until we get to an 8 byte boundary
// No need to check for end here as we have at least 16 bytes in
// the buffer.
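
The new vector path processes the buffer in hardware-sized chunks: VSETVLI picks how many bytes each pass covers, VMSEQVX and VFIRSTM find the first match within the chunk, and a hit is converted back to a base-relative index. A scalar Go sketch of that control flow (the fixed chunk size here is an arbitrary stand-in for the hardware-selected vector length):

```go
package main

import "fmt"

// indexByteChunks is a scalar model of the vector loop above: each pass
// handles one chunk, scans it for c (VMSEQ.VX plus VFIRST.M in the
// assembly), and converts a hit back to a base-relative index.
func indexByteChunks(b []byte, c byte) int {
	const chunk = 32 // stand-in for the hardware-selected vector length
	for off := 0; off < len(b); off += chunk {
		end := min(off+chunk, len(b))
		for i := off; i < end; i++ {
			if b[i] == c {
				return i
			}
		}
	}
	return -1
}

func main() {
	fmt.Println(indexByteChunks([]byte("hello, world"), 'w')) // 7
}
```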
diff --git a/src/internal/chacha8rand/chacha8_loong64.s b/src/internal/chacha8rand/chacha8_loong64.s
index caa1426a054967..5e6857ed3a6598 100644
--- a/src/internal/chacha8rand/chacha8_loong64.s
+++ b/src/internal/chacha8rand/chacha8_loong64.s
@@ -49,35 +49,23 @@ lsx_chacha8:
MOVV $·chachaIncRot(SB), R11
	// load constants
- // VLDREPL.W $0, R10, V0
- WORD $0x30200140
- // VLDREPL.W $1, R10, V1
- WORD $0x30200541
- // VLDREPL.W $2, R10, V2
- WORD $0x30200942
- // VLDREPL.W $3, R10, V3
- WORD $0x30200d43
+ VMOVQ (R10), V0.W4
+ VMOVQ 1(R10), V1.W4
+ VMOVQ 2(R10), V2.W4
+ VMOVQ 3(R10), V3.W4
// load 4-32bit data from incRotMatrix added to counter
VMOVQ (R11), V30
// load seed
- // VLDREPL.W $0, R4, V4
- WORD $0x30200084
- // VLDREPL.W $1, R4, V5
- WORD $0x30200485
- // VLDREPL.W $2, R4, V6
- WORD $0x30200886
- // VLDREPL.W $3, R4, V7
- WORD $0x30200c87
- // VLDREPL.W $4, R4, V8
- WORD $0x30201088
- // VLDREPL.W $5, R4, V9
- WORD $0x30201489
- // VLDREPL.W $6, R4, V10
- WORD $0x3020188a
- // VLDREPL.W $7, R4, V11
- WORD $0x30201c8b
+ VMOVQ (R4), V4.W4
+ VMOVQ 1(R4), V5.W4
+ VMOVQ 2(R4), V6.W4
+ VMOVQ 3(R4), V7.W4
+ VMOVQ 4(R4), V8.W4
+ VMOVQ 5(R4), V9.W4
+ VMOVQ 6(R4), V10.W4
+ VMOVQ 7(R4), V11.W4
// load counter and update counter
VMOVQ R6, V12.W4
diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go
index 6017b1acc9fe96..fca38532dc518f 100644
--- a/src/internal/cpu/cpu.go
+++ b/src/internal/cpu/cpu.go
@@ -31,9 +31,13 @@ var X86 struct {
HasADX bool
HasAVX bool
HasAVX2 bool
+ HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL
HasAVX512F bool
+ HasAVX512CD bool
HasAVX512BW bool
+ HasAVX512DQ bool
HasAVX512VL bool
+ HasAVX512VPCLMULQDQ bool
HasBMI1 bool
HasBMI2 bool
HasERMS bool
@@ -48,7 +52,6 @@ var X86 struct {
HasSSSE3 bool
HasSSE41 bool
HasSSE42 bool
- HasAVX512VPCLMULQDQ bool
_ CacheLinePad
}
@@ -161,6 +164,10 @@ var RISCV64 struct {
//go:linkname S390X
//go:linkname RISCV64
+// doDerived, if non-nil, is called after processing GODEBUG to set "derived"
+// feature flags.
+var doDerived func()
+
// Initialize examines the processor and sets the relevant variables above.
// This is called by the runtime package early in program initialization,
// before normal init functions are run. env is set by runtime if the OS supports
@@ -168,6 +175,9 @@ var RISCV64 struct {
func Initialize(env string) {
doinit()
processOptions(env)
+ if doDerived != nil {
+ doDerived()
+ }
}
// options contains the cpu debug options that can be used in GODEBUG.
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go
index 69b9542ae2a1f5..315c26b0ddb735 100644
--- a/src/internal/cpu/cpu_x86.go
+++ b/src/internal/cpu/cpu_x86.go
@@ -36,7 +36,9 @@ const (
cpuid_BMI2 = 1 << 8
cpuid_ERMS = 1 << 9
cpuid_AVX512F = 1 << 16
+ cpuid_AVX512DQ = 1 << 17
cpuid_ADX = 1 << 19
+ cpuid_AVX512CD = 1 << 28
cpuid_SHA = 1 << 29
cpuid_AVX512BW = 1 << 30
cpuid_AVX512VL = 1 << 31
@@ -89,7 +91,9 @@ func doinit() {
// they can be turned off.
options = append(options,
option{Name: "avx512f", Feature: &X86.HasAVX512F},
+ option{Name: "avx512cd", Feature: &X86.HasAVX512CD},
option{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+ option{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
option{Name: "avx512vl", Feature: &X86.HasAVX512VL},
)
}
@@ -154,7 +158,9 @@ func doinit() {
X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512
if X86.HasAVX512F {
+ X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD)
X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
+ X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ)
X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
}
@@ -170,6 +176,17 @@ func doinit() {
_, _, _, edxExt1 := cpuid(0x80000001, 0)
X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP)
+
+ doDerived = func() {
+ // Rather than carefully gating on fundamental AVX-512 features, we have
+ // a virtual "AVX512" feature that captures F+CD+BW+DQ+VL. BW, DQ, and
+ // VL have a huge effect on which AVX-512 instructions are available,
+ // and these have all been supported on everything except the earliest
+ // Phi chips with AVX-512. No CPU has had CD without F, so we include
+ // it. GOAMD64=v4 also implies exactly this set, and these are all
+ // included in AVX10.1.
+ X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL
+ }
}
func isSet(hwc uint32, value uint32) bool {
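
The derived-feature hook runs after GODEBUG option processing, so the virtual flag always reflects any per-feature overrides. A minimal sketch of the same idea outside the runtime (hypothetical types, not the internal/cpu API):

```go
package main

import "fmt"

// features is a hypothetical stand-in for the internal/cpu flag set; the
// real code wires the equivalent of derive into the doDerived hook so it
// runs after GODEBUG overrides are applied.
type features struct {
	F, CD, BW, DQ, VL bool
	AVX512            bool // derived: F+CD+BW+DQ+VL
}

func (f *features) derive() {
	f.AVX512 = f.F && f.CD && f.BW && f.DQ && f.VL
}

func main() {
	f := features{F: true, CD: true, BW: true, DQ: true, VL: true}
	f.derive()
	fmt.Println(f.AVX512) // true

	f.BW = false // e.g. a cpu.avx512bw=off style override
	f.derive()
	fmt.Println(f.AVX512) // false
}
```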
diff --git a/src/internal/platform/supported.go b/src/internal/platform/supported.go
index 7d25fd7ee990ec..a07b66d3947cce 100644
--- a/src/internal/platform/supported.go
+++ b/src/internal/platform/supported.go
@@ -194,7 +194,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool {
"ios/amd64", "ios/arm64",
"aix/ppc64",
"openbsd/arm64",
- "windows/386", "windows/amd64", "windows/arm", "windows/arm64":
+ "windows/386", "windows/amd64", "windows/arm64":
return true
}
return false
@@ -226,7 +226,7 @@ func InternalLinkPIESupported(goos, goarch string) bool {
case "android/arm64",
"darwin/amd64", "darwin/arm64",
"linux/amd64", "linux/arm64", "linux/loong64", "linux/ppc64le",
- "windows/386", "windows/amd64", "windows/arm", "windows/arm64":
+ "windows/386", "windows/amd64", "windows/arm64":
return true
}
return false
diff --git a/src/internal/platform/zosarch.go b/src/internal/platform/zosarch.go
index ebde978a230f74..a2f5b22ea9a656 100644
--- a/src/internal/platform/zosarch.go
+++ b/src/internal/platform/zosarch.go
@@ -57,7 +57,6 @@ var List = []OSArch{
{"wasip1", "wasm"},
{"windows", "386"},
{"windows", "amd64"},
- {"windows", "arm"},
{"windows", "arm64"},
}
@@ -74,7 +73,7 @@ var distInfo = map[OSArch]osArchInfo{
{"freebsd", "amd64"}: {CgoSupported: true},
{"freebsd", "arm"}: {CgoSupported: true},
{"freebsd", "arm64"}: {CgoSupported: true},
- {"freebsd", "riscv64"}: {CgoSupported: true},
+ {"freebsd", "riscv64"}: {CgoSupported: true, Broken: true},
{"illumos", "amd64"}: {CgoSupported: true},
{"ios", "amd64"}: {CgoSupported: true},
{"ios", "arm64"}: {CgoSupported: true},
@@ -111,6 +110,5 @@ var distInfo = map[OSArch]osArchInfo{
{"wasip1", "wasm"}: {},
{"windows", "386"}: {CgoSupported: true, FirstClass: true},
{"windows", "amd64"}: {CgoSupported: true, FirstClass: true},
- {"windows", "arm"}: {Broken: true},
{"windows", "arm64"}: {CgoSupported: true},
}
diff --git a/src/internal/runtime/maps/group.go b/src/internal/runtime/maps/group.go
index b23ff76f983146..c8d38ba27c8c5d 100644
--- a/src/internal/runtime/maps/group.go
+++ b/src/internal/runtime/maps/group.go
@@ -22,10 +22,9 @@ const (
ctrlEmpty ctrl = 0b10000000
ctrlDeleted ctrl = 0b11111110
- bitsetLSB = 0x0101010101010101
- bitsetMSB = 0x8080808080808080
- bitsetEmpty = bitsetLSB * uint64(ctrlEmpty)
- bitsetDeleted = bitsetLSB * uint64(ctrlDeleted)
+ bitsetLSB = 0x0101010101010101
+ bitsetMSB = 0x8080808080808080
+ bitsetEmpty = bitsetLSB * uint64(ctrlEmpty)
)
// bitset represents a set of slots within a group.
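
The remaining constants rely on a small multiplication trick: multiplying the per-byte low-bit mask by a byte value broadcasts that byte into every lane of the word. A quick demonstration:

```go
package main

import "fmt"

func main() {
	const bitsetLSB = 0x0101010101010101
	const ctrlEmpty = 0b10000000

	// Multiplying the per-byte low-bit mask by a byte value broadcasts
	// that byte into all eight lanes, which is how bitsetEmpty is built.
	fmt.Printf("%#x\n", uint64(bitsetLSB)*uint64(ctrlEmpty)) // 0x8080808080808080
}
```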
diff --git a/src/internal/runtime/maps/runtime.go b/src/internal/runtime/maps/runtime.go
index ff8a7482494aef..8bba23f07003bd 100644
--- a/src/internal/runtime/maps/runtime.go
+++ b/src/internal/runtime/maps/runtime.go
@@ -94,10 +94,11 @@ func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -168,10 +169,11 @@ func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Po
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -262,9 +264,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -329,7 +332,7 @@ outer:
slotElem = emem
}
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
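
Each of the probe loops in these files gets the same loop-invariant hoist: h2(hash) depends only on the key's hash, which is fixed for the entire probe sequence, so it is now computed once before the loop instead of once per group visited. A minimal sketch of the pattern (the h2 here is a simplified stand-in, not the runtime's):

```go
package main

import "fmt"

// h2 is a simplified stand-in for the runtime helper that derives the
// 7-bit control byte each probed group is matched against.
func h2(hash uintptr) uint8 { return uint8(hash & 0x7f) }

func main() {
	hash := uintptr(0x7f4a7c15)

	h2Hash := h2(hash) // hoisted: hash is fixed for the whole probe walk
	for probe := 0; probe < 3; probe++ {
		fmt.Printf("probe %d matches groups against control byte %#x\n", probe, h2Hash)
	}
}
```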
diff --git a/src/internal/runtime/maps/runtime_fast32.go b/src/internal/runtime/maps/runtime_fast32.go
index beed67ce286aa1..d5be04afd450c0 100644
--- a/src/internal/runtime/maps/runtime_fast32.go
+++ b/src/internal/runtime/maps/runtime_fast32.go
@@ -55,10 +55,11 @@ func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Poin
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -124,10 +125,11 @@ func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -245,9 +247,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -302,7 +305,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
@@ -383,9 +386,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -435,7 +439,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/runtime_fast64.go b/src/internal/runtime/maps/runtime_fast64.go
index 2f9cf28daafdb4..2bee2d4be019b3 100644
--- a/src/internal/runtime/maps/runtime_fast64.go
+++ b/src/internal/runtime/maps/runtime_fast64.go
@@ -55,10 +55,11 @@ func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Poin
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -124,10 +125,12 @@ func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -245,9 +248,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -302,7 +306,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
@@ -422,9 +426,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -474,7 +479,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/runtime_faststr.go b/src/internal/runtime/maps/runtime_faststr.go
index ddac7eacc52ece..374468b66438a6 100644
--- a/src/internal/runtime/maps/runtime_faststr.go
+++ b/src/internal/runtime/maps/runtime_faststr.go
@@ -131,10 +131,11 @@ func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -190,10 +191,11 @@ func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Po
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -313,9 +315,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -373,7 +376,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index d4b9276b57078f..7e2c6e31bcaa1e 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -192,10 +192,11 @@ func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (
// load factors, k is less than 32, meaning that the number of false
// positive comparisons we must perform is less than 1/8 per find.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -225,10 +226,11 @@ func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -271,9 +273,10 @@ func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Point
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -348,7 +351,7 @@ func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Point
slotElem = emem
}
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
@@ -420,9 +423,10 @@ func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsaf
// Delete returns true if it put a tombstone in t.
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
diff --git a/src/internal/sync/hashtriemap.go b/src/internal/sync/hashtriemap.go
index 6f5e0b437fea23..db832974278f16 100644
--- a/src/internal/sync/hashtriemap.go
+++ b/src/internal/sync/hashtriemap.go
@@ -178,7 +178,7 @@ func (ht *HashTrieMap[K, V]) expand(oldEntry, newEntry *entry[K, V], newHash uin
top := newIndirect
for {
if hashShift == 0 {
- panic("internal/sync.HashTrieMap: ran out of hash bits while inserting")
+ panic("internal/sync.HashTrieMap: ran out of hash bits while inserting (incorrect use of unsafe or cgo, or data race?)")
}
hashShift -= nChildrenLog2 // hashShift is for the level parent is at. We need to go deeper.
oi := (oldHash >> hashShift) & nChildrenMask
@@ -196,8 +196,8 @@ func (ht *HashTrieMap[K, V]) expand(oldEntry, newEntry *entry[K, V], newHash uin
}
// Store sets the value for a key.
-func (ht *HashTrieMap[K, V]) Store(key K, old V) {
- _, _ = ht.Swap(key, old)
+func (ht *HashTrieMap[K, V]) Store(key K, new V) {
+ _, _ = ht.Swap(key, new)
}
// Swap swaps the value for a key and returns the previous value if any.
diff --git a/src/internal/syscall/unix/at_sysnum_netbsd.go b/src/internal/syscall/unix/at_sysnum_netbsd.go
index b59b5e0cf96d0d..db17852b748e32 100644
--- a/src/internal/syscall/unix/at_sysnum_netbsd.go
+++ b/src/internal/syscall/unix/at_sysnum_netbsd.go
@@ -7,16 +7,17 @@ package unix
import "syscall"
const (
- unlinkatTrap uintptr = syscall.SYS_UNLINKAT
- openatTrap uintptr = syscall.SYS_OPENAT
- fstatatTrap uintptr = syscall.SYS_FSTATAT
- readlinkatTrap uintptr = syscall.SYS_READLINKAT
- mkdiratTrap uintptr = syscall.SYS_MKDIRAT
- fchmodatTrap uintptr = syscall.SYS_FCHMODAT
- fchownatTrap uintptr = syscall.SYS_FCHOWNAT
- renameatTrap uintptr = syscall.SYS_RENAMEAT
- linkatTrap uintptr = syscall.SYS_LINKAT
- symlinkatTrap uintptr = syscall.SYS_SYMLINKAT
+ unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+ openatTrap uintptr = syscall.SYS_OPENAT
+ fstatatTrap uintptr = syscall.SYS_FSTATAT
+ readlinkatTrap uintptr = syscall.SYS_READLINKAT
+ mkdiratTrap uintptr = syscall.SYS_MKDIRAT
+ fchmodatTrap uintptr = syscall.SYS_FCHMODAT
+ fchownatTrap uintptr = syscall.SYS_FCHOWNAT
+ renameatTrap uintptr = syscall.SYS_RENAMEAT
+ linkatTrap uintptr = syscall.SYS_LINKAT
+ symlinkatTrap uintptr = syscall.SYS_SYMLINKAT
+ posixFallocateTrap uintptr = 479
)
const (
diff --git a/src/internal/syscall/unix/fallocate_freebsd_386.go b/src/internal/syscall/unix/fallocate_bsd_386.go
similarity index 85%
rename from src/internal/syscall/unix/fallocate_freebsd_386.go
rename to src/internal/syscall/unix/fallocate_bsd_386.go
index 535b23dbc5b7eb..1dcdff4a5391d0 100644
--- a/src/internal/syscall/unix/fallocate_freebsd_386.go
+++ b/src/internal/syscall/unix/fallocate_bsd_386.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build (freebsd || netbsd) && 386
+
package unix
import "syscall"
@@ -9,6 +11,7 @@ import "syscall"
func PosixFallocate(fd int, off int64, size int64) error {
// If successful, posix_fallocate() returns zero. It returns an error on failure, without
// setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ // and https://man.netbsd.org/posix_fallocate.2#RETURN%20VALUES
r1, _, _ := syscall.Syscall6(posixFallocateTrap, uintptr(fd), uintptr(off), uintptr(off>>32), uintptr(size), uintptr(size>>32), 0)
if r1 != 0 {
return syscall.Errno(r1)
diff --git a/src/internal/syscall/unix/fallocate_freebsd_64bit.go b/src/internal/syscall/unix/fallocate_bsd_64bit.go
similarity index 82%
rename from src/internal/syscall/unix/fallocate_freebsd_64bit.go
rename to src/internal/syscall/unix/fallocate_bsd_64bit.go
index a9d52283f06a9b..177bb48382d54c 100644
--- a/src/internal/syscall/unix/fallocate_freebsd_64bit.go
+++ b/src/internal/syscall/unix/fallocate_bsd_64bit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build freebsd && (amd64 || arm64 || riscv64)
+//go:build (freebsd || netbsd) && (amd64 || arm64 || riscv64)
package unix
@@ -11,6 +11,7 @@ import "syscall"
func PosixFallocate(fd int, off int64, size int64) error {
// If successful, posix_fallocate() returns zero. It returns an error on failure, without
// setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ // and https://man.netbsd.org/posix_fallocate.2#RETURN%20VALUES
r1, _, _ := syscall.Syscall(posixFallocateTrap, uintptr(fd), uintptr(off), uintptr(size))
if r1 != 0 {
return syscall.Errno(r1)
diff --git a/src/internal/syscall/unix/fallocate_freebsd_arm.go b/src/internal/syscall/unix/fallocate_bsd_arm.go
similarity index 90%
rename from src/internal/syscall/unix/fallocate_freebsd_arm.go
rename to src/internal/syscall/unix/fallocate_bsd_arm.go
index 1ded50f3b9a168..15e99d02b1c790 100644
--- a/src/internal/syscall/unix/fallocate_freebsd_arm.go
+++ b/src/internal/syscall/unix/fallocate_bsd_arm.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build (freebsd || netbsd) && arm
+
package unix
import "syscall"
@@ -9,6 +11,7 @@ import "syscall"
func PosixFallocate(fd int, off int64, size int64) error {
// If successful, posix_fallocate() returns zero. It returns an error on failure, without
// setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ // and https://man.netbsd.org/posix_fallocate.2#RETURN%20VALUES
//
// The padding 0 argument is needed because the ARM calling convention requires that if an
// argument (off in this case) needs double-word alignment (8-byte), the NCRN (next core
diff --git a/src/make.bash b/src/make.bash
index b67ae1529fa0ac..d4e927dfda7c43 100755
--- a/src/make.bash
+++ b/src/make.bash
@@ -64,14 +64,14 @@
# timing information to this file. Useful for profiling where the
# time goes when these scripts run.
#
-# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.22.6 for bootstrap.
+# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.24.6 for bootstrap.
# If $GOROOT_BOOTSTRAP/bin/go is missing, $(go env GOROOT) is
-# tried for all "go" in $PATH. By default, one of $HOME/go1.22.6,
-# $HOME/sdk/go1.22.6, or $HOME/go1.4, whichever exists, in that order.
+# tried for all "go" in $PATH. By default, one of $HOME/go1.24.6,
+# $HOME/sdk/go1.24.6, or $HOME/go1.4, whichever exists, in that order.
# We still check $HOME/go1.4 to allow for build scripts that still hard-code
# that name even though they put newer Go toolchains there.
-bootgo=1.22.6
+bootgo=1.24.6
set -e
diff --git a/src/make.bat b/src/make.bat
index d9f686452e8974..29105cd8a54d98 100644
--- a/src/make.bat
+++ b/src/make.bat
@@ -71,7 +71,7 @@ for /f "tokens=*" %%g in ('where go 2^>nul') do (
)
)
-set bootgo=1.22.6
+set bootgo=1.24.6
if "x%GOROOT_BOOTSTRAP%"=="x" if exist "%HOMEDRIVE%%HOMEPATH%\go%bootgo%" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\go%bootgo%
if "x%GOROOT_BOOTSTRAP%"=="x" if exist "%HOMEDRIVE%%HOMEPATH%\sdk\go%bootgo%" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\sdk\go%bootgo%
if "x%GOROOT_BOOTSTRAP%"=="x" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\Go1.4
diff --git a/src/make.rc b/src/make.rc
index b3beb75660d1e4..9ba2b7d76d7018 100755
--- a/src/make.rc
+++ b/src/make.rc
@@ -48,7 +48,7 @@ fn bootstrapenv {
GOROOT=$GOROOT_BOOTSTRAP GO111MODULE=off GOENV=off GOOS=() GOARCH=() GOEXPERIMENT=() GOFLAGS=() $*
}
-bootgo = 1.22.6
+bootgo = 1.24.6
GOROOT = `{cd .. && pwd}
goroot_bootstrap_set = 'true'
if(~ $"GOROOT_BOOTSTRAP ''){
diff --git a/src/math/exp.go b/src/math/exp.go
index 050e0ee9d88239..029a4f8163698f 100644
--- a/src/math/exp.go
+++ b/src/math/exp.go
@@ -109,13 +109,11 @@ func exp(x float64) float64 {
// special cases
switch {
- case IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x):
return x
- case IsInf(x, -1):
- return 0
- case x > Overflow:
+ case x > Overflow: // handles case where x is +∞
return Inf(1)
- case x < Underflow:
+ case x < Underflow: // handles case where x is -∞
return 0
case -NearZero < x && x < NearZero:
return 1 + x
@@ -157,13 +155,11 @@ func exp2(x float64) float64 {
// special cases
switch {
- case IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x):
return x
- case IsInf(x, -1):
- return 0
- case x > Overflow:
+ case x > Overflow: // handles case where x is +∞
return Inf(1)
- case x < Underflow:
+ case x < Underflow: // handles case where x is -∞
return 0
}
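
The folded cases rely on ordinary float comparisons being well defined for infinities: +Inf exceeds any finite threshold and -Inf is below any finite threshold, so the x > Overflow and x < Underflow cases subsume the removed IsInf checks. A small demonstration (the threshold values are copied from the exp source for illustration; they are unexported, so treat them as assumptions):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const overflow = 7.09782712893383973096e+02
	const underflow = -7.45133219101941108420e+02

	// Infinities order normally against finite thresholds, so the
	// comparisons subsume the dedicated IsInf cases.
	fmt.Println(math.Inf(1) > overflow)                         // true
	fmt.Println(math.Inf(-1) < underflow)                       // true
	fmt.Println(math.Exp(math.Inf(1)), math.Exp(math.Inf(-1))) // +Inf 0
}
```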
diff --git a/src/net/dial.go b/src/net/dial.go
index 6264984ceca182..a87c57603a813c 100644
--- a/src/net/dial.go
+++ b/src/net/dial.go
@@ -9,6 +9,7 @@ import (
"internal/bytealg"
"internal/godebug"
"internal/nettrace"
+ "net/netip"
"syscall"
"time"
)
@@ -523,30 +524,8 @@ func (d *Dialer) Dial(network, address string) (Conn, error) {
// See func [Dial] for a description of the network and address
// parameters.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
- if ctx == nil {
- panic("nil context")
- }
- deadline := d.deadline(ctx, time.Now())
- if !deadline.IsZero() {
- testHookStepTime()
- if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
- subCtx, cancel := context.WithDeadline(ctx, deadline)
- defer cancel()
- ctx = subCtx
- }
- }
- if oldCancel := d.Cancel; oldCancel != nil {
- subCtx, cancel := context.WithCancel(ctx)
- defer cancel()
- go func() {
- select {
- case <-oldCancel:
- cancel()
- case <-subCtx.Done():
- }
- }()
- ctx = subCtx
- }
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
// Shadow the nettrace (if any) during resolve so Connect events don't fire for DNS lookups.
resolveCtx := ctx
@@ -578,6 +557,97 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn
return sd.dialParallel(ctx, primaries, fallbacks)
}
+func (d *Dialer) dialCtx(ctx context.Context) (context.Context, context.CancelFunc) {
+ if ctx == nil {
+ panic("nil context")
+ }
+ deadline := d.deadline(ctx, time.Now())
+ var cancel1, cancel2 context.CancelFunc
+ if !deadline.IsZero() {
+ testHookStepTime()
+ if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
+ var subCtx context.Context
+ subCtx, cancel1 = context.WithDeadline(ctx, deadline)
+ ctx = subCtx
+ }
+ }
+ if oldCancel := d.Cancel; oldCancel != nil {
+ subCtx, cancel2 := context.WithCancel(ctx)
+ go func() {
+ select {
+ case <-oldCancel:
+ cancel2()
+ case <-subCtx.Done():
+ }
+ }()
+ ctx = subCtx
+ }
+ return ctx, func() {
+ if cancel1 != nil {
+ cancel1()
+ }
+ if cancel2 != nil {
+ cancel2()
+ }
+ }
+}
+
+// DialTCP acts like Dial for TCP networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be a TCP network name; see func Dial for details.
+func (d *Dialer) DialTCP(ctx context.Context, network string, laddr netip.AddrPort, raddr netip.AddrPort) (*TCPConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialTCP(ctx, d, network, TCPAddrFromAddrPort(laddr), TCPAddrFromAddrPort(raddr))
+}
+
+// DialUDP acts like Dial for UDP networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be a UDP network name; see func Dial for details.
+func (d *Dialer) DialUDP(ctx context.Context, network string, laddr netip.AddrPort, raddr netip.AddrPort) (*UDPConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialUDP(ctx, d, network, UDPAddrFromAddrPort(laddr), UDPAddrFromAddrPort(raddr))
+}
+
+// DialIP acts like Dial for IP networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be an IP network name; see func Dial for details.
+func (d *Dialer) DialIP(ctx context.Context, network string, laddr netip.Addr, raddr netip.Addr) (*IPConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialIP(ctx, d, network, ipAddrFromAddr(laddr), ipAddrFromAddr(raddr))
+}
+
+// DialUnix acts like Dial for Unix networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be a Unix network name; see func Dial for details.
+func (d *Dialer) DialUnix(ctx context.Context, network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialUnix(ctx, d, network, laddr, raddr)
+}
+
// dialParallel races two copies of dialSerial, giving the first a
// head start. It returns the first established connection and
// closes the others. Otherwise it returns an error from the first
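
A usage sketch of the new context-aware dialer methods added above (it assumes nothing beyond the new Dialer.DialTCP signature; the throwaway listener just gives the dial a target):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"net/netip"
	"time"
)

func main() {
	// A throwaway listener so the dial below has something to connect to.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	d := &net.Dialer{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	raddr := netip.MustParseAddrPort(ln.Addr().String())
	// The zero AddrPort leaves the local address unset, mirroring how the
	// tests below pass (*TCPAddr)(nil).AddrPort().
	conn, err := d.DialTCP(ctx, "tcp", netip.AddrPort{}, raddr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```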
diff --git a/src/net/dial_test.go b/src/net/dial_test.go
index b3bedb2fa275c3..829b80c33a198d 100644
--- a/src/net/dial_test.go
+++ b/src/net/dial_test.go
@@ -11,6 +11,7 @@ import (
"fmt"
"internal/testenv"
"io"
+ "net/netip"
"os"
"runtime"
"strings"
@@ -1064,6 +1065,99 @@ func TestDialerControlContext(t *testing.T) {
})
}
+func TestDialContext(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ case "js", "wasip1":
+ t.Skipf("skipping: fake net does not support Dialer.ControlContext")
+ }
+
+ t.Run("StreamDial", func(t *testing.T) {
+ var err error
+ for i, network := range []string{"tcp", "tcp4", "tcp6", "unix", "unixpacket"} {
+ if !testableNetwork(network) {
+ continue
+ }
+ ln := newLocalListener(t, network)
+ defer ln.Close()
+ var id int
+ d := Dialer{ControlContext: func(ctx context.Context, network string, address string, c syscall.RawConn) error {
+ id = ctx.Value("id").(int)
+ return controlOnConnSetup(network, address, c)
+ }}
+ var c Conn
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+			raddr, parseErr := netip.ParseAddrPort(ln.Addr().String())
+			if parseErr != nil {
+				t.Error(parseErr)
+				continue
+			}
+ c, err = d.DialTCP(context.WithValue(context.Background(), "id", i+1), network, (*TCPAddr)(nil).AddrPort(), raddr)
+ case "unix", "unixpacket":
+			raddr, resolveErr := ResolveUnixAddr(network, ln.Addr().String())
+			if resolveErr != nil {
+				t.Error(resolveErr)
+				continue
+			}
+ c, err = d.DialUnix(context.WithValue(context.Background(), "id", i+1), network, nil, raddr)
+ }
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if id != i+1 {
+ t.Errorf("%s: got id %d, want %d", network, id, i+1)
+ }
+ c.Close()
+ }
+ })
+ t.Run("PacketDial", func(t *testing.T) {
+ var err error
+ for i, network := range []string{"udp", "udp4", "udp6", "unixgram"} {
+ if !testableNetwork(network) {
+ continue
+ }
+ c1 := newLocalPacketListener(t, network)
+ if network == "unixgram" {
+ defer os.Remove(c1.LocalAddr().String())
+ }
+ defer c1.Close()
+ var id int
+ d := Dialer{ControlContext: func(ctx context.Context, network string, address string, c syscall.RawConn) error {
+ id = ctx.Value("id").(int)
+ return controlOnConnSetup(network, address, c)
+ }}
+ var c2 Conn
+ switch network {
+ case "udp", "udp4", "udp6":
+			raddr, parseErr := netip.ParseAddrPort(c1.LocalAddr().String())
+			if parseErr != nil {
+				t.Error(parseErr)
+				continue
+			}
+ c2, err = d.DialUDP(context.WithValue(context.Background(), "id", i+1), network, (*UDPAddr)(nil).AddrPort(), raddr)
+ case "unixgram":
+			raddr, resolveErr := ResolveUnixAddr(network, c1.LocalAddr().String())
+			if resolveErr != nil {
+				t.Error(resolveErr)
+				continue
+			}
+ c2, err = d.DialUnix(context.WithValue(context.Background(), "id", i+1), network, nil, raddr)
+ }
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if id != i+1 {
+ t.Errorf("%s: got id %d, want %d", network, id, i+1)
+ }
+ c2.Close()
+ }
+ })
+}
+
// mustHaveExternalNetwork is like testenv.MustHaveExternalNetwork
// except on non-Linux, non-mobile builders it permits the test to
// run in -short mode.
diff --git a/src/net/http/example_test.go b/src/net/http/example_test.go
index f40273f14a2a24..acb96bba5178c8 100644
--- a/src/net/http/example_test.go
+++ b/src/net/http/example_test.go
@@ -12,6 +12,7 @@ import (
"net/http"
"os"
"os/signal"
+ "time"
)
func ExampleHijacker() {
@@ -221,3 +222,22 @@ func ExampleProtocols_http1or2() {
}
res.Body.Close()
}
+
+func ExampleCrossOriginProtection() {
+ mux := http.NewServeMux()
+
+ mux.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) {
+ io.WriteString(w, "request allowed\n")
+ })
+
+ srv := http.Server{
+ Addr: ":8080",
+ ReadTimeout: 15 * time.Second,
+ WriteTimeout: 15 * time.Second,
+ // Use CrossOriginProtection.Handler to block all non-safe cross-origin
+ // browser requests to mux.
+ Handler: http.NewCrossOriginProtection().Handler(mux),
+ }
+
+ log.Fatal(srv.ListenAndServe())
+}
diff --git a/src/net/http/httptrace/trace.go b/src/net/http/httptrace/trace.go
index 706a4329578ef7..cee13d2da8345d 100644
--- a/src/net/http/httptrace/trace.go
+++ b/src/net/http/httptrace/trace.go
@@ -76,7 +76,7 @@ func WithClientTrace(ctx context.Context, trace *ClientTrace) context.Context {
// during a single round trip and has no hooks that span a series
// of redirected requests.
//
-// See https://blog.golang.org/http-tracing for more.
+// See https://go.dev/blog/http-tracing for more.
type ClientTrace struct {
// GetConn is called before a connection is created or
// retrieved from an idle pool. The hostPort is the
diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go
index 6ba6b2c8e033b9..635d3ad9d9f132 100644
--- a/src/net/http/pprof/pprof.go
+++ b/src/net/http/pprof/pprof.go
@@ -67,7 +67,7 @@
// in your browser.
//
// For a study of the facility in action, visit
-// https://blog.golang.org/2011/06/profiling-go-programs.html.
+// https://go.dev/blog/pprof.
package pprof
import (
diff --git a/src/net/iprawsock.go b/src/net/iprawsock.go
index 76dded9ca16e12..26134d7e76a2ec 100644
--- a/src/net/iprawsock.go
+++ b/src/net/iprawsock.go
@@ -6,6 +6,7 @@ package net
import (
"context"
+ "net/netip"
"syscall"
)
@@ -24,6 +25,19 @@ import (
// BUG(mikio): On JS and Plan 9, methods and functions related
// to IPConn are not implemented.
+// BUG: On Windows, raw IP sockets are restricted by the operating system.
+// Sending TCP data, sending UDP data with invalid source addresses,
+// and calling bind with the TCP protocol don't work.
+//
+// See Winsock reference for details.
+
+func ipAddrFromAddr(addr netip.Addr) *IPAddr {
+ return &IPAddr{
+ IP: addr.AsSlice(),
+ Zone: addr.Zone(),
+ }
+}
+
// IPAddr represents the address of an IP end point.
type IPAddr struct {
IP IP
@@ -206,11 +220,18 @@ func newIPConn(fd *netFD) *IPConn { return &IPConn{conn{fd}} }
// If the IP field of raddr is nil or an unspecified IP address, the
// local system is assumed.
func DialIP(network string, laddr, raddr *IPAddr) (*IPConn, error) {
+ return dialIP(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialIP(ctx context.Context, dialer *Dialer, network string, laddr, raddr *IPAddr) (*IPConn, error) {
if raddr == nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialIP(context.Background(), laddr, raddr)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
+ c, err := sd.dialIP(ctx, laddr, raddr)
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
diff --git a/src/net/ipsock_posix.go b/src/net/ipsock_posix.go
index 2aeabd44873f22..52712f932f7530 100644
--- a/src/net/ipsock_posix.go
+++ b/src/net/ipsock_posix.go
@@ -237,8 +237,12 @@ func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, e
func addrPortToSockaddrInet4(ap netip.AddrPort) (syscall.SockaddrInet4, error) {
// ipToSockaddrInet4 has special handling here for zero length slices.
// We do not, because netip has no concept of a generic zero IP address.
+ //
+ // addr is allowed to be an IPv4-mapped IPv6 address.
+ // As4 will unmap it to an IPv4 address.
+ // The error message is kept consistent with ipToSockaddrInet4.
addr := ap.Addr()
- if !addr.Is4() {
+ if !addr.Is4() && !addr.Is4In6() {
return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: addr.String()}
}
sa := syscall.SockaddrInet4{
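
The relaxed check accepts IPv4-mapped IPv6 addresses because As4 transparently unmaps them. A small illustration of the netip predicates involved:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	v4 := netip.MustParseAddr("127.0.0.1")
	mapped := netip.MustParseAddr("::ffff:127.0.0.1")

	fmt.Println(v4.Is4(), v4.Is4In6())         // true false
	fmt.Println(mapped.Is4(), mapped.Is4In6()) // false true

	// As4 yields the 4-byte form for both, which is why the sockaddr
	// conversion can now accept either representation.
	fmt.Println(netip.AddrFrom4(mapped.As4())) // 127.0.0.1
}
```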
diff --git a/src/net/net_windows_test.go b/src/net/net_windows_test.go
index 671de7678008ed..0a5c77f032527e 100644
--- a/src/net/net_windows_test.go
+++ b/src/net/net_windows_test.go
@@ -302,7 +302,7 @@ func TestInterfacesWithNetsh(t *testing.T) {
}
slices.Sort(want)
- if strings.Join(want, "/") != strings.Join(have, "/") {
+ if !slices.Equal(want, have) {
t.Fatalf("unexpected interface list %q, want %q", have, want)
}
}
@@ -487,7 +487,7 @@ func TestInterfaceAddrsWithNetsh(t *testing.T) {
want = append(want, wantIPv6...)
slices.Sort(want)
- if strings.Join(want, "/") != strings.Join(have, "/") {
+ if !slices.Equal(want, have) {
t.Errorf("%s: unexpected addresses list %q, want %q", ifi.Name, have, want)
}
}
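
This test (like the os and path/filepath tests below) previously compared joined strings, which can conflate distinct lists whenever an element contains the separator; slices.Equal compares lengths and elements directly. For example:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	have := []string{"a/b"}
	want := []string{"a", "b"}

	// Joined with the separator used by the old comparison, the two
	// distinct lists look identical...
	fmt.Println(strings.Join(want, "/") == strings.Join(have, "/")) // true

	// ...while slices.Equal compares element by element.
	fmt.Println(slices.Equal(want, have)) // false
}
```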
diff --git a/src/net/tcpsock.go b/src/net/tcpsock.go
index 9d215db1b2eec3..376bf238c70d07 100644
--- a/src/net/tcpsock.go
+++ b/src/net/tcpsock.go
@@ -315,6 +315,10 @@ func newTCPConn(fd *netFD, keepAliveIdle time.Duration, keepAliveCfg KeepAliveCo
// If the IP field of raddr is nil or an unspecified IP address, the
// local system is assumed.
func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ return dialTCP(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialTCP(ctx context.Context, dialer *Dialer, network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
switch network {
case "tcp", "tcp4", "tcp6":
default:
@@ -328,10 +332,13 @@ func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
c *TCPConn
err error
)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
if sd.MultipathTCP() {
- c, err = sd.dialMPTCP(context.Background(), laddr, raddr)
+ c, err = sd.dialMPTCP(ctx, laddr, raddr)
} else {
- c, err = sd.dialTCP(context.Background(), laddr, raddr)
+ c, err = sd.dialTCP(ctx, laddr, raddr)
}
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
diff --git a/src/net/udpsock.go b/src/net/udpsock.go
index 35da018c307afb..f9a3bee867d340 100644
--- a/src/net/udpsock.go
+++ b/src/net/udpsock.go
@@ -285,6 +285,10 @@ func newUDPConn(fd *netFD) *UDPConn { return &UDPConn{conn{fd}} }
// If the IP field of raddr is nil or an unspecified IP address, the
// local system is assumed.
func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
+ return dialUDP(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialUDP(ctx context.Context, dialer *Dialer, network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
switch network {
case "udp", "udp4", "udp6":
default:
@@ -294,7 +298,10 @@ func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialUDP(context.Background(), laddr, raddr)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
+ c, err := sd.dialUDP(ctx, laddr, raddr)
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
diff --git a/src/net/udpsock_test.go b/src/net/udpsock_test.go
index 6dacc81df6e059..7ad8a585b07e33 100644
--- a/src/net/udpsock_test.go
+++ b/src/net/udpsock_test.go
@@ -705,3 +705,35 @@ func TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) {
t.Fatal(err)
}
}
+
+// TestIPv4WriteMsgUDPAddrPortTargetAddrIPVersion verifies that
+// WriteMsgUDPAddrPort accepts IPv4 and IPv4-mapped IPv6 destination addresses,
+// and rejects IPv6 destination addresses on a "udp4" connection.
+func TestIPv4WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) {
+ if !testableNetwork("udp4") {
+ t.Skipf("skipping: udp4 not available")
+ }
+
+ conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+
+ daddr4 := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), 12345)
+ daddr4in6 := netip.AddrPortFrom(netip.MustParseAddr("::ffff:127.0.0.1"), 12345)
+ daddr6 := netip.AddrPortFrom(netip.MustParseAddr("::1"), 12345)
+ buf := make([]byte, 8)
+
+ if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4); err != nil {
+ t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr4) failed: %v", err)
+ }
+
+ if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6); err != nil {
+ t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6) failed: %v", err)
+ }
+
+ if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr6); err == nil {
+ t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr6) should have failed, but got no error")
+ }
+}
diff --git a/src/net/unixsock.go b/src/net/unixsock.go
index c93ef91d5730e6..0ee79f35dec8a4 100644
--- a/src/net/unixsock.go
+++ b/src/net/unixsock.go
@@ -201,13 +201,20 @@ func newUnixConn(fd *netFD) *UnixConn { return &UnixConn{conn{fd}} }
// If laddr is non-nil, it is used as the local address for the
// connection.
func DialUnix(network string, laddr, raddr *UnixAddr) (*UnixConn, error) {
+ return dialUnix(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialUnix(ctx context.Context, dialer *Dialer, network string, laddr, raddr *UnixAddr) (*UnixConn, error) {
switch network {
case "unix", "unixgram", "unixpacket":
default:
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
}
sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialUnix(context.Background(), laddr, raddr)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
+ c, err := sd.dialUnix(ctx, laddr, raddr)
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go
index 515d1c135901e0..5a479051ee1e30 100644
--- a/src/os/os_windows_test.go
+++ b/src/os/os_windows_test.go
@@ -663,7 +663,7 @@ func TestOpenVolumeName(t *testing.T) {
}
slices.Sort(have)
- if strings.Join(want, "/") != strings.Join(have, "/") {
+ if !slices.Equal(want, have) {
t.Fatalf("unexpected file list %q, want %q", have, want)
}
}
diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
index f415b0408820af..2ae79980c753ee 100644
--- a/src/path/filepath/match_test.go
+++ b/src/path/filepath/match_test.go
@@ -231,7 +231,7 @@ func (test *globTest) globAbs(root, rootPattern string) error {
}
slices.Sort(have)
want := test.buildWant(root + `\`)
- if strings.Join(want, "_") == strings.Join(have, "_") {
+ if slices.Equal(want, have) {
return nil
}
return fmt.Errorf("Glob(%q) returns %q, but %q expected", p, have, want)
@@ -245,12 +245,12 @@ func (test *globTest) globRel(root string) error {
}
slices.Sort(have)
want := test.buildWant(root)
- if strings.Join(want, "_") == strings.Join(have, "_") {
+ if slices.Equal(want, have) {
return nil
}
// try also matching version without root prefix
wantWithNoRoot := test.buildWant("")
- if strings.Join(wantWithNoRoot, "_") == strings.Join(have, "_") {
+ if slices.Equal(wantWithNoRoot, have) {
return nil
}
return fmt.Errorf("Glob(%q) returns %q, but %q expected", p, have, want)
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 742b97f888514f..d371e80d8484ac 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -794,9 +794,6 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0
MOVW R0, g
// Save g to thread-local storage.
-#ifdef GOOS_windows
- B runtime·save_g(SB)
-#else
#ifdef GOOS_openbsd
B runtime·save_g(SB)
#else
@@ -808,7 +805,6 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0
MOVW g, R0
RET
#endif
-#endif
TEXT runtime·emptyfunc(SB),0,$0-0
RET
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index 46ef00bab8aa35..ee7f825e1f6681 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -70,8 +70,9 @@ nocgo:
// start this M
JAL runtime·mstart(SB)
- // Prevent dead-code elimination of debugCallV2, which is
+ // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
// intended to be called by debuggers.
+ MOVV $runtime·debugPinnerV1(SB), R0
MOVV $runtime·debugCallV2(SB), R0
MOVV R0, 1(R0)
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 96c87afac8c92f..fc70fa82046056 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -1349,67 +1349,29 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$32-16
TW $31, R0, R0
RET
#endif
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSliceConvert(SB)
+
+TEXT runtime·panicBounds(SB),NOSPLIT,$88-0
+ // Note: frame size is 16 bytes larger than necessary
+ // in order to pacify vet. Vet doesn't understand ppc64
+ // layout properly.
+ NO_LOCAL_POINTERS
+ // Save all 7 int registers that could have an index in them.
+	// They may be pointers, but if so, they are dead.
+ // Skip R0 aka ZERO, R1 aka SP, R2 aka SB
+ MOVD R3, 48(R1)
+ MOVD R4, 56(R1)
+ MOVD R5, 64(R1)
+ MOVD R6, 72(R1)
+ MOVD R7, 80(R1)
+ MOVD R8, 88(R1)
+ MOVD R9, 96(R1)
+ // Note: we only save 7 registers to keep under nosplit stack limit
+ // Also, R11 is clobbered in dynamic linking situations
+
+ MOVD LR, R3 // PC immediately after call to panicBounds
+ ADD $48, R1, R4 // pointer to save area
+ CALL runtime·panicBounds64(SB)
+ RET
// These functions are used when internal linking cgo with external
// objects compiled with the -Os on gcc. They reduce prologue/epilogue
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 4031cdde9ee6b5..6b16d03c9a8070 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -884,80 +884,32 @@ TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0
MOV $64, X24
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers (ssa/gen/RISCV64Ops.go), but the space for those
-// arguments are allocated in the caller's stack frame.
-// These stubs write the args into that stack space and then tail call to the
-// corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if so, they are dead.
+ // Skip X0 aka ZERO, X1 aka LR, X2 aka SP, X3 aka GP, X4 aka TP.
+ MOV X5, 24(X2)
+ MOV X6, 32(X2)
+ MOV X7, 40(X2)
+ MOV X8, 48(X2)
+ MOV X9, 56(X2)
+ MOV X10, 64(X2)
+ MOV X11, 72(X2)
+ MOV X12, 80(X2)
+ MOV X13, 88(X2)
+ MOV X14, 96(X2)
+ MOV X15, 104(X2)
+ MOV X16, 112(X2)
+ MOV X17, 120(X2)
+ MOV X18, 128(X2)
+ MOV X19, 136(X2)
+ MOV X20, 144(X2)
+
+ MOV X1, X10 // PC immediately after call to panicBounds
+ ADD $24, X2, X11 // pointer to save area
+ CALL runtime·panicBounds64(SB)
+ RET
DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 7fc88009e88a85..4cc1c0eb104886 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -892,76 +892,18 @@ TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0
MOVD $64, R9
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if so, they are dead.
+ STMG R0, R12, 24(R15)
+	// Note that R10 @ 104 is not needed; it is an assembler temp
+ // skip R13 aka G @ 128
+ // skip R14 aka LR @ 136
+ // skip R15 aka SP @ 144
+
+ MOVD R14, 8(R15) // PC immediately after call to panicBounds
+ ADD $24, R15, R0 // pointer to save area
+ MOVD R0, 16(R15)
+ CALL runtime·panicBounds64(SB)
+ RET
diff --git a/src/runtime/checkptr_test.go b/src/runtime/checkptr_test.go
index 811c0f03553420..119708be7f52ac 100644
--- a/src/runtime/checkptr_test.go
+++ b/src/runtime/checkptr_test.go
@@ -35,6 +35,7 @@ func TestCheckPtr(t *testing.T) {
{"CheckPtrAlignmentNilPtr", ""},
{"CheckPtrArithmetic", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"},
{"CheckPtrArithmetic2", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"},
+ {"CheckPtrArithmeticUnsafeAdd", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"},
{"CheckPtrSize", "fatal error: checkptr: converted pointer straddles multiple allocations\n"},
{"CheckPtrSmall", "fatal error: checkptr: pointer arithmetic computed bad pointer value\n"},
{"CheckPtrSliceOK", ""},
diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go
index bd1cb328d37b87..6452364b68ec32 100644
--- a/src/runtime/cpuflags.go
+++ b/src/runtime/cpuflags.go
@@ -13,6 +13,7 @@ import (
const (
offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX)
offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512) // F+CD+BW+DQ+VL
offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS)
offsetX86HasRDTSCP = unsafe.Offsetof(cpu.X86.HasRDTSCP)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index fa30efccb1efe3..1f55717f0a1a60 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -554,6 +554,8 @@ type G = g
type Sudog = sudog
+type XRegPerG = xRegPerG
+
func Getg() *G {
return getg()
}
diff --git a/src/runtime/export_vdso_linux_test.go b/src/runtime/export_vdso_linux_test.go
new file mode 100644
index 00000000000000..cd339c6038f717
--- /dev/null
+++ b/src/runtime/export_vdso_linux_test.go
@@ -0,0 +1,29 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
+
+package runtime
+
+type VDSOSymbolKey vdsoSymbolKey
+
+func (v VDSOSymbolKey) Name() string {
+ return v.name
+}
+
+func (v VDSOSymbolKey) SymHash() uint32 {
+ return v.symHash
+}
+
+func (v VDSOSymbolKey) GNUHash() uint32 {
+ return v.gnuHash
+}
+
+func VDSOSymbolKeys() []VDSOSymbolKey {
+ keys := make([]VDSOSymbolKey, 0, len(vdsoSymbolKeys))
+ for _, k := range vdsoSymbolKeys {
+ keys = append(keys, VDSOSymbolKey(k))
+ }
+ return keys
+}
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index 44015ce862d077..9821e499989951 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -70,6 +70,7 @@ const (
lockRankHchanLeaf
// WB
lockRankWbufSpans
+ lockRankXRegAlloc
lockRankMheap
lockRankMheapSpecial
lockRankGlobalAlloc
@@ -143,6 +144,7 @@ var lockNames = []string{
lockRankStackLarge: "stackLarge",
lockRankHchanLeaf: "hchanLeaf",
lockRankWbufSpans: "wbufSpans",
+ lockRankXRegAlloc: "xRegAlloc",
lockRankMheap: "mheap",
lockRankMheapSpecial: "mheapSpecial",
lockRankGlobalAlloc: "globalAlloc",
@@ -228,9 +230,10 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+ lockRankXRegAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
- lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
+ lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankMheap, lockRankMheapSpecial},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
lockRankPanic: {},
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index cb0d34004899ca..1776206573892f 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -821,6 +821,8 @@ func (h *mheap) init() {
}
h.pages.init(&h.lock, &memstats.gcMiscSys, false)
+
+ xRegInitAlloc()
}
// reclaim sweeps and reclaims at least npage pages into the heap.
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index 46a063fdce569c..9c503369a35841 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -193,6 +193,9 @@ defer,
# Below WB is the write barrier implementation.
< wbufSpans;
+# xRegState allocator
+sched < xRegAlloc;
+
# Span allocator
stackLarge,
stackpool,
@@ -205,7 +208,8 @@ stackLarge,
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
-mheap, mheapSpecial < globalAlloc;
+# Fixallocs
+mheap, mheapSpecial, xRegAlloc < globalAlloc;
# Execution tracer events (with a P)
hchan,
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 6a9cf77a43fcf0..2bd2ef07fa8292 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -9,8 +9,10 @@
package main
import (
+ "bytes"
"flag"
"fmt"
+ "go/format"
"io"
"log"
"os"
@@ -73,16 +75,14 @@ var regNamesAMD64 = []string{
"X15",
}
-var out io.Writer
-
-var arches = map[string]func(){
+var arches = map[string]func(g *gen){
"386": gen386,
"amd64": genAMD64,
"arm": genARM,
"arm64": genARM64,
"loong64": genLoong64,
- "mips64x": func() { genMIPS(true) },
- "mipsx": func() { genMIPS(false) },
+ "mips64x": func(g *gen) { genMIPS(g, true) },
+ "mipsx": func(g *gen) { genMIPS(g, false) },
"ppc64x": genPPC64,
"riscv64": genRISCV64,
"s390x": genS390X,
@@ -93,53 +93,100 @@ var beLe = map[string]bool{"mips64x": true, "mipsx": true, "ppc64x": true}
func main() {
flag.Parse()
if flag.NArg() > 0 {
- out = os.Stdout
for _, arch := range flag.Args() {
- gen, ok := arches[arch]
+ genFn, ok := arches[arch]
if !ok {
log.Fatalf("unknown arch %s", arch)
}
- header(arch)
- gen()
+ g := gen{os.Stdout, arch}
+ g.asmHeader()
+ genFn(&g)
}
return
}
- for arch, gen := range arches {
+ for arch, genFn := range arches {
f, err := os.Create(fmt.Sprintf("preempt_%s.s", arch))
if err != nil {
log.Fatal(err)
}
- out = f
- header(arch)
- gen()
+ g := gen{f, arch}
+ g.asmHeader()
+ genFn(&g)
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
}
-func header(arch string) {
- fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
- if beLe[arch] {
- base := arch[:len(arch)-1]
- fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base)
+type gen struct {
+ w io.Writer
+ goarch string
+}
+
+func (g *gen) commonHeader() {
+ fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
+ if beLe[g.goarch] {
+ base := g.goarch[:len(g.goarch)-1]
+ fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base)
}
- fmt.Fprintf(out, "#include \"go_asm.h\"\n")
- if arch == "amd64" {
- fmt.Fprintf(out, "#include \"asm_amd64.h\"\n")
+}
+
+func (g *gen) asmHeader() {
+ g.commonHeader()
+ fmt.Fprintf(g.w, "#include \"go_asm.h\"\n")
+ if g.goarch == "amd64" {
+ fmt.Fprintf(g.w, "#include \"go_tls.h\"\n")
+ fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n")
}
- fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
- fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
+ fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n")
+ fmt.Fprintf(g.w, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
}
-func p(f string, args ...any) {
+func (g *gen) p(f string, args ...any) {
fmted := fmt.Sprintf(f, args...)
- fmt.Fprintf(out, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t"))
+ fmt.Fprintf(g.w, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t"))
}
-func label(l string) {
- fmt.Fprintf(out, "%s\n", l)
+func (g *gen) label(l string) {
+ fmt.Fprintf(g.w, "%s\n", l)
+}
+
+// writeXRegs writes an architecture's xregs file, which defines the generated xRegs type.
+func writeXRegs(arch string, l *layout) {
+ var code bytes.Buffer
+ g := gen{&code, arch}
+ g.commonHeader()
+ fmt.Fprintf(g.w, `
+package runtime
+
+type xRegs struct {
+`)
+ pos := 0
+ for _, reg := range l.regs {
+ if reg.pos != pos {
+ log.Fatalf("padding not implemented")
+ }
+ typ := fmt.Sprintf("[%d]byte", reg.size)
+ switch {
+ case reg.size == 4 && reg.pos%4 == 0:
+ typ = "uint32"
+ case reg.size == 8 && reg.pos%8 == 0:
+ typ = "uint64"
+ }
+ fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ)
+ pos += reg.size
+ }
+ fmt.Fprintf(g.w, "}\n")
+
+ path := fmt.Sprintf("preempt_%s.go", arch)
+ b, err := format.Source(code.Bytes())
+ if err != nil {
+ log.Fatalf("formatting %s: %s", path, err)
+ }
+ if err := os.WriteFile(path, b, 0666); err != nil {
+ log.Fatal(err)
+ }
}
type layout struct {
@@ -149,7 +196,7 @@ type layout struct {
}
type regPos struct {
- pos int
+ pos, size int
saveOp string
restoreOp string
@@ -162,42 +209,44 @@ type regPos struct {
}
func (l *layout) add(op, reg string, size int) {
- l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size})
l.stack += size
}
func (l *layout) add2(sop, rop, reg string, size int) {
- l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size})
l.stack += size
}
func (l *layout) addSpecial(save, restore string, size int) {
- l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack})
+ l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size})
l.stack += size
}
-func (l *layout) save() {
+func (l *layout) save(g *gen) {
for _, reg := range l.regs {
if reg.save != "" {
- p(reg.save, reg.pos)
+ g.p(reg.save, reg.pos)
} else {
- p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp)
+ g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp)
}
}
}
-func (l *layout) restore() {
+func (l *layout) restore(g *gen) {
for i := len(l.regs) - 1; i >= 0; i-- {
reg := l.regs[i]
if reg.restore != "" {
- p(reg.restore, reg.pos)
+ g.p(reg.restore, reg.pos)
} else {
- p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg)
+ g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg)
}
}
}
-func gen386() {
+func gen386(g *gen) {
+ p := g.p
+
p("PUSHFL")
// Save general purpose registers.
var l = layout{sp: "SP"}
@@ -218,22 +267,26 @@ func gen386() {
p("ADJSP $%d", lSSE.stack)
p("NOP SP")
- l.save()
+ l.save(g)
p("#ifndef %s", softfloat)
- lSSE.save()
+ lSSE.save(g)
p("#endif")
p("CALL ·asyncPreempt2(SB)")
p("#ifndef %s", softfloat)
- lSSE.restore()
+ lSSE.restore(g)
p("#endif")
- l.restore()
+ l.restore(g)
p("ADJSP $%d", -lSSE.stack)
p("POPFL")
p("RET")
}
-func genAMD64() {
+func genAMD64(g *gen) {
+ const xReg = "AX" // *xRegState
+
+ p, label := g.p, g.label
+
// Assign stack offsets.
var l = layout{sp: "SP"}
for _, reg := range regNamesAMD64 {
@@ -244,37 +297,121 @@ func genAMD64() {
l.add("MOVQ", reg, 8)
}
}
- lSSE := layout{stack: l.stack, sp: "SP"}
- for _, reg := range regNamesAMD64 {
- if strings.HasPrefix(reg, "X") {
- lSSE.add("MOVUPS", reg, 16)
+ // Create layouts for X, Y, and Z registers.
+ const (
+ numXRegs = 16
+ numZRegs = 16 // TODO: If we start using upper registers, change to 32
+ numKRegs = 8
+ )
+ lZRegs := layout{sp: xReg} // Non-GP registers
+ lXRegs, lYRegs := lZRegs, lZRegs
+ for i := range numZRegs {
+ lZRegs.add("VMOVDQU64", fmt.Sprintf("Z%d", i), 512/8)
+ if i < numXRegs {
+ // Use SSE-only instructions for X registers.
+ lXRegs.add("MOVUPS", fmt.Sprintf("X%d", i), 128/8)
+ lYRegs.add("VMOVDQU", fmt.Sprintf("Y%d", i), 256/8)
}
}
-
- // TODO: MXCSR register?
+ for i := range numKRegs {
+ lZRegs.add("KMOVQ", fmt.Sprintf("K%d", i), 8)
+ }
+ // The Z layout is the most general, so we line up the others with that one.
+ // We don't have to do this, but it results in a nice Go type. If we split
+ // this into multiple types, we probably should stop doing this.
+ for i := range lXRegs.regs {
+ lXRegs.regs[i].pos = lZRegs.regs[i].pos
+ lYRegs.regs[i].pos = lZRegs.regs[i].pos
+ }
+ writeXRegs(g.goarch, &lZRegs)
p("PUSHQ BP")
p("MOVQ SP, BP")
p("// Save flags before clobbering them")
p("PUSHFQ")
p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
- p("ADJSP $%d", lSSE.stack)
+ p("ADJSP $%d", l.stack)
p("// But vet doesn't know ADJSP, so suppress vet stack checking")
p("NOP SP")
- l.save()
+ p("// Save GPs")
+ l.save(g)
+
+ // In general, the limitations on asynchronous preemption mean we only
+ // preempt in ABIInternal code. However, there's at least one exception to
+ // this: when we're in an open-coded transition between an ABIInternal
+ // function and an ABI0 call. We could more carefully arrange unsafe points
+ // to avoid ever landing in ABI0, but it's easy to just make this code not
+ // sensitive to the ABI we're preempting. The CALL to asyncPreempt2 will
+ // ensure we're in ABIInternal register state.
+ p("// Save extended register state to p.xRegs.scratch")
+ p("// Don't make assumptions about ABI register state. See mkpreempt.go")
+ p("get_tls(CX)")
+ p("MOVQ g(CX), R14")
+ p("MOVQ g_m(R14), %s", xReg)
+ p("MOVQ m_p(%s), %s", xReg, xReg)
+ p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg)
+
+ // Which registers do we need to save?
+ p("#ifdef GOEXPERIMENT_simd")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1")
+ p("JE saveAVX512")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1")
+ p("JE saveAVX2")
+ p("#endif")
+
+ // No features. Assume only SSE.
+ label("saveSSE:")
+ lXRegs.save(g)
+ p("JMP preempt")
+
+ label("saveAVX2:")
+ lYRegs.save(g)
+ p("JMP preempt")
- lSSE.save()
+ label("saveAVX512:")
+ lZRegs.save(g)
+ p("JMP preempt")
+
+ label("preempt:")
p("CALL ·asyncPreempt2(SB)")
- lSSE.restore()
- l.restore()
- p("ADJSP $%d", -lSSE.stack)
+
+ p("// Restore non-GPs from *p.xRegs.cache")
+ p("MOVQ g_m(R14), %s", xReg)
+ p("MOVQ m_p(%s), %s", xReg, xReg)
+ p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg)
+
+ p("#ifdef GOEXPERIMENT_simd")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1")
+ p("JE restoreAVX512")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1")
+ p("JE restoreAVX2")
+ p("#endif")
+
+ label("restoreSSE:")
+ lXRegs.restore(g)
+ p("JMP restoreGPs")
+
+ label("restoreAVX2:")
+ lYRegs.restore(g)
+ p("JMP restoreGPs")
+
+ label("restoreAVX512:")
+ lZRegs.restore(g)
+ p("JMP restoreGPs")
+
+ label("restoreGPs:")
+ p("// Restore GPs")
+ l.restore(g)
+ p("ADJSP $%d", -l.stack)
p("POPFQ")
p("POPQ BP")
p("RET")
}
-func genARM() {
+func genARM(g *gen) {
+ p := g.p
+
// Add integer registers R0-R12.
// R13 (SP), R14 (LR), R15 (PC) are special and not saved here.
var l = layout{sp: "R13", stack: 4} // add LR slot
@@ -303,22 +440,23 @@ func genARM() {
}
p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR
- l.save()
+ l.save(g)
p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0.
- lfp.save()
- label("nofp:")
+ lfp.save(g)
+ g.label("nofp:")
p("CALL ·asyncPreempt2(SB)")
p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0.
- lfp.restore()
- label("nofp2:")
- l.restore()
+ lfp.restore(g)
+ g.label("nofp2:")
+ l.restore(g)
p("MOVW %d(R13), R14", lfp.stack) // sigctxt.pushCall pushes LR on stack, restore it
p("MOVW.P %d(R13), R15", lfp.stack+4) // load PC, pop frame (including the space pushed by sigctxt.pushCall)
p("UNDEF") // shouldn't get here
}
-func genARM64() {
+func genARM64(g *gen) {
+ p := g.p
// Add integer registers R0-R26
// R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special
// and not saved here.
@@ -362,9 +500,9 @@ func genARM64() {
p("MOVD R30, (RSP)")
p("#endif")
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p("MOVD -8(RSP), R29") // restore frame pointer
@@ -373,7 +511,9 @@ func genARM64() {
p("RET (R27)")
}
-func genMIPS(_64bit bool) {
+func genMIPS(g *gen, _64bit bool) {
+ p := g.p
+
mov := "MOVW"
movf := "MOVF"
add := "ADD"
@@ -428,15 +568,15 @@ func genMIPS(_64bit bool) {
p(mov+" R31, -%d(R29)", lfp.stack)
p(sub+" $%d, R29", lfp.stack)
- l.save()
+ l.save(g)
p("#ifndef %s", softfloat)
- lfp.save()
+ lfp.save(g)
p("#endif")
p("CALL ·asyncPreempt2(SB)")
p("#ifndef %s", softfloat)
- lfp.restore()
+ lfp.restore(g)
p("#endif")
- l.restore()
+ l.restore(g)
p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p(mov + " (R29), R23") // load PC to REGTMP
@@ -444,7 +584,9 @@ func genMIPS(_64bit bool) {
p("JMP (R23)")
}
-func genLoong64() {
+func genLoong64(g *gen) {
+ p := g.p
+
mov := "MOVV"
movf := "MOVD"
add := "ADDV"
@@ -478,9 +620,9 @@ func genLoong64() {
p(mov+" R1, -%d(R3)", l.stack)
p(sub+" $%d, R3", l.stack)
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p(mov + " (R3), R30") // load PC to REGTMP
@@ -488,7 +630,9 @@ func genLoong64() {
p("JMP (R30)")
}
-func genPPC64() {
+func genPPC64(g *gen) {
+ p := g.p
+
// Add integer registers R3-R29
// R0 (zero), R1 (SP), R30 (g) are special and not saved here.
// R2 (TOC pointer in PIC mode), R12 (function entry address in PIC mode) have been saved in sigctxt.pushCall.
@@ -528,9 +672,9 @@ func genPPC64() {
p("MOVD LR, R31")
p("MOVDU R31, -%d(R1)", l.stack) // allocate frame, save PC of interrupted instruction (in LR)
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOVD %d(R1), R31", l.stack) // sigctxt.pushCall has pushed LR, R2, R12 (at interrupt) on stack, restore them
p("MOVD R31, LR")
@@ -543,7 +687,9 @@ func genPPC64() {
p("JMP (CTR)")
}
-func genRISCV64() {
+func genRISCV64(g *gen) {
+ p := g.p
+
// X0 (zero), X1 (LR), X2 (SP), X3 (GP), X4 (TP), X27 (g), X31 (TMP) are special.
var l = layout{sp: "X2", stack: 8}
@@ -564,16 +710,18 @@ func genRISCV64() {
p("MOV X1, -%d(X2)", l.stack)
p("SUB $%d, X2", l.stack)
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOV %d(X2), X1", l.stack)
p("MOV (X2), X31")
p("ADD $%d, X2", l.stack+8)
p("JMP (X31)")
}
-func genS390X() {
+func genS390X(g *gen) {
+ p := g.p
+
// Add integer registers R0-R12
// R13 (g), R14 (LR), R15 (SP) are special, and not saved here.
// Saving R10 (REGTMP) is not necessary, but it is saved anyway.
@@ -594,9 +742,9 @@ func genS390X() {
p("ADD $-%d, R15", l.stack)
p("MOVW R10, 8(R15)") // save flags
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOVD %d(R15), R14", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p("ADD $%d, R15", l.stack+8) // pop frame (including the space pushed by sigctxt.pushCall)
@@ -606,12 +754,14 @@ func genS390X() {
p("JMP (R10)")
}
-func genWasm() {
+func genWasm(g *gen) {
+ p := g.p
p("// No async preemption on wasm")
p("UNDEF")
}
-func notImplemented() {
+func notImplemented(g *gen) {
+ p := g.p
p("// Not implemented yet")
p("JMP ·abort(SB)")
}
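
To connect writeXRegs to its output further down: naturally aligned 4- and 8-byte registers become integer fields and everything else a byte array, which is why the generated preempt_amd64.go below declares [64]byte for Z0–Z15 and uint64 for K0–K7. A runnable copy of that type-selection rule (fieldType is an illustrative name, not mkpreempt API):

    package main

    import "fmt"

    // fieldType mirrors writeXRegs' switch for one register of the
    // given size at the given byte offset.
    func fieldType(size, pos int) string {
        switch {
        case size == 4 && pos%4 == 0:
            return "uint32"
        case size == 8 && pos%8 == 0:
            return "uint64"
        }
        return fmt.Sprintf("[%d]byte", size)
    }

    func main() {
        fmt.Println(fieldType(64, 0))   // [64]byte (a Z register)
        fmt.Println(fieldType(8, 1024)) // uint64   (a K register)
    }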
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 8f9ab4dd47345b..8c91c9435abd18 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -103,9 +103,8 @@ func panicCheck2(err string) {
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
-// The panic{Index,Slice} functions are implemented in assembly and tail call
-// to the goPanic{Index,Slice} functions below. This is done so we can use
-// a space-minimal register calling convention.
+// The goPanic{Index,Slice} functions are used only by wasm. All the other architectures
+// use panic{Bounds,Extend} in assembly, which then call panicBounds{64,32,32X}.
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
@@ -205,28 +204,10 @@ func goPanicSliceConvert(x int, y int) {
panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsConvert})
}
-// Implemented in assembly, as they take arguments in registers.
-// Declared here to mark them as ABIInternal.
-func panicIndex(x int, y int)
-func panicIndexU(x uint, y int)
-func panicSliceAlen(x int, y int)
-func panicSliceAlenU(x uint, y int)
-func panicSliceAcap(x int, y int)
-func panicSliceAcapU(x uint, y int)
-func panicSliceB(x int, y int)
-func panicSliceBU(x uint, y int)
-func panicSlice3Alen(x int, y int)
-func panicSlice3AlenU(x uint, y int)
-func panicSlice3Acap(x int, y int)
-func panicSlice3AcapU(x uint, y int)
-func panicSlice3B(x int, y int)
-func panicSlice3BU(x uint, y int)
-func panicSlice3C(x int, y int)
-func panicSlice3CU(x uint, y int)
-func panicSliceConvert(x int, y int)
-
+// Implemented in assembly. Declared here to mark them as ABIInternal.
func panicBounds() // in asm_GOARCH.s files, called from generated code
func panicExtend() // in asm_GOARCH.s files, called from generated code (on 32-bit archs)
+
func panicBounds64(pc uintptr, regs *[16]int64) { // called from panicBounds on 64-bit archs
f := findfunc(pc)
v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 5f83f37b5078f2..99c5155806da63 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -635,10 +635,6 @@ func TestCPUProfileWithFork(t *testing.T) {
// Use smaller size for Android to avoid crash.
heap = 100 << 20
}
- if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
- // Use smaller heap for Windows/ARM to avoid crash.
- heap = 100 << 20
- }
if testing.Short() {
heap = 100 << 20
}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index c41c3558359c0c..22727df74eead2 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -292,21 +292,52 @@ func canPreemptM(mp *m) bool {
// asyncPreempt saves all user registers and calls asyncPreempt2.
//
-// When stack scanning encounters an asyncPreempt frame, it scans that
+// It saves GP registers (anything that might contain a pointer) to the G stack.
+// Hence, when stack scanning encounters an asyncPreempt frame, it scans that
// frame and its parent frame conservatively.
//
+// On some platforms, it saves large additional scalar-only register state such
+// as vector registers to an "extended register state" on the P.
+//
// asyncPreempt is implemented in assembly.
func asyncPreempt()
+// asyncPreempt2 is the Go continuation of asyncPreempt.
+//
+// It must be deeply nosplit because there's untyped data on the stack from
+// asyncPreempt.
+//
+// It must not have any write barriers because we need to limit the amount of
+// stack it uses.
+//
//go:nosplit
+//go:nowritebarrierrec
func asyncPreempt2() {
+ // We can't grow the stack with untyped data from asyncPreempt, so switch to
+ // the system stack right away.
+ mcall(func(gp *g) {
+ gp.asyncSafePoint = true
+
+ // Move the extended register state from the P to the G. We do this now that
+ // we're on the system stack to avoid stack splits.
+ xRegSave(gp)
+
+ if gp.preemptStop {
+ preemptPark(gp)
+ } else {
+ gopreempt_m(gp)
+ }
+ // The above functions never return.
+ })
+
+ // Do not grow the stack below here!
+
gp := getg()
- gp.asyncSafePoint = true
- if gp.preemptStop {
- mcall(preemptPark)
- } else {
- mcall(gopreempt_m)
- }
+
+ // Put the extended register state back on the P so resumption can find it.
+ // We can't do this in the mcall above because the park calls never return.
+ xRegRestore(gp)
+
gp.asyncSafePoint = false
}
@@ -319,19 +350,13 @@ func init() {
total := funcMaxSPDelta(f)
f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
total += funcMaxSPDelta(f)
+ f = findfunc(abi.FuncPCABIInternal(xRegRestore))
+ total += funcMaxSPDelta(f)
// Add some overhead for return PCs, etc.
asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
if asyncPreemptStack > stackNosplit {
- // We need more than the nosplit limit. This isn't
- // unsafe, but it may limit asynchronous preemption.
- //
- // This may be a problem if we start using more
- // registers. In that case, we should store registers
- // in a context object. If we pre-allocate one per P,
- // asyncPreempt can spill just a few registers to the
- // stack, then grab its context object and spill into
- // it. When it enters the runtime, it would allocate a
- // new context for the P.
+ // We need more than the nosplit limit. This isn't unsafe, but it may
+ // limit asynchronous preemption. Consider moving state into xRegState.
print("runtime: asyncPreemptStack=", asyncPreemptStack, "\n")
throw("async stack too large")
}
diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go
new file mode 100644
index 00000000000000..88c0ddd34ade72
--- /dev/null
+++ b/src/runtime/preempt_amd64.go
@@ -0,0 +1,30 @@
+// Code generated by mkpreempt.go; DO NOT EDIT.
+
+package runtime
+
+type xRegs struct {
+ Z0 [64]byte
+ Z1 [64]byte
+ Z2 [64]byte
+ Z3 [64]byte
+ Z4 [64]byte
+ Z5 [64]byte
+ Z6 [64]byte
+ Z7 [64]byte
+ Z8 [64]byte
+ Z9 [64]byte
+ Z10 [64]byte
+ Z11 [64]byte
+ Z12 [64]byte
+ Z13 [64]byte
+ Z14 [64]byte
+ Z15 [64]byte
+ K0 uint64
+ K1 uint64
+ K2 uint64
+ K3 uint64
+ K4 uint64
+ K5 uint64
+ K6 uint64
+ K7 uint64
+}
diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s
index 8e3ed0d7c59dce..c35de7f3b75726 100644
--- a/src/runtime/preempt_amd64.s
+++ b/src/runtime/preempt_amd64.s
@@ -1,6 +1,7 @@
// Code generated by mkpreempt.go; DO NOT EDIT.
#include "go_asm.h"
+#include "go_tls.h"
#include "asm_amd64.h"
#include "textflag.h"
@@ -10,9 +11,10 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
// Save flags before clobbering them
PUSHFQ
// obj doesn't understand ADD/SUB on SP, but does understand ADJSP
- ADJSP $368
+ ADJSP $112
// But vet doesn't know ADJSP, so suppress vet stack checking
NOP SP
+ // Save GPs
MOVQ AX, 0(SP)
MOVQ CX, 8(SP)
MOVQ DX, 16(SP)
@@ -27,39 +29,157 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVQ R13, 88(SP)
MOVQ R14, 96(SP)
MOVQ R15, 104(SP)
- MOVUPS X0, 112(SP)
- MOVUPS X1, 128(SP)
- MOVUPS X2, 144(SP)
- MOVUPS X3, 160(SP)
- MOVUPS X4, 176(SP)
- MOVUPS X5, 192(SP)
- MOVUPS X6, 208(SP)
- MOVUPS X7, 224(SP)
- MOVUPS X8, 240(SP)
- MOVUPS X9, 256(SP)
- MOVUPS X10, 272(SP)
- MOVUPS X11, 288(SP)
- MOVUPS X12, 304(SP)
- MOVUPS X13, 320(SP)
- MOVUPS X14, 336(SP)
- MOVUPS X15, 352(SP)
+ // Save extended register state to p.xRegs.scratch
+ // Don't make assumptions about ABI register state. See mkpreempt.go
+ get_tls(CX)
+ MOVQ g(CX), R14
+ MOVQ g_m(R14), AX
+ MOVQ m_p(AX), AX
+ LEAQ (p_xRegs+xRegPerP_scratch)(AX), AX
+ #ifdef GOEXPERIMENT_simd
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+ JE saveAVX512
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JE saveAVX2
+ #endif
+saveSSE:
+ MOVUPS X0, 0(AX)
+ MOVUPS X1, 64(AX)
+ MOVUPS X2, 128(AX)
+ MOVUPS X3, 192(AX)
+ MOVUPS X4, 256(AX)
+ MOVUPS X5, 320(AX)
+ MOVUPS X6, 384(AX)
+ MOVUPS X7, 448(AX)
+ MOVUPS X8, 512(AX)
+ MOVUPS X9, 576(AX)
+ MOVUPS X10, 640(AX)
+ MOVUPS X11, 704(AX)
+ MOVUPS X12, 768(AX)
+ MOVUPS X13, 832(AX)
+ MOVUPS X14, 896(AX)
+ MOVUPS X15, 960(AX)
+ JMP preempt
+saveAVX2:
+ VMOVDQU Y0, 0(AX)
+ VMOVDQU Y1, 64(AX)
+ VMOVDQU Y2, 128(AX)
+ VMOVDQU Y3, 192(AX)
+ VMOVDQU Y4, 256(AX)
+ VMOVDQU Y5, 320(AX)
+ VMOVDQU Y6, 384(AX)
+ VMOVDQU Y7, 448(AX)
+ VMOVDQU Y8, 512(AX)
+ VMOVDQU Y9, 576(AX)
+ VMOVDQU Y10, 640(AX)
+ VMOVDQU Y11, 704(AX)
+ VMOVDQU Y12, 768(AX)
+ VMOVDQU Y13, 832(AX)
+ VMOVDQU Y14, 896(AX)
+ VMOVDQU Y15, 960(AX)
+ JMP preempt
+saveAVX512:
+ VMOVDQU64 Z0, 0(AX)
+ VMOVDQU64 Z1, 64(AX)
+ VMOVDQU64 Z2, 128(AX)
+ VMOVDQU64 Z3, 192(AX)
+ VMOVDQU64 Z4, 256(AX)
+ VMOVDQU64 Z5, 320(AX)
+ VMOVDQU64 Z6, 384(AX)
+ VMOVDQU64 Z7, 448(AX)
+ VMOVDQU64 Z8, 512(AX)
+ VMOVDQU64 Z9, 576(AX)
+ VMOVDQU64 Z10, 640(AX)
+ VMOVDQU64 Z11, 704(AX)
+ VMOVDQU64 Z12, 768(AX)
+ VMOVDQU64 Z13, 832(AX)
+ VMOVDQU64 Z14, 896(AX)
+ VMOVDQU64 Z15, 960(AX)
+ KMOVQ K0, 1024(AX)
+ KMOVQ K1, 1032(AX)
+ KMOVQ K2, 1040(AX)
+ KMOVQ K3, 1048(AX)
+ KMOVQ K4, 1056(AX)
+ KMOVQ K5, 1064(AX)
+ KMOVQ K6, 1072(AX)
+ KMOVQ K7, 1080(AX)
+ JMP preempt
+preempt:
CALL ·asyncPreempt2(SB)
- MOVUPS 352(SP), X15
- MOVUPS 336(SP), X14
- MOVUPS 320(SP), X13
- MOVUPS 304(SP), X12
- MOVUPS 288(SP), X11
- MOVUPS 272(SP), X10
- MOVUPS 256(SP), X9
- MOVUPS 240(SP), X8
- MOVUPS 224(SP), X7
- MOVUPS 208(SP), X6
- MOVUPS 192(SP), X5
- MOVUPS 176(SP), X4
- MOVUPS 160(SP), X3
- MOVUPS 144(SP), X2
- MOVUPS 128(SP), X1
- MOVUPS 112(SP), X0
+ // Restore non-GPs from *p.xRegs.cache
+ MOVQ g_m(R14), AX
+ MOVQ m_p(AX), AX
+ MOVQ (p_xRegs+xRegPerP_cache)(AX), AX
+ #ifdef GOEXPERIMENT_simd
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+ JE restoreAVX512
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JE restoreAVX2
+ #endif
+restoreSSE:
+ MOVUPS 960(AX), X15
+ MOVUPS 896(AX), X14
+ MOVUPS 832(AX), X13
+ MOVUPS 768(AX), X12
+ MOVUPS 704(AX), X11
+ MOVUPS 640(AX), X10
+ MOVUPS 576(AX), X9
+ MOVUPS 512(AX), X8
+ MOVUPS 448(AX), X7
+ MOVUPS 384(AX), X6
+ MOVUPS 320(AX), X5
+ MOVUPS 256(AX), X4
+ MOVUPS 192(AX), X3
+ MOVUPS 128(AX), X2
+ MOVUPS 64(AX), X1
+ MOVUPS 0(AX), X0
+ JMP restoreGPs
+restoreAVX2:
+ VMOVDQU 960(AX), Y15
+ VMOVDQU 896(AX), Y14
+ VMOVDQU 832(AX), Y13
+ VMOVDQU 768(AX), Y12
+ VMOVDQU 704(AX), Y11
+ VMOVDQU 640(AX), Y10
+ VMOVDQU 576(AX), Y9
+ VMOVDQU 512(AX), Y8
+ VMOVDQU 448(AX), Y7
+ VMOVDQU 384(AX), Y6
+ VMOVDQU 320(AX), Y5
+ VMOVDQU 256(AX), Y4
+ VMOVDQU 192(AX), Y3
+ VMOVDQU 128(AX), Y2
+ VMOVDQU 64(AX), Y1
+ VMOVDQU 0(AX), Y0
+ JMP restoreGPs
+restoreAVX512:
+ KMOVQ 1080(AX), K7
+ KMOVQ 1072(AX), K6
+ KMOVQ 1064(AX), K5
+ KMOVQ 1056(AX), K4
+ KMOVQ 1048(AX), K3
+ KMOVQ 1040(AX), K2
+ KMOVQ 1032(AX), K1
+ KMOVQ 1024(AX), K0
+ VMOVDQU64 960(AX), Z15
+ VMOVDQU64 896(AX), Z14
+ VMOVDQU64 832(AX), Z13
+ VMOVDQU64 768(AX), Z12
+ VMOVDQU64 704(AX), Z11
+ VMOVDQU64 640(AX), Z10
+ VMOVDQU64 576(AX), Z9
+ VMOVDQU64 512(AX), Z8
+ VMOVDQU64 448(AX), Z7
+ VMOVDQU64 384(AX), Z6
+ VMOVDQU64 320(AX), Z5
+ VMOVDQU64 256(AX), Z4
+ VMOVDQU64 192(AX), Z3
+ VMOVDQU64 128(AX), Z2
+ VMOVDQU64 64(AX), Z1
+ VMOVDQU64 0(AX), Z0
+ JMP restoreGPs
+restoreGPs:
+ // Restore GPs
MOVQ 104(SP), R15
MOVQ 96(SP), R14
MOVQ 88(SP), R13
@@ -74,7 +194,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVQ 16(SP), DX
MOVQ 8(SP), CX
MOVQ 0(SP), AX
- ADJSP $-368
+ ADJSP $-112
POPFQ
POPQ BP
RET
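
Note the 64-byte stride in the offsets above: X and Y registers are only 16 and 32 bytes wide, yet X1, Y1, and Z1 all save to offset 64. That is the layout alignment mkpreempt performs so all three variants share one generated xRegs type. A small sketch of the arithmetic, with illustrative names:

    package main

    import "fmt"

    const zSlot = 512 / 8 // one ZMM register: 64 bytes

    func main() {
        // Whether the spill uses X, Y, or Z width, register i lands
        // at the Z layout's offset.
        for _, i := range []int{0, 1, 3, 15} {
            fmt.Printf("reg %2d -> offset %3d\n", i, i*zSlot) // 0, 64, 192, 960
        }
        fmt.Println("K registers start at", 16*zSlot) // 1024, matching KMOVQ K0, 1024(AX)
    }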
diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go
new file mode 100644
index 00000000000000..dfe46559b5b723
--- /dev/null
+++ b/src/runtime/preempt_noxreg.go
@@ -0,0 +1,27 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64
+
+// This provides common support for architectures that DO NOT use extended
+// register state in asynchronous preemption.
+
+package runtime
+
+type xRegPerG struct{}
+
+type xRegPerP struct{}
+
+// xRegState is defined only so the build fails if we try to define a real
+// xRegState on a noxreg architecture.
+type xRegState struct{}
+
+func xRegInitAlloc() {}
+
+func xRegSave(gp *g) {}
+
+//go:nosplit
+func xRegRestore(gp *g) {}
+
+func (*xRegPerP) free() {}
diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go
new file mode 100644
index 00000000000000..9e05455ddbb747
--- /dev/null
+++ b/src/runtime/preempt_xreg.go
@@ -0,0 +1,137 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64
+
+// This provides common support for architectures that use extended register
+// state in asynchronous preemption.
+//
+// While asynchronous preemption stores general-purpose (GP) registers on the
+// preempted goroutine's own stack, extended register state can be used to save
+// non-GP state off the stack. In particular, this is meant for large vector
+// register files. Currently, we assume this contains only scalar data, though
+// we could change this constraint by conservatively scanning this memory.
+//
+// For an architecture to support extended register state, it must provide a Go
+// definition of an xRegState type for storing the state, and its asyncPreempt
+// implementation must write this register state to p.xRegs.scratch.
+
+package runtime
+
+import (
+ "internal/runtime/sys"
+ "unsafe"
+)
+
+// xRegState is long-lived extended register state. It is allocated off-heap and
+// manually managed.
+type xRegState struct {
+ _ sys.NotInHeap // Allocated from xRegAlloc
+ regs xRegs
+}
+
+// xRegPerG stores extended register state while a goroutine is asynchronously
+// preempted. This is nil otherwise, so we can reuse a (likely small) pool of
+// xRegState objects.
+type xRegPerG struct {
+ state *xRegState
+}
+
+type xRegPerP struct {
+ // scratch is temporary per-P space where [asyncPreempt] saves the register
+ // state before entering Go. It's quickly copied to per-G state.
+ scratch xRegs
+
+ // cache is a 1-element allocation cache of extended register state used by
+ // asynchronous preemption. On entry to preemption, it serves as a simple
+ // allocation cache. On exit from preemption, the G's xRegState is always
+ // stored here, where it can be restored, and is later either freed or
+ // reused for another preemption. Parking it here also delays freeing the
+ // allocated xRegState until after it has definitely been restored.
+ cache *xRegState
+}
+
+// xRegAlloc allocates xRegState objects.
+var xRegAlloc struct {
+ lock mutex
+ alloc fixalloc
+}
+
+func xRegInitAlloc() {
+ lockInit(&xRegAlloc.lock, lockRankXRegAlloc)
+ xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys)
+}
+
+// xRegSave saves the extended register state on this P to gp.
+//
+// This must run on the system stack because it assumes the P won't change.
+//
+//go:systemstack
+func xRegSave(gp *g) {
+ if gp.xRegs.state != nil {
+ // Double preempt?
+ throw("gp.xRegState.p != nil on async preempt")
+ }
+
+ // Get the place to save the register state.
+ var dest *xRegState
+ pp := gp.m.p.ptr()
+ if pp.xRegs.cache != nil {
+ // Use the cached allocation.
+ dest = pp.xRegs.cache
+ pp.xRegs.cache = nil
+ } else {
+ // Allocate a new save block.
+ lock(&xRegAlloc.lock)
+ dest = (*xRegState)(xRegAlloc.alloc.alloc())
+ unlock(&xRegAlloc.lock)
+ }
+
+ // Copy state saved in the scratchpad to dest.
+ //
+ // If we ever need to save less state (e.g., avoid saving vector registers
+ // that aren't in use), we could have multiple allocation pools for
+ // different size states and copy only the registers we need.
+ dest.regs = pp.xRegs.scratch
+
+ // Save on the G.
+ gp.xRegs.state = dest
+}
+
+// xRegRestore prepares the extended register state on gp to be restored.
+//
+// It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find
+// it. This means nothing else may use the cache between this call and the
+// return to asyncPreempt. This is not quite symmetric with [xRegSave], which
+// uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy.
+//
+// This is called with asyncPreempt on the stack and thus must not grow the
+// stack.
+//
+//go:nosplit
+func xRegRestore(gp *g) {
+ if gp.xRegs.state == nil {
+ throw("gp.xRegState.p == nil on return from async preempt")
+ }
+ // If the P has a block cached on it, free that so we can replace it.
+ pp := gp.m.p.ptr()
+ if pp.xRegs.cache != nil {
+ // Don't grow the G stack.
+ systemstack(func() {
+ pp.xRegs.free()
+ })
+ }
+ pp.xRegs.cache = gp.xRegs.state
+ gp.xRegs.state = nil
+}
+
+func (xRegs *xRegPerP) free() {
+ if xRegs.cache != nil {
+ lock(&xRegAlloc.lock)
+ xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache))
+ xRegs.cache = nil
+ unlock(&xRegAlloc.lock)
+ }
+}
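
The scratch/cache handoff above may be easier to see in isolation. A hedged, standalone sketch of the same one-element cache pattern, with illustrative names and a plain sync.Mutex standing in for the runtime's mutex and fixalloc:

    package main

    import (
        "fmt"
        "sync"
    )

    type regState struct{ regs [1088]byte } // sized like amd64's xRegs

    var pool struct {
        sync.Mutex
        free []*regState
    }

    // get prefers the per-P cache and falls back to the locked
    // allocator, mirroring xRegSave.
    func get(cache **regState) *regState {
        if b := *cache; b != nil {
            *cache = nil
            return b
        }
        pool.Lock()
        defer pool.Unlock()
        if n := len(pool.free); n > 0 {
            b := pool.free[n-1]
            pool.free = pool.free[:n-1]
            return b
        }
        return new(regState)
    }

    // put parks the block in the cache, mirroring xRegRestore: it stays
    // live (and restorable) until the next preemption replaces it.
    func put(cache **regState, b *regState) { *cache = b }

    func main() {
        var cache *regState
        b := get(&cache) // allocates
        put(&cache, b)
        fmt.Println(get(&cache) == b) // true: reused from the cache
    }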
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index ec66384a75fa1b..25d39d9ba389ad 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -5838,6 +5838,7 @@ func (pp *p) destroy() {
pp.gcAssistTime = 0
gcCleanups.queued += pp.cleanupsQueued
pp.cleanupsQueued = 0
+ pp.xRegs.free()
pp.status = _Pdead
}
diff --git a/src/runtime/race/README b/src/runtime/race/README
index def7bfec868308..4fc20efeb30965 100644
--- a/src/runtime/race/README
+++ b/src/runtime/race/README
@@ -4,14 +4,14 @@ the LLVM project (https://github.com/llvm/llvm-project/tree/main/compiler-rt).
To update the .syso files use golang.org/x/build/cmd/racebuild.
-internal/amd64v1/race_darwin.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+internal/amd64v1/race_darwin.syso built with LLVM 0398ad41bdf1ce5f74c80a74494bfe733fe3e214 and Go b2960e35804aafbbb0df9973f99b034bea8c150a.
internal/amd64v1/race_freebsd.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
internal/amd64v1/race_linux.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
internal/amd64v1/race_netbsd.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
internal/amd64v1/race_openbsd.syso built with LLVM fcf6ae2f070eba73074b6ec8d8281e54d29dbeeb and Go 8f2db14cd35bbd674cb2988a508306de6655e425.
internal/amd64v1/race_windows.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
internal/amd64v3/race_linux.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
-race_darwin_arm64.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
+race_darwin_arm64.syso built with LLVM 0398ad41bdf1ce5f74c80a74494bfe733fe3e214 and Go b2960e35804aafbbb0df9973f99b034bea8c150a.
race_linux_arm64.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
race_linux_loong64.syso built with LLVM 83fe85115da9dc25fa270d2ea8140113c8d49670 and Go 037112464b4439571b45536de9ebe4bc9e10ecb7.
race_linux_ppc64le.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5.
diff --git a/src/runtime/race/internal/amd64v1/race_darwin.syso b/src/runtime/race/internal/amd64v1/race_darwin.syso
index e92f4ce74533f7..eb9782e105a151 100644
Binary files a/src/runtime/race/internal/amd64v1/race_darwin.syso and b/src/runtime/race/internal/amd64v1/race_darwin.syso differ
diff --git a/src/runtime/race/race_darwin_amd64.go b/src/runtime/race/race_darwin_amd64.go
index 02d73f8d388d3c..ef92e41c2760cc 100644
--- a/src/runtime/race/race_darwin_amd64.go
+++ b/src/runtime/race/race_darwin_amd64.go
@@ -28,9 +28,6 @@ package race
//go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
//go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
//go:cgo_import_dynamic _exit _exit ""
-//go:cgo_import_dynamic _sanitizer_internal_memcpy _sanitizer_internal_memcpy ""
-//go:cgo_import_dynamic _sanitizer_internal_memmove _sanitizer_internal_memmove ""
-//go:cgo_import_dynamic _sanitizer_internal_memset _sanitizer_internal_memset ""
//go:cgo_import_dynamic abort abort ""
//go:cgo_import_dynamic arc4random_buf arc4random_buf ""
//go:cgo_import_dynamic close close ""
@@ -40,6 +37,7 @@ package race
//go:cgo_import_dynamic dyld_shared_cache_iterate_text dyld_shared_cache_iterate_text ""
//go:cgo_import_dynamic execve execve ""
//go:cgo_import_dynamic exit exit ""
+//go:cgo_import_dynamic fcntl fcntl ""
//go:cgo_import_dynamic fstat$INODE64 fstat$INODE64 ""
//go:cgo_import_dynamic ftruncate ftruncate ""
//go:cgo_import_dynamic getpid getpid ""
@@ -57,7 +55,6 @@ package race
//go:cgo_import_dynamic madvise madvise ""
//go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
//go:cgo_import_dynamic malloc_zones malloc_zones ""
-//go:cgo_import_dynamic memcpy memcpy ""
//go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
//go:cgo_import_dynamic mkdir mkdir ""
//go:cgo_import_dynamic mprotect mprotect ""
@@ -103,6 +100,3 @@ package race
//go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
//go:cgo_import_dynamic waitpid waitpid ""
//go:cgo_import_dynamic write write ""
-//go:cgo_import_dynamic memcpy memcpy ""
-//go:cgo_import_dynamic memmove memmove ""
-//go:cgo_import_dynamic memset memset ""
diff --git a/src/runtime/race/race_darwin_arm64.go b/src/runtime/race/race_darwin_arm64.go
index cb703a6dedd3de..97eab4d6840acd 100644
--- a/src/runtime/race/race_darwin_arm64.go
+++ b/src/runtime/race/race_darwin_arm64.go
@@ -27,9 +27,6 @@ package race
//go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
//go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
//go:cgo_import_dynamic _exit _exit ""
-//go:cgo_import_dynamic _sanitizer_internal_memcpy _sanitizer_internal_memcpy ""
-//go:cgo_import_dynamic _sanitizer_internal_memmove _sanitizer_internal_memmove ""
-//go:cgo_import_dynamic _sanitizer_internal_memset _sanitizer_internal_memset ""
//go:cgo_import_dynamic abort abort ""
//go:cgo_import_dynamic arc4random_buf arc4random_buf ""
//go:cgo_import_dynamic bzero bzero ""
@@ -40,6 +37,7 @@ package race
//go:cgo_import_dynamic dyld_shared_cache_iterate_text dyld_shared_cache_iterate_text ""
//go:cgo_import_dynamic execve execve ""
//go:cgo_import_dynamic exit exit ""
+//go:cgo_import_dynamic fcntl fcntl ""
//go:cgo_import_dynamic fstat fstat ""
//go:cgo_import_dynamic ftruncate ftruncate ""
//go:cgo_import_dynamic getpid getpid ""
@@ -57,7 +55,6 @@ package race
//go:cgo_import_dynamic madvise madvise ""
//go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
//go:cgo_import_dynamic malloc_zones malloc_zones ""
-//go:cgo_import_dynamic memcpy memcpy ""
//go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
//go:cgo_import_dynamic mkdir mkdir ""
//go:cgo_import_dynamic mprotect mprotect ""
@@ -103,6 +100,3 @@ package race
//go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
//go:cgo_import_dynamic waitpid waitpid ""
//go:cgo_import_dynamic write write ""
-//go:cgo_import_dynamic memcpy memcpy ""
-//go:cgo_import_dynamic memmove memmove ""
-//go:cgo_import_dynamic memset memset ""
diff --git a/src/runtime/race/race_darwin_arm64.syso b/src/runtime/race/race_darwin_arm64.syso
index 8d8c120717fe71..eb25805f000104 100644
Binary files a/src/runtime/race/race_darwin_arm64.syso and b/src/runtime/race/race_darwin_arm64.syso differ
diff --git a/src/runtime/rt0_windows_arm.s b/src/runtime/rt0_windows_arm.s
deleted file mode 100644
index c5787d0dee0034..00000000000000
--- a/src/runtime/rt0_windows_arm.s
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "go_asm.h"
-#include "go_tls.h"
-#include "textflag.h"
-
-// This is the entry point for the program from the
-// kernel for an ordinary -buildmode=exe program.
-TEXT _rt0_arm_windows(SB),NOSPLIT|NOFRAME,$0
- B ·rt0_go(SB)
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 29e9b8a7b999ae..b5d2dcefaded99 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -492,6 +492,10 @@ type g struct {
coroarg *coro // argument during coroutine transfers
bubble *synctestBubble
+ // xRegs stores the extended register state if this G has been
+ // asynchronously preempted.
+ xRegs xRegPerG
+
// Per-G tracer state.
trace gTraceState
@@ -760,6 +764,11 @@ type p struct {
// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
gcStopTime int64
+ // xRegs is the per-P extended register state used by asynchronous
+ // preemption. This is an empty struct on platforms that don't use extended
+ // register state.
+ xRegs xRegPerP
+
// Padding is no longer needed. False sharing is now not a worry because p is large enough
// that its size class is an integer multiple of the cache line size (for any of our architectures).
}
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 07778c8ebed7a8..f7628a0165b2ed 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -39,7 +39,7 @@ func enableWER() {
}
}
-// in sys_windows_386.s, sys_windows_amd64.s, sys_windows_arm.s, and sys_windows_arm64.s
+// in sys_windows_386.s, sys_windows_amd64.s, and sys_windows_arm64.s
func exceptiontramp()
func firstcontinuetramp()
func lastcontinuetramp()
@@ -64,10 +64,9 @@ func initExceptionHandler() {
//go:nosplit
func isAbort(r *context) bool {
pc := r.ip()
- if GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" {
+ if GOARCH == "386" || GOARCH == "amd64" {
// In the case of an abort, the exception IP is one byte after
- // the INT3 (this differs from UNIX OSes). Note that on ARM,
- // this means that the exception IP is no longer aligned.
+ // the INT3 (this differs from UNIX OSes).
pc--
}
return isAbortPC(pc)
diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go
index a5dc8aed3443bc..de859866a5adb2 100644
--- a/src/runtime/sizeof_test.go
+++ b/src/runtime/sizeof_test.go
@@ -15,13 +15,18 @@ import (
func TestSizeof(t *testing.T) {
const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+ const xreg = unsafe.Sizeof(runtime.XRegPerG{}) // Varies per architecture
var tests = []struct {
val any // type as a value
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {runtime.G{}, 280, 440}, // g, but exported for testing
- {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
+ {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
+ {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
+ }
+
+ if xreg > runtime.PtrSize {
+ t.Errorf("unsafe.Sizeof(xRegPerG) = %d, want <= %d", xreg, runtime.PtrSize)
}
for _, tt := range tests {
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index b3c3d8c0d53864..e86ebf41c7d6ac 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -236,7 +236,7 @@ func callbackasm()
// and we want callback to arrive at
// correspondent call instruction instead of start of
// runtime.callbackasm.
-// On ARM, runtime.callbackasm is a series of mov and branch instructions.
+// On ARM64, runtime.callbackasm is a series of mov and branch instructions.
// R12 is loaded with the callback index. Each entry is two instructions,
// hence 8 bytes.
func callbackasmAddr(i int) uintptr {
@@ -246,8 +246,8 @@ func callbackasmAddr(i int) uintptr {
panic("unsupported architecture")
case "386", "amd64":
entrySize = 5
- case "arm", "arm64":
- // On ARM and ARM64, each entry is a MOV instruction
+ case "arm64":
+ // On ARM64, each entry is a MOV instruction
// followed by a branch instruction
entrySize = 8
}
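
Spelled out, the arithmetic the comment describes: callback i begins entrySize*i bytes past runtime.callbackasm, so handing out that interior address routes a call through the i'th table entry. Illustrative sketch, not the runtime's code:

    package main

    import "fmt"

    // entryOffset computes how far into the callbackasm table entry i
    // begins: a 5-byte CALL on 386/amd64, an 8-byte MOV+branch pair
    // on arm64.
    func entryOffset(i int, goarch string) int {
        switch goarch {
        case "386", "amd64":
            return i * 5
        case "arm64":
            return i * 8
        }
        panic("unsupported architecture")
    }

    func main() {
        fmt.Println(entryOffset(3, "arm64")) // 24
    }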
diff --git a/src/runtime/testdata/testprog/checkptr.go b/src/runtime/testdata/testprog/checkptr.go
index 60e71e66d7f4f5..ff99fa8c7b702f 100644
--- a/src/runtime/testdata/testprog/checkptr.go
+++ b/src/runtime/testdata/testprog/checkptr.go
@@ -16,6 +16,7 @@ func init() {
register("CheckPtrAlignmentNilPtr", CheckPtrAlignmentNilPtr)
register("CheckPtrArithmetic", CheckPtrArithmetic)
register("CheckPtrArithmetic2", CheckPtrArithmetic2)
+ register("CheckPtrArithmeticUnsafeAdd", CheckPtrArithmeticUnsafeAdd)
register("CheckPtrSize", CheckPtrSize)
register("CheckPtrSmall", CheckPtrSmall)
register("CheckPtrSliceOK", CheckPtrSliceOK)
@@ -79,6 +80,11 @@ func CheckPtrArithmetic2() {
sink2 = unsafe.Pointer(uintptr(p) & ^one)
}
+func CheckPtrArithmeticUnsafeAdd() {
+ data := make([]byte, 128)
+ sink2 = (*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(data)), len(data)))
+}
+
func CheckPtrSize() {
p := new(int64)
sink2 = p
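
The new test case deserves a gloss: unsafe.Add to len(data) yields a pointer one past the end of the allocation, which Go's unsafe.Pointer rules (unlike C's) do not allow, so -d=checkptr flags it. Arithmetic that stays in bounds passes; a hedged counterpart:

    package main

    import "unsafe"

    func main() {
        data := make([]byte, 128)
        // In-bounds: points at the final element, so checkptr is happy.
        last := (*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(data)), len(data)-1))
        _ = last
    }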
diff --git a/src/runtime/time_windows_arm.s b/src/runtime/time_windows_arm.s
deleted file mode 100644
index ff5686d9c41139..00000000000000
--- a/src/runtime/time_windows_arm.s
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !faketime
-
-#include "go_asm.h"
-#include "textflag.h"
-#include "time_windows.h"
-
-TEXT time·now(SB),NOSPLIT,$0-20
- MOVW $_INTERRUPT_TIME, R3
-loop:
- MOVW time_hi1(R3), R1
- DMB MB_ISH
- MOVW time_lo(R3), R0
- DMB MB_ISH
- MOVW time_hi2(R3), R2
- CMP R1, R2
- BNE loop
-
- // wintime = R1:R0, multiply by 100
- MOVW $100, R2
- MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
- MULA R1, R2, R4, R4
-
- // wintime*100 = R4:R3
- MOVW R3, mono+12(FP)
- MOVW R4, mono+16(FP)
-
- MOVW $_SYSTEM_TIME, R3
-wall:
- MOVW time_hi1(R3), R1
- DMB MB_ISH
- MOVW time_lo(R3), R0
- DMB MB_ISH
- MOVW time_hi2(R3), R2
- CMP R1, R2
- BNE wall
-
- // w = R1:R0 in 100ns untis
- // convert to Unix epoch (but still 100ns units)
- #define delta 116444736000000000
- SUB.S $(delta & 0xFFFFFFFF), R0
- SBC $(delta >> 32), R1
-
- // Convert to nSec
- MOVW $100, R2
- MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
- MULA R1, R2, R4, R4
- // w = R2:R1 in nSec
- MOVW R3, R1 // R4:R3 -> R2:R1
- MOVW R4, R2
-
- // multiply nanoseconds by reciprocal of 10**9 (scaled by 2**61)
- // to get seconds (96 bit scaled result)
- MOVW $0x89705f41, R3 // 2**61 * 10**-9
- MULLU R1,R3,(R6,R5) // R7:R6:R5 = R2:R1 * R3
- MOVW $0,R7
- MULALU R2,R3,(R7,R6)
-
- // unscale by discarding low 32 bits, shifting the rest by 29
- MOVW R6>>29,R6 // R7:R6 = (R7:R6:R5 >> 61)
- ORR R7<<3,R6
- MOVW R7>>29,R7
-
- // subtract (10**9 * sec) from nsec to get nanosecond remainder
- MOVW $1000000000, R5 // 10**9
- MULLU R6,R5,(R9,R8) // R9:R8 = R7:R6 * R5
- MULA R7,R5,R9,R9
- SUB.S R8,R1 // R2:R1 -= R9:R8
- SBC R9,R2
-
- // because reciprocal was a truncated repeating fraction, quotient
- // may be slightly too small -- adjust to make remainder < 10**9
- CMP R5,R1 // if remainder > 10**9
- SUB.HS R5,R1 // remainder -= 10**9
- ADD.HS $1,R6 // sec += 1
-
- MOVW R6,sec_lo+0(FP)
- MOVW R7,sec_hi+4(FP)
- MOVW R1,nsec+8(FP)
- RET
-
diff --git a/src/runtime/vdso_linux.go b/src/runtime/vdso_linux.go
index 72b17ce4ac4efa..c068eede777918 100644
--- a/src/runtime/vdso_linux.go
+++ b/src/runtime/vdso_linux.go
@@ -285,7 +285,7 @@ func vdsoauxv(tag, val uintptr) {
}
}
-// vdsoMarker reports whether PC is on the VDSO page.
+// inVDSOPage reports whether PC is on the VDSO page.
//
//go:nosplit
func inVDSOPage(pc uintptr) bool {
diff --git a/src/runtime/vdso_linux_test.go b/src/runtime/vdso_linux_test.go
new file mode 100644
index 00000000000000..313dd6e7185a6a
--- /dev/null
+++ b/src/runtime/vdso_linux_test.go
@@ -0,0 +1,56 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
+
+package runtime_test
+
+import (
+ "runtime"
+ "testing"
+)
+
+// DT_GNU_HASH hash function.
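+// Note: VDSO symbol names are ASCII, so ranging over the string's runes
+// (here and in symHash below) is equivalent to the byte-wise ELF definition.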
+func gnuHash(s string) uint32 {
+ h := uint32(5381)
+ for _, r := range s {
+ h = (h << 5) + h + uint32(r)
+ }
+ return h
+}
+
+// DT_HASH hash function.
+func symHash(s string) uint32 {
+ var h, g uint32
+ for _, r := range s {
+ h = (h << 4) + uint32(r)
+ g = h & 0xf0000000
+ if g != 0 {
+ h ^= g >> 24
+ }
+ h &^= g
+ }
+ return h
+}
+
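+// TestVDSOHash checks that the hash value recorded for each VDSO symbol key
+// matches the reference DT_HASH and DT_GNU_HASH implementations above.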
+func TestVDSOHash(t *testing.T) {
+ for _, sym := range runtime.VDSOSymbolKeys() {
+ name := sym.Name()
+ t.Run(name, func(t *testing.T) {
+ want := symHash(name)
+ if sym.SymHash() != want {
+ t.Errorf("SymHash got %#x want %#x", sym.SymHash(), want)
+ }
+
+ want = gnuHash(name)
+ if sym.GNUHash() != want {
+ t.Errorf("GNUHash got %#x want %#x", sym.GNUHash(), want)
+ }
+ })
+ }
+}
diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go
index 14847db3fdbc78..7f0ac70bfd1ef3 100644
--- a/src/runtime/wincallback.go
+++ b/src/runtime/wincallback.go
@@ -47,34 +47,6 @@ TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
}
}
-func genasmArm() {
- var buf bytes.Buffer
-
- buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
-
-// External code calls into callbackasm at an offset corresponding
-// to the callback index. Callbackasm is a table of MOV and B instructions.
-// The MOV instruction loads R12 with the callback index, and the
-// B instruction branches to callbackasm1.
-// callbackasm1 takes the callback index from R12 and
-// indexes into an array that stores information about each callback.
-// It then calls the Go implementation for that callback.
-#include "textflag.h"
-
-TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
-`)
- for i := 0; i < maxCallback; i++ {
- fmt.Fprintf(&buf, "\tMOVW\t$%d, R12\n", i)
- buf.WriteString("\tB\truntime·callbackasm1(SB)\n")
- }
-
- err := os.WriteFile("zcallback_windows_arm.s", buf.Bytes(), 0666)
- if err != nil {
- fmt.Fprintf(os.Stderr, "wincallback: %s\n", err)
- os.Exit(2)
- }
-}
-
func genasmArm64() {
var buf bytes.Buffer
@@ -121,7 +93,6 @@ const cb_max = %d // maximum number of windows callbacks allowed
func main() {
genasm386Amd64()
- genasmArm()
genasmArm64()
gengo()
}
diff --git a/src/runtime/zcallback_windows_arm.s b/src/runtime/zcallback_windows_arm.s
deleted file mode 100644
index f943d84cbfe5fd..00000000000000
--- a/src/runtime/zcallback_windows_arm.s
+++ /dev/null
@@ -1,4012 +0,0 @@
-// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
-
-// External code calls into callbackasm at an offset corresponding
-// to the callback index. Callbackasm is a table of MOV and B instructions.
-// The MOV instruction loads R12 with the callback index, and the
-// B instruction branches to callbackasm1.
-// callbackasm1 takes the callback index from R12 and
-// indexes into an array that stores information about each callback.
-// It then calls the Go implementation for that callback.
-#include "textflag.h"
-
-TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
- MOVW $0, R12
- B runtime·callbackasm1(SB)
- MOVW $1, R12
- B runtime·callbackasm1(SB)
- MOVW $2, R12
- B runtime·callbackasm1(SB)
- MOVW $3, R12
- B runtime·callbackasm1(SB)
- MOVW $4, R12
- B runtime·callbackasm1(SB)
- MOVW $5, R12
- B runtime·callbackasm1(SB)
- MOVW $6, R12
- B runtime·callbackasm1(SB)
- MOVW $7, R12
- B runtime·callbackasm1(SB)
- MOVW $8, R12
- B runtime·callbackasm1(SB)
- MOVW $9, R12
- B runtime·callbackasm1(SB)
- MOVW $10, R12
- B runtime·callbackasm1(SB)
- MOVW $11, R12
- B runtime·callbackasm1(SB)
- MOVW $12, R12
- B runtime·callbackasm1(SB)
- MOVW $13, R12
- B runtime·callbackasm1(SB)
- MOVW $14, R12
- B runtime·callbackasm1(SB)
- MOVW $15, R12
- B runtime·callbackasm1(SB)
- MOVW $16, R12
- B runtime·callbackasm1(SB)
- MOVW $17, R12
- B runtime·callbackasm1(SB)
- MOVW $18, R12
- B runtime·callbackasm1(SB)
- MOVW $19, R12
- B runtime·callbackasm1(SB)
- MOVW $20, R12
- B runtime·callbackasm1(SB)
- MOVW $21, R12
- B runtime·callbackasm1(SB)
- MOVW $22, R12
- B runtime·callbackasm1(SB)
- MOVW $23, R12
- B runtime·callbackasm1(SB)
- MOVW $24, R12
- B runtime·callbackasm1(SB)
- MOVW $25, R12
- B runtime·callbackasm1(SB)
- MOVW $26, R12
- B runtime·callbackasm1(SB)
- MOVW $27, R12
- B runtime·callbackasm1(SB)
- MOVW $28, R12
- B runtime·callbackasm1(SB)
- MOVW $29, R12
- B runtime·callbackasm1(SB)
- MOVW $30, R12
- B runtime·callbackasm1(SB)
- MOVW $31, R12
- B runtime·callbackasm1(SB)
- MOVW $32, R12
- B runtime·callbackasm1(SB)
- MOVW $33, R12
- B runtime·callbackasm1(SB)
- MOVW $34, R12
- B runtime·callbackasm1(SB)
- MOVW $35, R12
- B runtime·callbackasm1(SB)
- MOVW $36, R12
- B runtime·callbackasm1(SB)
- MOVW $37, R12
- B runtime·callbackasm1(SB)
- MOVW $38, R12
- B runtime·callbackasm1(SB)
- MOVW $39, R12
- B runtime·callbackasm1(SB)
- MOVW $40, R12
- B runtime·callbackasm1(SB)
- MOVW $41, R12
- B runtime·callbackasm1(SB)
- MOVW $42, R12
- B runtime·callbackasm1(SB)
- MOVW $43, R12
- B runtime·callbackasm1(SB)
- MOVW $44, R12
- B runtime·callbackasm1(SB)
- MOVW $45, R12
- B runtime·callbackasm1(SB)
- MOVW $46, R12
- B runtime·callbackasm1(SB)
- MOVW $47, R12
- B runtime·callbackasm1(SB)
- MOVW $48, R12
- B runtime·callbackasm1(SB)
- MOVW $49, R12
- B runtime·callbackasm1(SB)
- MOVW $50, R12
- B runtime·callbackasm1(SB)
- MOVW $51, R12
- B runtime·callbackasm1(SB)
- MOVW $52, R12
- B runtime·callbackasm1(SB)
- MOVW $53, R12
- B runtime·callbackasm1(SB)
- MOVW $54, R12
- B runtime·callbackasm1(SB)
- MOVW $55, R12
- B runtime·callbackasm1(SB)
- MOVW $56, R12
- B runtime·callbackasm1(SB)
- MOVW $57, R12
- B runtime·callbackasm1(SB)
- MOVW $58, R12
- B runtime·callbackasm1(SB)
- MOVW $59, R12
- B runtime·callbackasm1(SB)
- MOVW $60, R12
- B runtime·callbackasm1(SB)
- MOVW $61, R12
- B runtime·callbackasm1(SB)
- MOVW $62, R12
- B runtime·callbackasm1(SB)
- MOVW $63, R12
- B runtime·callbackasm1(SB)
- MOVW $64, R12
- B runtime·callbackasm1(SB)
- MOVW $65, R12
- B runtime·callbackasm1(SB)
- MOVW $66, R12
- B runtime·callbackasm1(SB)
- MOVW $67, R12
- B runtime·callbackasm1(SB)
- MOVW $68, R12
- B runtime·callbackasm1(SB)
- MOVW $69, R12
- B runtime·callbackasm1(SB)
- MOVW $70, R12
- B runtime·callbackasm1(SB)
- MOVW $71, R12
- B runtime·callbackasm1(SB)
- MOVW $72, R12
- B runtime·callbackasm1(SB)
- MOVW $73, R12
- B runtime·callbackasm1(SB)
- MOVW $74, R12
- B runtime·callbackasm1(SB)
- MOVW $75, R12
- B runtime·callbackasm1(SB)
- MOVW $76, R12
- B runtime·callbackasm1(SB)
- MOVW $77, R12
- B runtime·callbackasm1(SB)
- MOVW $78, R12
- B runtime·callbackasm1(SB)
- MOVW $79, R12
- B runtime·callbackasm1(SB)
- MOVW $80, R12
- B runtime·callbackasm1(SB)
- MOVW $81, R12
- B runtime·callbackasm1(SB)
- MOVW $82, R12
- B runtime·callbackasm1(SB)
- MOVW $83, R12
- B runtime·callbackasm1(SB)
- MOVW $84, R12
- B runtime·callbackasm1(SB)
- MOVW $85, R12
- B runtime·callbackasm1(SB)
- MOVW $86, R12
- B runtime·callbackasm1(SB)
- MOVW $87, R12
- B runtime·callbackasm1(SB)
- MOVW $88, R12
- B runtime·callbackasm1(SB)
- MOVW $89, R12
- B runtime·callbackasm1(SB)
- MOVW $90, R12
- B runtime·callbackasm1(SB)
- MOVW $91, R12
- B runtime·callbackasm1(SB)
- MOVW $92, R12
- B runtime·callbackasm1(SB)
- MOVW $93, R12
- B runtime·callbackasm1(SB)
- MOVW $94, R12
- B runtime·callbackasm1(SB)
- MOVW $95, R12
- B runtime·callbackasm1(SB)
- MOVW $96, R12
- B runtime·callbackasm1(SB)
- MOVW $97, R12
- B runtime·callbackasm1(SB)
- MOVW $98, R12
- B runtime·callbackasm1(SB)
- MOVW $99, R12
- B runtime·callbackasm1(SB)
- MOVW $100, R12
- B runtime·callbackasm1(SB)
- MOVW $101, R12
- B runtime·callbackasm1(SB)
- MOVW $102, R12
- B runtime·callbackasm1(SB)
- MOVW $103, R12
- B runtime·callbackasm1(SB)
- MOVW $104, R12
- B runtime·callbackasm1(SB)
- MOVW $105, R12
- B runtime·callbackasm1(SB)
- MOVW $106, R12
- B runtime·callbackasm1(SB)
- MOVW $107, R12
- B runtime·callbackasm1(SB)
- MOVW $108, R12
- B runtime·callbackasm1(SB)
- MOVW $109, R12
- B runtime·callbackasm1(SB)
- MOVW $110, R12
- B runtime·callbackasm1(SB)
- MOVW $111, R12
- B runtime·callbackasm1(SB)
- MOVW $112, R12
- B runtime·callbackasm1(SB)
- MOVW $113, R12
- B runtime·callbackasm1(SB)
- MOVW $114, R12
- B runtime·callbackasm1(SB)
- MOVW $115, R12
- B runtime·callbackasm1(SB)
- MOVW $116, R12
- B runtime·callbackasm1(SB)
- MOVW $117, R12
- B runtime·callbackasm1(SB)
- MOVW $118, R12
- B runtime·callbackasm1(SB)
- MOVW $119, R12
- B runtime·callbackasm1(SB)
- MOVW $120, R12
- B runtime·callbackasm1(SB)
- MOVW $121, R12
- B runtime·callbackasm1(SB)
- MOVW $122, R12
- B runtime·callbackasm1(SB)
- MOVW $123, R12
- B runtime·callbackasm1(SB)
- MOVW $124, R12
- B runtime·callbackasm1(SB)
- MOVW $125, R12
- B runtime·callbackasm1(SB)
- MOVW $126, R12
- B runtime·callbackasm1(SB)
- MOVW $127, R12
- B runtime·callbackasm1(SB)
- MOVW $128, R12
- B runtime·callbackasm1(SB)
- MOVW $129, R12
- B runtime·callbackasm1(SB)
- MOVW $130, R12
- B runtime·callbackasm1(SB)
- MOVW $131, R12
- B runtime·callbackasm1(SB)
- MOVW $132, R12
- B runtime·callbackasm1(SB)
- MOVW $133, R12
- B runtime·callbackasm1(SB)
- MOVW $134, R12
- B runtime·callbackasm1(SB)
- MOVW $135, R12
- B runtime·callbackasm1(SB)
- MOVW $136, R12
- B runtime·callbackasm1(SB)
- MOVW $137, R12
- B runtime·callbackasm1(SB)
- MOVW $138, R12
- B runtime·callbackasm1(SB)
- MOVW $139, R12
- B runtime·callbackasm1(SB)
- MOVW $140, R12
- B runtime·callbackasm1(SB)
- MOVW $141, R12
- B runtime·callbackasm1(SB)
- MOVW $142, R12
- B runtime·callbackasm1(SB)
- MOVW $143, R12
- B runtime·callbackasm1(SB)
- MOVW $144, R12
- B runtime·callbackasm1(SB)
- MOVW $145, R12
- B runtime·callbackasm1(SB)
- MOVW $146, R12
- B runtime·callbackasm1(SB)
- MOVW $147, R12
- B runtime·callbackasm1(SB)
- MOVW $148, R12
- B runtime·callbackasm1(SB)
- MOVW $149, R12
- B runtime·callbackasm1(SB)
- MOVW $150, R12
- B runtime·callbackasm1(SB)
- MOVW $151, R12
- B runtime·callbackasm1(SB)
- MOVW $152, R12
- B runtime·callbackasm1(SB)
- MOVW $153, R12
- B runtime·callbackasm1(SB)
- MOVW $154, R12
- B runtime·callbackasm1(SB)
- MOVW $155, R12
- B runtime·callbackasm1(SB)
- MOVW $156, R12
- B runtime·callbackasm1(SB)
- MOVW $157, R12
- B runtime·callbackasm1(SB)
- MOVW $158, R12
- B runtime·callbackasm1(SB)
- MOVW $159, R12
- B runtime·callbackasm1(SB)
- MOVW $160, R12
- B runtime·callbackasm1(SB)
- MOVW $161, R12
- B runtime·callbackasm1(SB)
- MOVW $162, R12
- B runtime·callbackasm1(SB)
- MOVW $163, R12
- B runtime·callbackasm1(SB)
- MOVW $164, R12
- B runtime·callbackasm1(SB)
- MOVW $165, R12
- B runtime·callbackasm1(SB)
- MOVW $166, R12
- B runtime·callbackasm1(SB)
- MOVW $167, R12
- B runtime·callbackasm1(SB)
- MOVW $168, R12
- B runtime·callbackasm1(SB)
- MOVW $169, R12
- B runtime·callbackasm1(SB)
- MOVW $170, R12
- B runtime·callbackasm1(SB)
- MOVW $171, R12
- B runtime·callbackasm1(SB)
- MOVW $172, R12
- B runtime·callbackasm1(SB)
- MOVW $173, R12
- B runtime·callbackasm1(SB)
- MOVW $174, R12
- B runtime·callbackasm1(SB)
- MOVW $175, R12
- B runtime·callbackasm1(SB)
- MOVW $176, R12
- B runtime·callbackasm1(SB)
- MOVW $177, R12
- B runtime·callbackasm1(SB)
- MOVW $178, R12
- B runtime·callbackasm1(SB)
- MOVW $179, R12
- B runtime·callbackasm1(SB)
- MOVW $180, R12
- B runtime·callbackasm1(SB)
- MOVW $181, R12
- B runtime·callbackasm1(SB)
- MOVW $182, R12
- B runtime·callbackasm1(SB)
- MOVW $183, R12
- B runtime·callbackasm1(SB)
- MOVW $184, R12
- B runtime·callbackasm1(SB)
- MOVW $185, R12
- B runtime·callbackasm1(SB)
- MOVW $186, R12
- B runtime·callbackasm1(SB)
- MOVW $187, R12
- B runtime·callbackasm1(SB)
- MOVW $188, R12
- B runtime·callbackasm1(SB)
- MOVW $189, R12
- B runtime·callbackasm1(SB)
- MOVW $190, R12
- B runtime·callbackasm1(SB)
- MOVW $191, R12
- B runtime·callbackasm1(SB)
- MOVW $192, R12
- B runtime·callbackasm1(SB)
- MOVW $193, R12
- B runtime·callbackasm1(SB)
- MOVW $194, R12
- B runtime·callbackasm1(SB)
- MOVW $195, R12
- B runtime·callbackasm1(SB)
- MOVW $196, R12
- B runtime·callbackasm1(SB)
- MOVW $197, R12
- B runtime·callbackasm1(SB)
- MOVW $198, R12
- B runtime·callbackasm1(SB)
- MOVW $199, R12
- B runtime·callbackasm1(SB)
- MOVW $200, R12
- B runtime·callbackasm1(SB)
- MOVW $201, R12
- B runtime·callbackasm1(SB)
- MOVW $202, R12
- B runtime·callbackasm1(SB)
- MOVW $203, R12
- B runtime·callbackasm1(SB)
- MOVW $204, R12
- B runtime·callbackasm1(SB)
- MOVW $205, R12
- B runtime·callbackasm1(SB)
- MOVW $206, R12
- B runtime·callbackasm1(SB)
- MOVW $207, R12
- B runtime·callbackasm1(SB)
- MOVW $208, R12
- B runtime·callbackasm1(SB)
- MOVW $209, R12
- B runtime·callbackasm1(SB)
- MOVW $210, R12
- B runtime·callbackasm1(SB)
- MOVW $211, R12
- B runtime·callbackasm1(SB)
- MOVW $212, R12
- B runtime·callbackasm1(SB)
- MOVW $213, R12
- B runtime·callbackasm1(SB)
- MOVW $214, R12
- B runtime·callbackasm1(SB)
- MOVW $215, R12
- B runtime·callbackasm1(SB)
- MOVW $216, R12
- B runtime·callbackasm1(SB)
- MOVW $217, R12
- B runtime·callbackasm1(SB)
- MOVW $218, R12
- B runtime·callbackasm1(SB)
- MOVW $219, R12
- B runtime·callbackasm1(SB)
- MOVW $220, R12
- B runtime·callbackasm1(SB)
- MOVW $221, R12
- B runtime·callbackasm1(SB)
- MOVW $222, R12
- B runtime·callbackasm1(SB)
- MOVW $223, R12
- B runtime·callbackasm1(SB)
- MOVW $224, R12
- B runtime·callbackasm1(SB)
- MOVW $225, R12
- B runtime·callbackasm1(SB)
- MOVW $226, R12
- B runtime·callbackasm1(SB)
- MOVW $227, R12
- B runtime·callbackasm1(SB)
- MOVW $228, R12
- B runtime·callbackasm1(SB)
- MOVW $229, R12
- B runtime·callbackasm1(SB)
- MOVW $230, R12
- B runtime·callbackasm1(SB)
- MOVW $231, R12
- B runtime·callbackasm1(SB)
- MOVW $232, R12
- B runtime·callbackasm1(SB)
- MOVW $233, R12
- B runtime·callbackasm1(SB)
- MOVW $234, R12
- B runtime·callbackasm1(SB)
- MOVW $235, R12
- B runtime·callbackasm1(SB)
- MOVW $236, R12
- B runtime·callbackasm1(SB)
- MOVW $237, R12
- B runtime·callbackasm1(SB)
- MOVW $238, R12
- B runtime·callbackasm1(SB)
- MOVW $239, R12
- B runtime·callbackasm1(SB)
- MOVW $240, R12
- B runtime·callbackasm1(SB)
- MOVW $241, R12
- B runtime·callbackasm1(SB)
- MOVW $242, R12
- B runtime·callbackasm1(SB)
- MOVW $243, R12
- B runtime·callbackasm1(SB)
- MOVW $244, R12
- B runtime·callbackasm1(SB)
- MOVW $245, R12
- B runtime·callbackasm1(SB)
- MOVW $246, R12
- B runtime·callbackasm1(SB)
- MOVW $247, R12
- B runtime·callbackasm1(SB)
- MOVW $248, R12
- B runtime·callbackasm1(SB)
- MOVW $249, R12
- B runtime·callbackasm1(SB)
- MOVW $250, R12
- B runtime·callbackasm1(SB)
- MOVW $251, R12
- B runtime·callbackasm1(SB)
- MOVW $252, R12
- B runtime·callbackasm1(SB)
- MOVW $253, R12
- B runtime·callbackasm1(SB)
- MOVW $254, R12
- B runtime·callbackasm1(SB)
- MOVW $255, R12
- B runtime·callbackasm1(SB)
- MOVW $256, R12
- B runtime·callbackasm1(SB)
- MOVW $257, R12
- B runtime·callbackasm1(SB)
- MOVW $258, R12
- B runtime·callbackasm1(SB)
- MOVW $259, R12
- B runtime·callbackasm1(SB)
- MOVW $260, R12
- B runtime·callbackasm1(SB)
- MOVW $261, R12
- B runtime·callbackasm1(SB)
- MOVW $262, R12
- B runtime·callbackasm1(SB)
- MOVW $263, R12
- B runtime·callbackasm1(SB)
- MOVW $264, R12
- B runtime·callbackasm1(SB)
- MOVW $265, R12
- B runtime·callbackasm1(SB)
- MOVW $266, R12
- B runtime·callbackasm1(SB)
- MOVW $267, R12
- B runtime·callbackasm1(SB)
- MOVW $268, R12
- B runtime·callbackasm1(SB)
- MOVW $269, R12
- B runtime·callbackasm1(SB)
- MOVW $270, R12
- B runtime·callbackasm1(SB)
- MOVW $271, R12
- B runtime·callbackasm1(SB)
- MOVW $272, R12
- B runtime·callbackasm1(SB)
- MOVW $273, R12
- B runtime·callbackasm1(SB)
- MOVW $274, R12
- B runtime·callbackasm1(SB)
- MOVW $275, R12
- B runtime·callbackasm1(SB)
- MOVW $276, R12
- B runtime·callbackasm1(SB)
- MOVW $277, R12
- B runtime·callbackasm1(SB)
- MOVW $278, R12
- B runtime·callbackasm1(SB)
- MOVW $279, R12
- B runtime·callbackasm1(SB)
- MOVW $280, R12
- B runtime·callbackasm1(SB)
- MOVW $281, R12
- B runtime·callbackasm1(SB)
- MOVW $282, R12
- B runtime·callbackasm1(SB)
- MOVW $283, R12
- B runtime·callbackasm1(SB)
- MOVW $284, R12
- B runtime·callbackasm1(SB)
- MOVW $285, R12
- B runtime·callbackasm1(SB)
- MOVW $286, R12
- B runtime·callbackasm1(SB)
- MOVW $287, R12
- B runtime·callbackasm1(SB)
- MOVW $288, R12
- B runtime·callbackasm1(SB)
- MOVW $289, R12
- B runtime·callbackasm1(SB)
- MOVW $290, R12
- B runtime·callbackasm1(SB)
- MOVW $291, R12
- B runtime·callbackasm1(SB)
- MOVW $292, R12
- B runtime·callbackasm1(SB)
- MOVW $293, R12
- B runtime·callbackasm1(SB)
- MOVW $294, R12
- B runtime·callbackasm1(SB)
- MOVW $295, R12
- B runtime·callbackasm1(SB)
- MOVW $296, R12
- B runtime·callbackasm1(SB)
- MOVW $297, R12
- B runtime·callbackasm1(SB)
- MOVW $298, R12
- B runtime·callbackasm1(SB)
- MOVW $299, R12
- B runtime·callbackasm1(SB)
- MOVW $300, R12
- B runtime·callbackasm1(SB)
- MOVW $301, R12
- B runtime·callbackasm1(SB)
- MOVW $302, R12
- B runtime·callbackasm1(SB)
- MOVW $303, R12
- B runtime·callbackasm1(SB)
- MOVW $304, R12
- B runtime·callbackasm1(SB)
- MOVW $305, R12
- B runtime·callbackasm1(SB)
- MOVW $306, R12
- B runtime·callbackasm1(SB)
- MOVW $307, R12
- B runtime·callbackasm1(SB)
- MOVW $308, R12
- B runtime·callbackasm1(SB)
- MOVW $309, R12
- B runtime·callbackasm1(SB)
- MOVW $310, R12
- B runtime·callbackasm1(SB)
- MOVW $311, R12
- B runtime·callbackasm1(SB)
- MOVW $312, R12
- B runtime·callbackasm1(SB)
- MOVW $313, R12
- B runtime·callbackasm1(SB)
- MOVW $314, R12
- B runtime·callbackasm1(SB)
- MOVW $315, R12
- B runtime·callbackasm1(SB)
- MOVW $316, R12
- B runtime·callbackasm1(SB)
- MOVW $317, R12
- B runtime·callbackasm1(SB)
- MOVW $318, R12
- B runtime·callbackasm1(SB)
- MOVW $319, R12
- B runtime·callbackasm1(SB)
- MOVW $320, R12
- B runtime·callbackasm1(SB)
- MOVW $321, R12
- B runtime·callbackasm1(SB)
- MOVW $322, R12
- B runtime·callbackasm1(SB)
- MOVW $323, R12
- B runtime·callbackasm1(SB)
- MOVW $324, R12
- B runtime·callbackasm1(SB)
- MOVW $325, R12
- B runtime·callbackasm1(SB)
- MOVW $326, R12
- B runtime·callbackasm1(SB)
- MOVW $327, R12
- B runtime·callbackasm1(SB)
- MOVW $328, R12
- B runtime·callbackasm1(SB)
- MOVW $329, R12
- B runtime·callbackasm1(SB)
- MOVW $330, R12
- B runtime·callbackasm1(SB)
- MOVW $331, R12
- B runtime·callbackasm1(SB)
- MOVW $332, R12
- B runtime·callbackasm1(SB)
- MOVW $333, R12
- B runtime·callbackasm1(SB)
- MOVW $334, R12
- B runtime·callbackasm1(SB)
- MOVW $335, R12
- B runtime·callbackasm1(SB)
- MOVW $336, R12
- B runtime·callbackasm1(SB)
- MOVW $337, R12
- B runtime·callbackasm1(SB)
- MOVW $338, R12
- B runtime·callbackasm1(SB)
- MOVW $339, R12
- B runtime·callbackasm1(SB)
- MOVW $340, R12
- B runtime·callbackasm1(SB)
- MOVW $341, R12
- B runtime·callbackasm1(SB)
- MOVW $342, R12
- B runtime·callbackasm1(SB)
- MOVW $343, R12
- B runtime·callbackasm1(SB)
- MOVW $344, R12
- B runtime·callbackasm1(SB)
- MOVW $345, R12
- B runtime·callbackasm1(SB)
- MOVW $346, R12
- B runtime·callbackasm1(SB)
- MOVW $347, R12
- B runtime·callbackasm1(SB)
- MOVW $348, R12
- B runtime·callbackasm1(SB)
- MOVW $349, R12
- B runtime·callbackasm1(SB)
- MOVW $350, R12
- B runtime·callbackasm1(SB)
- MOVW $351, R12
- B runtime·callbackasm1(SB)
- MOVW $352, R12
- B runtime·callbackasm1(SB)
- MOVW $353, R12
- B runtime·callbackasm1(SB)
- MOVW $354, R12
- B runtime·callbackasm1(SB)
- MOVW $355, R12
- B runtime·callbackasm1(SB)
- MOVW $356, R12
- B runtime·callbackasm1(SB)
- MOVW $357, R12
- B runtime·callbackasm1(SB)
- MOVW $358, R12
- B runtime·callbackasm1(SB)
- MOVW $359, R12
- B runtime·callbackasm1(SB)
- MOVW $360, R12
- B runtime·callbackasm1(SB)
- MOVW $361, R12
- B runtime·callbackasm1(SB)
- MOVW $362, R12
- B runtime·callbackasm1(SB)
- MOVW $363, R12
- B runtime·callbackasm1(SB)
- MOVW $364, R12
- B runtime·callbackasm1(SB)
- MOVW $365, R12
- B runtime·callbackasm1(SB)
- MOVW $366, R12
- B runtime·callbackasm1(SB)
- MOVW $367, R12
- B runtime·callbackasm1(SB)
- MOVW $368, R12
- B runtime·callbackasm1(SB)
- MOVW $369, R12
- B runtime·callbackasm1(SB)
- MOVW $370, R12
- B runtime·callbackasm1(SB)
- MOVW $371, R12
- B runtime·callbackasm1(SB)
- MOVW $372, R12
- B runtime·callbackasm1(SB)
- MOVW $373, R12
- B runtime·callbackasm1(SB)
- MOVW $374, R12
- B runtime·callbackasm1(SB)
- MOVW $375, R12
- B runtime·callbackasm1(SB)
- MOVW $376, R12
- B runtime·callbackasm1(SB)
- MOVW $377, R12
- B runtime·callbackasm1(SB)
- MOVW $378, R12
- B runtime·callbackasm1(SB)
- MOVW $379, R12
- B runtime·callbackasm1(SB)
- MOVW $380, R12
- B runtime·callbackasm1(SB)
- MOVW $381, R12
- B runtime·callbackasm1(SB)
- MOVW $382, R12
- B runtime·callbackasm1(SB)
- MOVW $383, R12
- B runtime·callbackasm1(SB)
- MOVW $384, R12
- B runtime·callbackasm1(SB)
- MOVW $385, R12
- B runtime·callbackasm1(SB)
- MOVW $386, R12
- B runtime·callbackasm1(SB)
- MOVW $387, R12
- B runtime·callbackasm1(SB)
- MOVW $388, R12
- B runtime·callbackasm1(SB)
- MOVW $389, R12
- B runtime·callbackasm1(SB)
- MOVW $390, R12
- B runtime·callbackasm1(SB)
- MOVW $391, R12
- B runtime·callbackasm1(SB)
- MOVW $392, R12
- B runtime·callbackasm1(SB)
- MOVW $393, R12
- B runtime·callbackasm1(SB)
- MOVW $394, R12
- B runtime·callbackasm1(SB)
- MOVW $395, R12
- B runtime·callbackasm1(SB)
- MOVW $396, R12
- B runtime·callbackasm1(SB)
- MOVW $397, R12
- B runtime·callbackasm1(SB)
- MOVW $398, R12
- B runtime·callbackasm1(SB)
- MOVW $399, R12
- B runtime·callbackasm1(SB)
- MOVW $400, R12
- B runtime·callbackasm1(SB)
- MOVW $401, R12
- B runtime·callbackasm1(SB)
- MOVW $402, R12
- B runtime·callbackasm1(SB)
- MOVW $403, R12
- B runtime·callbackasm1(SB)
- MOVW $404, R12
- B runtime·callbackasm1(SB)
- MOVW $405, R12
- B runtime·callbackasm1(SB)
- MOVW $406, R12
- B runtime·callbackasm1(SB)
- MOVW $407, R12
- B runtime·callbackasm1(SB)
- MOVW $408, R12
- B runtime·callbackasm1(SB)
- MOVW $409, R12
- B runtime·callbackasm1(SB)
- MOVW $410, R12
- B runtime·callbackasm1(SB)
- MOVW $411, R12
- B runtime·callbackasm1(SB)
- MOVW $412, R12
- B runtime·callbackasm1(SB)
- MOVW $413, R12
- B runtime·callbackasm1(SB)
- MOVW $414, R12
- B runtime·callbackasm1(SB)
- MOVW $415, R12
- B runtime·callbackasm1(SB)
- MOVW $416, R12
- B runtime·callbackasm1(SB)
- MOVW $417, R12
- B runtime·callbackasm1(SB)
- MOVW $418, R12
- B runtime·callbackasm1(SB)
- MOVW $419, R12
- B runtime·callbackasm1(SB)
- MOVW $420, R12
- B runtime·callbackasm1(SB)
- MOVW $421, R12
- B runtime·callbackasm1(SB)
- MOVW $422, R12
- B runtime·callbackasm1(SB)
- MOVW $423, R12
- B runtime·callbackasm1(SB)
- MOVW $424, R12
- B runtime·callbackasm1(SB)
- MOVW $425, R12
- B runtime·callbackasm1(SB)
- MOVW $426, R12
- B runtime·callbackasm1(SB)
- MOVW $427, R12
- B runtime·callbackasm1(SB)
- MOVW $428, R12
- B runtime·callbackasm1(SB)
- MOVW $429, R12
- B runtime·callbackasm1(SB)
- MOVW $430, R12
- B runtime·callbackasm1(SB)
- MOVW $431, R12
- B runtime·callbackasm1(SB)
- MOVW $432, R12
- B runtime·callbackasm1(SB)
- MOVW $433, R12
- B runtime·callbackasm1(SB)
- MOVW $434, R12
- B runtime·callbackasm1(SB)
- MOVW $435, R12
- B runtime·callbackasm1(SB)
- MOVW $436, R12
- B runtime·callbackasm1(SB)
- MOVW $437, R12
- B runtime·callbackasm1(SB)
- MOVW $438, R12
- B runtime·callbackasm1(SB)
- MOVW $439, R12
- B runtime·callbackasm1(SB)
- MOVW $440, R12
- B runtime·callbackasm1(SB)
- MOVW $441, R12
- B runtime·callbackasm1(SB)
- MOVW $442, R12
- B runtime·callbackasm1(SB)
- MOVW $443, R12
- B runtime·callbackasm1(SB)
- MOVW $444, R12
- B runtime·callbackasm1(SB)
- MOVW $445, R12
- B runtime·callbackasm1(SB)
- MOVW $446, R12
- B runtime·callbackasm1(SB)
- MOVW $447, R12
- B runtime·callbackasm1(SB)
- MOVW $448, R12
- B runtime·callbackasm1(SB)
- MOVW $449, R12
- B runtime·callbackasm1(SB)
- MOVW $450, R12
- B runtime·callbackasm1(SB)
- MOVW $451, R12
- B runtime·callbackasm1(SB)
- MOVW $452, R12
- B runtime·callbackasm1(SB)
- MOVW $453, R12
- B runtime·callbackasm1(SB)
- MOVW $454, R12
- B runtime·callbackasm1(SB)
- MOVW $455, R12
- B runtime·callbackasm1(SB)
- MOVW $456, R12
- B runtime·callbackasm1(SB)
- MOVW $457, R12
- B runtime·callbackasm1(SB)
- MOVW $458, R12
- B runtime·callbackasm1(SB)
- MOVW $459, R12
- B runtime·callbackasm1(SB)
- MOVW $460, R12
- B runtime·callbackasm1(SB)
- MOVW $461, R12
- B runtime·callbackasm1(SB)
- MOVW $462, R12
- B runtime·callbackasm1(SB)
- MOVW $463, R12
- B runtime·callbackasm1(SB)
- MOVW $464, R12
- B runtime·callbackasm1(SB)
- MOVW $465, R12
- B runtime·callbackasm1(SB)
- MOVW $466, R12
- B runtime·callbackasm1(SB)
- MOVW $467, R12
- B runtime·callbackasm1(SB)
- MOVW $468, R12
- B runtime·callbackasm1(SB)
- MOVW $469, R12
- B runtime·callbackasm1(SB)
- MOVW $470, R12
- B runtime·callbackasm1(SB)
- MOVW $471, R12
- B runtime·callbackasm1(SB)
- MOVW $472, R12
- B runtime·callbackasm1(SB)
- MOVW $473, R12
- B runtime·callbackasm1(SB)
- MOVW $474, R12
- B runtime·callbackasm1(SB)
- MOVW $475, R12
- B runtime·callbackasm1(SB)
- MOVW $476, R12
- B runtime·callbackasm1(SB)
- MOVW $477, R12
- B runtime·callbackasm1(SB)
- MOVW $478, R12
- B runtime·callbackasm1(SB)
- MOVW $479, R12
- B runtime·callbackasm1(SB)
- MOVW $480, R12
- B runtime·callbackasm1(SB)
- MOVW $481, R12
- B runtime·callbackasm1(SB)
- MOVW $482, R12
- B runtime·callbackasm1(SB)
- MOVW $483, R12
- B runtime·callbackasm1(SB)
- MOVW $484, R12
- B runtime·callbackasm1(SB)
- MOVW $485, R12
- B runtime·callbackasm1(SB)
- MOVW $486, R12
- B runtime·callbackasm1(SB)
- MOVW $487, R12
- B runtime·callbackasm1(SB)
- MOVW $488, R12
- B runtime·callbackasm1(SB)
- MOVW $489, R12
- B runtime·callbackasm1(SB)
- MOVW $490, R12
- B runtime·callbackasm1(SB)
- MOVW $491, R12
- B runtime·callbackasm1(SB)
- MOVW $492, R12
- B runtime·callbackasm1(SB)
- MOVW $493, R12
- B runtime·callbackasm1(SB)
- MOVW $494, R12
- B runtime·callbackasm1(SB)
- MOVW $495, R12
- B runtime·callbackasm1(SB)
- MOVW $496, R12
- B runtime·callbackasm1(SB)
- MOVW $497, R12
- B runtime·callbackasm1(SB)
- MOVW $498, R12
- B runtime·callbackasm1(SB)
- MOVW $499, R12
- B runtime·callbackasm1(SB)
- MOVW $500, R12
- B runtime·callbackasm1(SB)
- MOVW $501, R12
- B runtime·callbackasm1(SB)
- MOVW $502, R12
- B runtime·callbackasm1(SB)
- MOVW $503, R12
- B runtime·callbackasm1(SB)
- MOVW $504, R12
- B runtime·callbackasm1(SB)
- MOVW $505, R12
- B runtime·callbackasm1(SB)
- MOVW $506, R12
- B runtime·callbackasm1(SB)
- MOVW $507, R12
- B runtime·callbackasm1(SB)
- MOVW $508, R12
- B runtime·callbackasm1(SB)
- MOVW $509, R12
- B runtime·callbackasm1(SB)
- MOVW $510, R12
- B runtime·callbackasm1(SB)
- MOVW $511, R12
- B runtime·callbackasm1(SB)
- MOVW $512, R12
- B runtime·callbackasm1(SB)
- MOVW $513, R12
- B runtime·callbackasm1(SB)
- MOVW $514, R12
- B runtime·callbackasm1(SB)
- MOVW $515, R12
- B runtime·callbackasm1(SB)
- MOVW $516, R12
- B runtime·callbackasm1(SB)
- MOVW $517, R12
- B runtime·callbackasm1(SB)
- MOVW $518, R12
- B runtime·callbackasm1(SB)
- MOVW $519, R12
- B runtime·callbackasm1(SB)
- MOVW $520, R12
- B runtime·callbackasm1(SB)
- MOVW $521, R12
- B runtime·callbackasm1(SB)
- MOVW $522, R12
- B runtime·callbackasm1(SB)
- MOVW $523, R12
- B runtime·callbackasm1(SB)
- MOVW $524, R12
- B runtime·callbackasm1(SB)
- MOVW $525, R12
- B runtime·callbackasm1(SB)
- MOVW $526, R12
- B runtime·callbackasm1(SB)
- MOVW $527, R12
- B runtime·callbackasm1(SB)
- MOVW $528, R12
- B runtime·callbackasm1(SB)
- MOVW $529, R12
- B runtime·callbackasm1(SB)
- MOVW $530, R12
- B runtime·callbackasm1(SB)
- MOVW $531, R12
- B runtime·callbackasm1(SB)
- MOVW $532, R12
- B runtime·callbackasm1(SB)
- MOVW $533, R12
- B runtime·callbackasm1(SB)
- MOVW $534, R12
- B runtime·callbackasm1(SB)
- MOVW $535, R12
- B runtime·callbackasm1(SB)
- MOVW $536, R12
- B runtime·callbackasm1(SB)
- MOVW $537, R12
- B runtime·callbackasm1(SB)
- MOVW $538, R12
- B runtime·callbackasm1(SB)
- MOVW $539, R12
- B runtime·callbackasm1(SB)
- MOVW $540, R12
- B runtime·callbackasm1(SB)
- MOVW $541, R12
- B runtime·callbackasm1(SB)
- MOVW $542, R12
- B runtime·callbackasm1(SB)
- MOVW $543, R12
- B runtime·callbackasm1(SB)
- MOVW $544, R12
- B runtime·callbackasm1(SB)
- MOVW $545, R12
- B runtime·callbackasm1(SB)
- MOVW $546, R12
- B runtime·callbackasm1(SB)
- MOVW $547, R12
- B runtime·callbackasm1(SB)
- MOVW $548, R12
- B runtime·callbackasm1(SB)
- MOVW $549, R12
- B runtime·callbackasm1(SB)
- MOVW $550, R12
- B runtime·callbackasm1(SB)
- MOVW $551, R12
- B runtime·callbackasm1(SB)
- MOVW $552, R12
- B runtime·callbackasm1(SB)
- MOVW $553, R12
- B runtime·callbackasm1(SB)
- MOVW $554, R12
- B runtime·callbackasm1(SB)
- MOVW $555, R12
- B runtime·callbackasm1(SB)
- MOVW $556, R12
- B runtime·callbackasm1(SB)
- MOVW $557, R12
- B runtime·callbackasm1(SB)
- MOVW $558, R12
- B runtime·callbackasm1(SB)
- MOVW $559, R12
- B runtime·callbackasm1(SB)
- MOVW $560, R12
- B runtime·callbackasm1(SB)
- MOVW $561, R12
- B runtime·callbackasm1(SB)
- MOVW $562, R12
- B runtime·callbackasm1(SB)
- MOVW $563, R12
- B runtime·callbackasm1(SB)
- MOVW $564, R12
- B runtime·callbackasm1(SB)
- MOVW $565, R12
- B runtime·callbackasm1(SB)
- MOVW $566, R12
- B runtime·callbackasm1(SB)
- MOVW $567, R12
- B runtime·callbackasm1(SB)
- MOVW $568, R12
- B runtime·callbackasm1(SB)
- MOVW $569, R12
- B runtime·callbackasm1(SB)
- MOVW $570, R12
- B runtime·callbackasm1(SB)
- MOVW $571, R12
- B runtime·callbackasm1(SB)
- MOVW $572, R12
- B runtime·callbackasm1(SB)
- MOVW $573, R12
- B runtime·callbackasm1(SB)
- MOVW $574, R12
- B runtime·callbackasm1(SB)
- MOVW $575, R12
- B runtime·callbackasm1(SB)
- MOVW $576, R12
- B runtime·callbackasm1(SB)
- MOVW $577, R12
- B runtime·callbackasm1(SB)
- MOVW $578, R12
- B runtime·callbackasm1(SB)
- MOVW $579, R12
- B runtime·callbackasm1(SB)
- MOVW $580, R12
- B runtime·callbackasm1(SB)
- MOVW $581, R12
- B runtime·callbackasm1(SB)
- MOVW $582, R12
- B runtime·callbackasm1(SB)
- MOVW $583, R12
- B runtime·callbackasm1(SB)
- MOVW $584, R12
- B runtime·callbackasm1(SB)
- MOVW $585, R12
- B runtime·callbackasm1(SB)
- MOVW $586, R12
- B runtime·callbackasm1(SB)
- MOVW $587, R12
- B runtime·callbackasm1(SB)
- MOVW $588, R12
- B runtime·callbackasm1(SB)
- MOVW $589, R12
- B runtime·callbackasm1(SB)
- MOVW $590, R12
- B runtime·callbackasm1(SB)
- MOVW $591, R12
- B runtime·callbackasm1(SB)
- MOVW $592, R12
- B runtime·callbackasm1(SB)
- MOVW $593, R12
- B runtime·callbackasm1(SB)
- MOVW $594, R12
- B runtime·callbackasm1(SB)
- MOVW $595, R12
- B runtime·callbackasm1(SB)
- MOVW $596, R12
- B runtime·callbackasm1(SB)
- MOVW $597, R12
- B runtime·callbackasm1(SB)
- MOVW $598, R12
- B runtime·callbackasm1(SB)
- MOVW $599, R12
- B runtime·callbackasm1(SB)
- MOVW $600, R12
- B runtime·callbackasm1(SB)
- MOVW $601, R12
- B runtime·callbackasm1(SB)
- MOVW $602, R12
- B runtime·callbackasm1(SB)
- MOVW $603, R12
- B runtime·callbackasm1(SB)
- MOVW $604, R12
- B runtime·callbackasm1(SB)
- MOVW $605, R12
- B runtime·callbackasm1(SB)
- MOVW $606, R12
- B runtime·callbackasm1(SB)
- MOVW $607, R12
- B runtime·callbackasm1(SB)
- MOVW $608, R12
- B runtime·callbackasm1(SB)
- MOVW $609, R12
- B runtime·callbackasm1(SB)
- MOVW $610, R12
- B runtime·callbackasm1(SB)
- MOVW $611, R12
- B runtime·callbackasm1(SB)
- MOVW $612, R12
- B runtime·callbackasm1(SB)
- MOVW $613, R12
- B runtime·callbackasm1(SB)
- MOVW $614, R12
- B runtime·callbackasm1(SB)
- MOVW $615, R12
- B runtime·callbackasm1(SB)
- MOVW $616, R12
- B runtime·callbackasm1(SB)
- MOVW $617, R12
- B runtime·callbackasm1(SB)
- MOVW $618, R12
- B runtime·callbackasm1(SB)
- MOVW $619, R12
- B runtime·callbackasm1(SB)
- MOVW $620, R12
- B runtime·callbackasm1(SB)
- MOVW $621, R12
- B runtime·callbackasm1(SB)
- MOVW $622, R12
- B runtime·callbackasm1(SB)
- MOVW $623, R12
- B runtime·callbackasm1(SB)
- MOVW $624, R12
- B runtime·callbackasm1(SB)
- MOVW $625, R12
- B runtime·callbackasm1(SB)
- MOVW $626, R12
- B runtime·callbackasm1(SB)
- MOVW $627, R12
- B runtime·callbackasm1(SB)
- MOVW $628, R12
- B runtime·callbackasm1(SB)
- MOVW $629, R12
- B runtime·callbackasm1(SB)
- MOVW $630, R12
- B runtime·callbackasm1(SB)
- MOVW $631, R12
- B runtime·callbackasm1(SB)
- MOVW $632, R12
- B runtime·callbackasm1(SB)
- MOVW $633, R12
- B runtime·callbackasm1(SB)
- MOVW $634, R12
- B runtime·callbackasm1(SB)
- MOVW $635, R12
- B runtime·callbackasm1(SB)
- MOVW $636, R12
- B runtime·callbackasm1(SB)
- MOVW $637, R12
- B runtime·callbackasm1(SB)
- MOVW $638, R12
- B runtime·callbackasm1(SB)
- MOVW $639, R12
- B runtime·callbackasm1(SB)
- MOVW $640, R12
- B runtime·callbackasm1(SB)
- MOVW $641, R12
- B runtime·callbackasm1(SB)
- MOVW $642, R12
- B runtime·callbackasm1(SB)
- MOVW $643, R12
- B runtime·callbackasm1(SB)
- MOVW $644, R12
- B runtime·callbackasm1(SB)
- MOVW $645, R12
- B runtime·callbackasm1(SB)
- MOVW $646, R12
- B runtime·callbackasm1(SB)
- MOVW $647, R12
- B runtime·callbackasm1(SB)
- MOVW $648, R12
- B runtime·callbackasm1(SB)
- MOVW $649, R12
- B runtime·callbackasm1(SB)
- MOVW $650, R12
- B runtime·callbackasm1(SB)
- MOVW $651, R12
- B runtime·callbackasm1(SB)
- MOVW $652, R12
- B runtime·callbackasm1(SB)
- MOVW $653, R12
- B runtime·callbackasm1(SB)
- MOVW $654, R12
- B runtime·callbackasm1(SB)
- MOVW $655, R12
- B runtime·callbackasm1(SB)
- MOVW $656, R12
- B runtime·callbackasm1(SB)
- MOVW $657, R12
- B runtime·callbackasm1(SB)
- MOVW $658, R12
- B runtime·callbackasm1(SB)
- MOVW $659, R12
- B runtime·callbackasm1(SB)
- MOVW $660, R12
- B runtime·callbackasm1(SB)
- MOVW $661, R12
- B runtime·callbackasm1(SB)
- MOVW $662, R12
- B runtime·callbackasm1(SB)
- MOVW $663, R12
- B runtime·callbackasm1(SB)
- MOVW $664, R12
- B runtime·callbackasm1(SB)
- MOVW $665, R12
- B runtime·callbackasm1(SB)
- MOVW $666, R12
- B runtime·callbackasm1(SB)
- MOVW $667, R12
- B runtime·callbackasm1(SB)
- MOVW $668, R12
- B runtime·callbackasm1(SB)
- MOVW $669, R12
- B runtime·callbackasm1(SB)
- MOVW $670, R12
- B runtime·callbackasm1(SB)
- MOVW $671, R12
- B runtime·callbackasm1(SB)
- MOVW $672, R12
- B runtime·callbackasm1(SB)
- MOVW $673, R12
- B runtime·callbackasm1(SB)
- MOVW $674, R12
- B runtime·callbackasm1(SB)
- MOVW $675, R12
- B runtime·callbackasm1(SB)
- MOVW $676, R12
- B runtime·callbackasm1(SB)
- MOVW $677, R12
- B runtime·callbackasm1(SB)
- MOVW $678, R12
- B runtime·callbackasm1(SB)
- MOVW $679, R12
- B runtime·callbackasm1(SB)
- MOVW $680, R12
- B runtime·callbackasm1(SB)
- MOVW $681, R12
- B runtime·callbackasm1(SB)
- MOVW $682, R12
- B runtime·callbackasm1(SB)
- MOVW $683, R12
- B runtime·callbackasm1(SB)
- MOVW $684, R12
- B runtime·callbackasm1(SB)
- MOVW $685, R12
- B runtime·callbackasm1(SB)
- MOVW $686, R12
- B runtime·callbackasm1(SB)
- MOVW $687, R12
- B runtime·callbackasm1(SB)
- MOVW $688, R12
- B runtime·callbackasm1(SB)
- MOVW $689, R12
- B runtime·callbackasm1(SB)
- MOVW $690, R12
- B runtime·callbackasm1(SB)
- MOVW $691, R12
- B runtime·callbackasm1(SB)
- MOVW $692, R12
- B runtime·callbackasm1(SB)
- MOVW $693, R12
- B runtime·callbackasm1(SB)
- MOVW $694, R12
- B runtime·callbackasm1(SB)
- MOVW $695, R12
- B runtime·callbackasm1(SB)
- MOVW $696, R12
- B runtime·callbackasm1(SB)
- MOVW $697, R12
- B runtime·callbackasm1(SB)
- MOVW $698, R12
- B runtime·callbackasm1(SB)
- MOVW $699, R12
- B runtime·callbackasm1(SB)
- MOVW $700, R12
- B runtime·callbackasm1(SB)
- MOVW $701, R12
- B runtime·callbackasm1(SB)
- MOVW $702, R12
- B runtime·callbackasm1(SB)
- MOVW $703, R12
- B runtime·callbackasm1(SB)
- MOVW $704, R12
- B runtime·callbackasm1(SB)
- MOVW $705, R12
- B runtime·callbackasm1(SB)
- MOVW $706, R12
- B runtime·callbackasm1(SB)
- MOVW $707, R12
- B runtime·callbackasm1(SB)
- MOVW $708, R12
- B runtime·callbackasm1(SB)
- MOVW $709, R12
- B runtime·callbackasm1(SB)
- MOVW $710, R12
- B runtime·callbackasm1(SB)
- MOVW $711, R12
- B runtime·callbackasm1(SB)
- MOVW $712, R12
- B runtime·callbackasm1(SB)
- MOVW $713, R12
- B runtime·callbackasm1(SB)
- MOVW $714, R12
- B runtime·callbackasm1(SB)
- MOVW $715, R12
- B runtime·callbackasm1(SB)
- MOVW $716, R12
- B runtime·callbackasm1(SB)
- MOVW $717, R12
- B runtime·callbackasm1(SB)
- MOVW $718, R12
- B runtime·callbackasm1(SB)
- MOVW $719, R12
- B runtime·callbackasm1(SB)
- MOVW $720, R12
- B runtime·callbackasm1(SB)
- MOVW $721, R12
- B runtime·callbackasm1(SB)
- MOVW $722, R12
- B runtime·callbackasm1(SB)
- MOVW $723, R12
- B runtime·callbackasm1(SB)
- MOVW $724, R12
- B runtime·callbackasm1(SB)
- MOVW $725, R12
- B runtime·callbackasm1(SB)
- MOVW $726, R12
- B runtime·callbackasm1(SB)
- MOVW $727, R12
- B runtime·callbackasm1(SB)
- MOVW $728, R12
- B runtime·callbackasm1(SB)
- MOVW $729, R12
- B runtime·callbackasm1(SB)
- MOVW $730, R12
- B runtime·callbackasm1(SB)
- MOVW $731, R12
- B runtime·callbackasm1(SB)
- MOVW $732, R12
- B runtime·callbackasm1(SB)
- MOVW $733, R12
- B runtime·callbackasm1(SB)
- MOVW $734, R12
- B runtime·callbackasm1(SB)
- MOVW $735, R12
- B runtime·callbackasm1(SB)
- MOVW $736, R12
- B runtime·callbackasm1(SB)
- MOVW $737, R12
- B runtime·callbackasm1(SB)
- MOVW $738, R12
- B runtime·callbackasm1(SB)
- MOVW $739, R12
- B runtime·callbackasm1(SB)
- MOVW $740, R12
- B runtime·callbackasm1(SB)
- MOVW $741, R12
- B runtime·callbackasm1(SB)
- MOVW $742, R12
- B runtime·callbackasm1(SB)
- MOVW $743, R12
- B runtime·callbackasm1(SB)
- MOVW $744, R12
- B runtime·callbackasm1(SB)
- MOVW $745, R12
- B runtime·callbackasm1(SB)
- MOVW $746, R12
- B runtime·callbackasm1(SB)
- MOVW $747, R12
- B runtime·callbackasm1(SB)
- MOVW $748, R12
- B runtime·callbackasm1(SB)
- MOVW $749, R12
- B runtime·callbackasm1(SB)
- MOVW $750, R12
- B runtime·callbackasm1(SB)
- MOVW $751, R12
- B runtime·callbackasm1(SB)
- MOVW $752, R12
- B runtime·callbackasm1(SB)
- MOVW $753, R12
- B runtime·callbackasm1(SB)
- MOVW $754, R12
- B runtime·callbackasm1(SB)
- MOVW $755, R12
- B runtime·callbackasm1(SB)
- MOVW $756, R12
- B runtime·callbackasm1(SB)
- MOVW $757, R12
- B runtime·callbackasm1(SB)
- MOVW $758, R12
- B runtime·callbackasm1(SB)
- MOVW $759, R12
- B runtime·callbackasm1(SB)
- MOVW $760, R12
- B runtime·callbackasm1(SB)
- MOVW $761, R12
- B runtime·callbackasm1(SB)
- MOVW $762, R12
- B runtime·callbackasm1(SB)
- MOVW $763, R12
- B runtime·callbackasm1(SB)
- MOVW $764, R12
- B runtime·callbackasm1(SB)
- MOVW $765, R12
- B runtime·callbackasm1(SB)
- MOVW $766, R12
- B runtime·callbackasm1(SB)
- MOVW $767, R12
- B runtime·callbackasm1(SB)
- MOVW $768, R12
- B runtime·callbackasm1(SB)
- MOVW $769, R12
- B runtime·callbackasm1(SB)
- MOVW $770, R12
- B runtime·callbackasm1(SB)
- MOVW $771, R12
- B runtime·callbackasm1(SB)
- MOVW $772, R12
- B runtime·callbackasm1(SB)
- MOVW $773, R12
- B runtime·callbackasm1(SB)
- MOVW $774, R12
- B runtime·callbackasm1(SB)
- MOVW $775, R12
- B runtime·callbackasm1(SB)
- MOVW $776, R12
- B runtime·callbackasm1(SB)
- MOVW $777, R12
- B runtime·callbackasm1(SB)
- MOVW $778, R12
- B runtime·callbackasm1(SB)
- MOVW $779, R12
- B runtime·callbackasm1(SB)
- MOVW $780, R12
- B runtime·callbackasm1(SB)
- MOVW $781, R12
- B runtime·callbackasm1(SB)
- MOVW $782, R12
- B runtime·callbackasm1(SB)
- MOVW $783, R12
- B runtime·callbackasm1(SB)
- MOVW $784, R12
- B runtime·callbackasm1(SB)
- MOVW $785, R12
- B runtime·callbackasm1(SB)
- MOVW $786, R12
- B runtime·callbackasm1(SB)
- MOVW $787, R12
- B runtime·callbackasm1(SB)
- MOVW $788, R12
- B runtime·callbackasm1(SB)
- MOVW $789, R12
- B runtime·callbackasm1(SB)
- MOVW $790, R12
- B runtime·callbackasm1(SB)
- MOVW $791, R12
- B runtime·callbackasm1(SB)
- MOVW $792, R12
- B runtime·callbackasm1(SB)
- MOVW $793, R12
- B runtime·callbackasm1(SB)
- MOVW $794, R12
- B runtime·callbackasm1(SB)
- MOVW $795, R12
- B runtime·callbackasm1(SB)
- MOVW $796, R12
- B runtime·callbackasm1(SB)
- MOVW $797, R12
- B runtime·callbackasm1(SB)
- MOVW $798, R12
- B runtime·callbackasm1(SB)
- MOVW $799, R12
- B runtime·callbackasm1(SB)
- MOVW $800, R12
- B runtime·callbackasm1(SB)
- MOVW $801, R12
- B runtime·callbackasm1(SB)
- MOVW $802, R12
- B runtime·callbackasm1(SB)
- MOVW $803, R12
- B runtime·callbackasm1(SB)
- MOVW $804, R12
- B runtime·callbackasm1(SB)
- MOVW $805, R12
- B runtime·callbackasm1(SB)
- MOVW $806, R12
- B runtime·callbackasm1(SB)
- MOVW $807, R12
- B runtime·callbackasm1(SB)
- MOVW $808, R12
- B runtime·callbackasm1(SB)
- MOVW $809, R12
- B runtime·callbackasm1(SB)
- MOVW $810, R12
- B runtime·callbackasm1(SB)
- MOVW $811, R12
- B runtime·callbackasm1(SB)
- MOVW $812, R12
- B runtime·callbackasm1(SB)
- MOVW $813, R12
- B runtime·callbackasm1(SB)
- MOVW $814, R12
- B runtime·callbackasm1(SB)
- MOVW $815, R12
- B runtime·callbackasm1(SB)
- MOVW $816, R12
- B runtime·callbackasm1(SB)
- MOVW $817, R12
- B runtime·callbackasm1(SB)
- MOVW $818, R12
- B runtime·callbackasm1(SB)
- MOVW $819, R12
- B runtime·callbackasm1(SB)
- MOVW $820, R12
- B runtime·callbackasm1(SB)
- MOVW $821, R12
- B runtime·callbackasm1(SB)
- MOVW $822, R12
- B runtime·callbackasm1(SB)
- MOVW $823, R12
- B runtime·callbackasm1(SB)
- MOVW $824, R12
- B runtime·callbackasm1(SB)
- MOVW $825, R12
- B runtime·callbackasm1(SB)
- MOVW $826, R12
- B runtime·callbackasm1(SB)
- MOVW $827, R12
- B runtime·callbackasm1(SB)
- MOVW $828, R12
- B runtime·callbackasm1(SB)
- MOVW $829, R12
- B runtime·callbackasm1(SB)
- MOVW $830, R12
- B runtime·callbackasm1(SB)
- MOVW $831, R12
- B runtime·callbackasm1(SB)
- MOVW $832, R12
- B runtime·callbackasm1(SB)
- MOVW $833, R12
- B runtime·callbackasm1(SB)
- MOVW $834, R12
- B runtime·callbackasm1(SB)
- MOVW $835, R12
- B runtime·callbackasm1(SB)
- MOVW $836, R12
- B runtime·callbackasm1(SB)
- MOVW $837, R12
- B runtime·callbackasm1(SB)
- MOVW $838, R12
- B runtime·callbackasm1(SB)
- MOVW $839, R12
- B runtime·callbackasm1(SB)
- MOVW $840, R12
- B runtime·callbackasm1(SB)
- MOVW $841, R12
- B runtime·callbackasm1(SB)
- MOVW $842, R12
- B runtime·callbackasm1(SB)
- MOVW $843, R12
- B runtime·callbackasm1(SB)
- MOVW $844, R12
- B runtime·callbackasm1(SB)
- MOVW $845, R12
- B runtime·callbackasm1(SB)
- MOVW $846, R12
- B runtime·callbackasm1(SB)
- MOVW $847, R12
- B runtime·callbackasm1(SB)
- MOVW $848, R12
- B runtime·callbackasm1(SB)
- MOVW $849, R12
- B runtime·callbackasm1(SB)
- MOVW $850, R12
- B runtime·callbackasm1(SB)
- MOVW $851, R12
- B runtime·callbackasm1(SB)
- MOVW $852, R12
- B runtime·callbackasm1(SB)
- MOVW $853, R12
- B runtime·callbackasm1(SB)
- MOVW $854, R12
- B runtime·callbackasm1(SB)
- MOVW $855, R12
- B runtime·callbackasm1(SB)
- MOVW $856, R12
- B runtime·callbackasm1(SB)
- MOVW $857, R12
- B runtime·callbackasm1(SB)
- MOVW $858, R12
- B runtime·callbackasm1(SB)
- MOVW $859, R12
- B runtime·callbackasm1(SB)
- MOVW $860, R12
- B runtime·callbackasm1(SB)
- MOVW $861, R12
- B runtime·callbackasm1(SB)
- MOVW $862, R12
- B runtime·callbackasm1(SB)
- MOVW $863, R12
- B runtime·callbackasm1(SB)
- MOVW $864, R12
- B runtime·callbackasm1(SB)
- MOVW $865, R12
- B runtime·callbackasm1(SB)
- MOVW $866, R12
- B runtime·callbackasm1(SB)
- MOVW $867, R12
- B runtime·callbackasm1(SB)
- MOVW $868, R12
- B runtime·callbackasm1(SB)
- MOVW $869, R12
- B runtime·callbackasm1(SB)
- MOVW $870, R12
- B runtime·callbackasm1(SB)
- MOVW $871, R12
- B runtime·callbackasm1(SB)
- MOVW $872, R12
- B runtime·callbackasm1(SB)
- MOVW $873, R12
- B runtime·callbackasm1(SB)
- MOVW $874, R12
- B runtime·callbackasm1(SB)
- MOVW $875, R12
- B runtime·callbackasm1(SB)
- MOVW $876, R12
- B runtime·callbackasm1(SB)
- MOVW $877, R12
- B runtime·callbackasm1(SB)
- MOVW $878, R12
- B runtime·callbackasm1(SB)
- MOVW $879, R12
- B runtime·callbackasm1(SB)
- MOVW $880, R12
- B runtime·callbackasm1(SB)
- MOVW $881, R12
- B runtime·callbackasm1(SB)
- MOVW $882, R12
- B runtime·callbackasm1(SB)
- MOVW $883, R12
- B runtime·callbackasm1(SB)
- MOVW $884, R12
- B runtime·callbackasm1(SB)
- MOVW $885, R12
- B runtime·callbackasm1(SB)
- MOVW $886, R12
- B runtime·callbackasm1(SB)
- MOVW $887, R12
- B runtime·callbackasm1(SB)
- MOVW $888, R12
- B runtime·callbackasm1(SB)
- MOVW $889, R12
- B runtime·callbackasm1(SB)
- MOVW $890, R12
- B runtime·callbackasm1(SB)
- MOVW $891, R12
- B runtime·callbackasm1(SB)
- MOVW $892, R12
- B runtime·callbackasm1(SB)
- MOVW $893, R12
- B runtime·callbackasm1(SB)
- MOVW $894, R12
- B runtime·callbackasm1(SB)
- MOVW $895, R12
- B runtime·callbackasm1(SB)
- MOVW $896, R12
- B runtime·callbackasm1(SB)
- MOVW $897, R12
- B runtime·callbackasm1(SB)
- MOVW $898, R12
- B runtime·callbackasm1(SB)
- MOVW $899, R12
- B runtime·callbackasm1(SB)
- MOVW $900, R12
- B runtime·callbackasm1(SB)
- MOVW $901, R12
- B runtime·callbackasm1(SB)
- MOVW $902, R12
- B runtime·callbackasm1(SB)
- MOVW $903, R12
- B runtime·callbackasm1(SB)
- MOVW $904, R12
- B runtime·callbackasm1(SB)
- MOVW $905, R12
- B runtime·callbackasm1(SB)
- MOVW $906, R12
- B runtime·callbackasm1(SB)
- MOVW $907, R12
- B runtime·callbackasm1(SB)
- MOVW $908, R12
- B runtime·callbackasm1(SB)
- MOVW $909, R12
- B runtime·callbackasm1(SB)
- MOVW $910, R12
- B runtime·callbackasm1(SB)
- MOVW $911, R12
- B runtime·callbackasm1(SB)
- MOVW $912, R12
- B runtime·callbackasm1(SB)
- MOVW $913, R12
- B runtime·callbackasm1(SB)
- MOVW $914, R12
- B runtime·callbackasm1(SB)
- MOVW $915, R12
- B runtime·callbackasm1(SB)
- MOVW $916, R12
- B runtime·callbackasm1(SB)
- MOVW $917, R12
- B runtime·callbackasm1(SB)
- MOVW $918, R12
- B runtime·callbackasm1(SB)
- MOVW $919, R12
- B runtime·callbackasm1(SB)
- MOVW $920, R12
- B runtime·callbackasm1(SB)
- MOVW $921, R12
- B runtime·callbackasm1(SB)
- MOVW $922, R12
- B runtime·callbackasm1(SB)
- MOVW $923, R12
- B runtime·callbackasm1(SB)
- MOVW $924, R12
- B runtime·callbackasm1(SB)
- MOVW $925, R12
- B runtime·callbackasm1(SB)
- MOVW $926, R12
- B runtime·callbackasm1(SB)
- MOVW $927, R12
- B runtime·callbackasm1(SB)
- MOVW $928, R12
- B runtime·callbackasm1(SB)
- MOVW $929, R12
- B runtime·callbackasm1(SB)
- MOVW $930, R12
- B runtime·callbackasm1(SB)
- MOVW $931, R12
- B runtime·callbackasm1(SB)
- MOVW $932, R12
- B runtime·callbackasm1(SB)
- MOVW $933, R12
- B runtime·callbackasm1(SB)
- MOVW $934, R12
- B runtime·callbackasm1(SB)
- MOVW $935, R12
- B runtime·callbackasm1(SB)
- MOVW $936, R12
- B runtime·callbackasm1(SB)
- MOVW $937, R12
- B runtime·callbackasm1(SB)
- MOVW $938, R12
- B runtime·callbackasm1(SB)
- MOVW $939, R12
- B runtime·callbackasm1(SB)
- MOVW $940, R12
- B runtime·callbackasm1(SB)
- MOVW $941, R12
- B runtime·callbackasm1(SB)
- MOVW $942, R12
- B runtime·callbackasm1(SB)
- MOVW $943, R12
- B runtime·callbackasm1(SB)
- MOVW $944, R12
- B runtime·callbackasm1(SB)
- MOVW $945, R12
- B runtime·callbackasm1(SB)
- MOVW $946, R12
- B runtime·callbackasm1(SB)
- MOVW $947, R12
- B runtime·callbackasm1(SB)
- MOVW $948, R12
- B runtime·callbackasm1(SB)
- MOVW $949, R12
- B runtime·callbackasm1(SB)
- MOVW $950, R12
- B runtime·callbackasm1(SB)
- MOVW $951, R12
- B runtime·callbackasm1(SB)
- MOVW $952, R12
- B runtime·callbackasm1(SB)
- MOVW $953, R12
- B runtime·callbackasm1(SB)
- MOVW $954, R12
- B runtime·callbackasm1(SB)
- MOVW $955, R12
- B runtime·callbackasm1(SB)
- MOVW $956, R12
- B runtime·callbackasm1(SB)
- MOVW $957, R12
- B runtime·callbackasm1(SB)
- MOVW $958, R12
- B runtime·callbackasm1(SB)
- MOVW $959, R12
- B runtime·callbackasm1(SB)
- MOVW $960, R12
- B runtime·callbackasm1(SB)
- MOVW $961, R12
- B runtime·callbackasm1(SB)
- MOVW $962, R12
- B runtime·callbackasm1(SB)
- MOVW $963, R12
- B runtime·callbackasm1(SB)
- MOVW $964, R12
- B runtime·callbackasm1(SB)
- MOVW $965, R12
- B runtime·callbackasm1(SB)
- MOVW $966, R12
- B runtime·callbackasm1(SB)
- MOVW $967, R12
- B runtime·callbackasm1(SB)
- MOVW $968, R12
- B runtime·callbackasm1(SB)
- MOVW $969, R12
- B runtime·callbackasm1(SB)
- MOVW $970, R12
- B runtime·callbackasm1(SB)
- MOVW $971, R12
- B runtime·callbackasm1(SB)
- MOVW $972, R12
- B runtime·callbackasm1(SB)
- MOVW $973, R12
- B runtime·callbackasm1(SB)
- MOVW $974, R12
- B runtime·callbackasm1(SB)
- MOVW $975, R12
- B runtime·callbackasm1(SB)
- MOVW $976, R12
- B runtime·callbackasm1(SB)
- MOVW $977, R12
- B runtime·callbackasm1(SB)
- MOVW $978, R12
- B runtime·callbackasm1(SB)
- MOVW $979, R12
- B runtime·callbackasm1(SB)
- MOVW $980, R12
- B runtime·callbackasm1(SB)
- MOVW $981, R12
- B runtime·callbackasm1(SB)
- MOVW $982, R12
- B runtime·callbackasm1(SB)
- MOVW $983, R12
- B runtime·callbackasm1(SB)
- MOVW $984, R12
- B runtime·callbackasm1(SB)
- MOVW $985, R12
- B runtime·callbackasm1(SB)
- MOVW $986, R12
- B runtime·callbackasm1(SB)
- MOVW $987, R12
- B runtime·callbackasm1(SB)
- MOVW $988, R12
- B runtime·callbackasm1(SB)
- MOVW $989, R12
- B runtime·callbackasm1(SB)
- MOVW $990, R12
- B runtime·callbackasm1(SB)
- MOVW $991, R12
- B runtime·callbackasm1(SB)
- MOVW $992, R12
- B runtime·callbackasm1(SB)
- MOVW $993, R12
- B runtime·callbackasm1(SB)
- MOVW $994, R12
- B runtime·callbackasm1(SB)
- MOVW $995, R12
- B runtime·callbackasm1(SB)
- MOVW $996, R12
- B runtime·callbackasm1(SB)
- MOVW $997, R12
- B runtime·callbackasm1(SB)
- MOVW $998, R12
- B runtime·callbackasm1(SB)
- MOVW $999, R12
- B runtime·callbackasm1(SB)
- MOVW $1000, R12
- B runtime·callbackasm1(SB)
- MOVW $1001, R12
- B runtime·callbackasm1(SB)
- MOVW $1002, R12
- B runtime·callbackasm1(SB)
- MOVW $1003, R12
- B runtime·callbackasm1(SB)
- MOVW $1004, R12
- B runtime·callbackasm1(SB)
- MOVW $1005, R12
- B runtime·callbackasm1(SB)
- MOVW $1006, R12
- B runtime·callbackasm1(SB)
- MOVW $1007, R12
- B runtime·callbackasm1(SB)
- MOVW $1008, R12
- B runtime·callbackasm1(SB)
- MOVW $1009, R12
- B runtime·callbackasm1(SB)
- MOVW $1010, R12
- B runtime·callbackasm1(SB)
- MOVW $1011, R12
- B runtime·callbackasm1(SB)
- MOVW $1012, R12
- B runtime·callbackasm1(SB)
- MOVW $1013, R12
- B runtime·callbackasm1(SB)
- MOVW $1014, R12
- B runtime·callbackasm1(SB)
- MOVW $1015, R12
- B runtime·callbackasm1(SB)
- MOVW $1016, R12
- B runtime·callbackasm1(SB)
- MOVW $1017, R12
- B runtime·callbackasm1(SB)
- MOVW $1018, R12
- B runtime·callbackasm1(SB)
- MOVW $1019, R12
- B runtime·callbackasm1(SB)
- MOVW $1020, R12
- B runtime·callbackasm1(SB)
- MOVW $1021, R12
- B runtime·callbackasm1(SB)
- MOVW $1022, R12
- B runtime·callbackasm1(SB)
- MOVW $1023, R12
- B runtime·callbackasm1(SB)
- MOVW $1024, R12
- B runtime·callbackasm1(SB)
- MOVW $1025, R12
- B runtime·callbackasm1(SB)
- MOVW $1026, R12
- B runtime·callbackasm1(SB)
- MOVW $1027, R12
- B runtime·callbackasm1(SB)
- MOVW $1028, R12
- B runtime·callbackasm1(SB)
- MOVW $1029, R12
- B runtime·callbackasm1(SB)
- MOVW $1030, R12
- B runtime·callbackasm1(SB)
- MOVW $1031, R12
- B runtime·callbackasm1(SB)
- MOVW $1032, R12
- B runtime·callbackasm1(SB)
- MOVW $1033, R12
- B runtime·callbackasm1(SB)
- MOVW $1034, R12
- B runtime·callbackasm1(SB)
- MOVW $1035, R12
- B runtime·callbackasm1(SB)
- MOVW $1036, R12
- B runtime·callbackasm1(SB)
- MOVW $1037, R12
- B runtime·callbackasm1(SB)
- MOVW $1038, R12
- B runtime·callbackasm1(SB)
- MOVW $1039, R12
- B runtime·callbackasm1(SB)
- MOVW $1040, R12
- B runtime·callbackasm1(SB)
- MOVW $1041, R12
- B runtime·callbackasm1(SB)
- MOVW $1042, R12
- B runtime·callbackasm1(SB)
- MOVW $1043, R12
- B runtime·callbackasm1(SB)
- MOVW $1044, R12
- B runtime·callbackasm1(SB)
- MOVW $1045, R12
- B runtime·callbackasm1(SB)
- MOVW $1046, R12
- B runtime·callbackasm1(SB)
- MOVW $1047, R12
- B runtime·callbackasm1(SB)
- MOVW $1048, R12
- B runtime·callbackasm1(SB)
- MOVW $1049, R12
- B runtime·callbackasm1(SB)
- MOVW $1050, R12
- B runtime·callbackasm1(SB)
- MOVW $1051, R12
- B runtime·callbackasm1(SB)
- MOVW $1052, R12
- B runtime·callbackasm1(SB)
- MOVW $1053, R12
- B runtime·callbackasm1(SB)
- MOVW $1054, R12
- B runtime·callbackasm1(SB)
- MOVW $1055, R12
- B runtime·callbackasm1(SB)
- MOVW $1056, R12
- B runtime·callbackasm1(SB)
- MOVW $1057, R12
- B runtime·callbackasm1(SB)
- MOVW $1058, R12
- B runtime·callbackasm1(SB)
- MOVW $1059, R12
- B runtime·callbackasm1(SB)
- MOVW $1060, R12
- B runtime·callbackasm1(SB)
- MOVW $1061, R12
- B runtime·callbackasm1(SB)
- MOVW $1062, R12
- B runtime·callbackasm1(SB)
- MOVW $1063, R12
- B runtime·callbackasm1(SB)
- MOVW $1064, R12
- B runtime·callbackasm1(SB)
- MOVW $1065, R12
- B runtime·callbackasm1(SB)
- MOVW $1066, R12
- B runtime·callbackasm1(SB)
- MOVW $1067, R12
- B runtime·callbackasm1(SB)
- MOVW $1068, R12
- B runtime·callbackasm1(SB)
- MOVW $1069, R12
- B runtime·callbackasm1(SB)
- MOVW $1070, R12
- B runtime·callbackasm1(SB)
- MOVW $1071, R12
- B runtime·callbackasm1(SB)
- MOVW $1072, R12
- B runtime·callbackasm1(SB)
- MOVW $1073, R12
- B runtime·callbackasm1(SB)
- MOVW $1074, R12
- B runtime·callbackasm1(SB)
- MOVW $1075, R12
- B runtime·callbackasm1(SB)
- MOVW $1076, R12
- B runtime·callbackasm1(SB)
- MOVW $1077, R12
- B runtime·callbackasm1(SB)
- MOVW $1078, R12
- B runtime·callbackasm1(SB)
- MOVW $1079, R12
- B runtime·callbackasm1(SB)
- MOVW $1080, R12
- B runtime·callbackasm1(SB)
- MOVW $1081, R12
- B runtime·callbackasm1(SB)
- MOVW $1082, R12
- B runtime·callbackasm1(SB)
- MOVW $1083, R12
- B runtime·callbackasm1(SB)
- MOVW $1084, R12
- B runtime·callbackasm1(SB)
- MOVW $1085, R12
- B runtime·callbackasm1(SB)
- MOVW $1086, R12
- B runtime·callbackasm1(SB)
- MOVW $1087, R12
- B runtime·callbackasm1(SB)
- MOVW $1088, R12
- B runtime·callbackasm1(SB)
- MOVW $1089, R12
- B runtime·callbackasm1(SB)
- MOVW $1090, R12
- B runtime·callbackasm1(SB)
- MOVW $1091, R12
- B runtime·callbackasm1(SB)
- MOVW $1092, R12
- B runtime·callbackasm1(SB)
- MOVW $1093, R12
- B runtime·callbackasm1(SB)
- MOVW $1094, R12
- B runtime·callbackasm1(SB)
- MOVW $1095, R12
- B runtime·callbackasm1(SB)
- MOVW $1096, R12
- B runtime·callbackasm1(SB)
- MOVW $1097, R12
- B runtime·callbackasm1(SB)
- MOVW $1098, R12
- B runtime·callbackasm1(SB)
- MOVW $1099, R12
- B runtime·callbackasm1(SB)
- MOVW $1100, R12
- B runtime·callbackasm1(SB)
- MOVW $1101, R12
- B runtime·callbackasm1(SB)
- MOVW $1102, R12
- B runtime·callbackasm1(SB)
- MOVW $1103, R12
- B runtime·callbackasm1(SB)
- MOVW $1104, R12
- B runtime·callbackasm1(SB)
- MOVW $1105, R12
- B runtime·callbackasm1(SB)
- MOVW $1106, R12
- B runtime·callbackasm1(SB)
- MOVW $1107, R12
- B runtime·callbackasm1(SB)
- MOVW $1108, R12
- B runtime·callbackasm1(SB)
- MOVW $1109, R12
- B runtime·callbackasm1(SB)
- MOVW $1110, R12
- B runtime·callbackasm1(SB)
- MOVW $1111, R12
- B runtime·callbackasm1(SB)
- MOVW $1112, R12
- B runtime·callbackasm1(SB)
- MOVW $1113, R12
- B runtime·callbackasm1(SB)
- MOVW $1114, R12
- B runtime·callbackasm1(SB)
- MOVW $1115, R12
- B runtime·callbackasm1(SB)
- MOVW $1116, R12
- B runtime·callbackasm1(SB)
- MOVW $1117, R12
- B runtime·callbackasm1(SB)
- MOVW $1118, R12
- B runtime·callbackasm1(SB)
- MOVW $1119, R12
- B runtime·callbackasm1(SB)
- MOVW $1120, R12
- B runtime·callbackasm1(SB)
- MOVW $1121, R12
- B runtime·callbackasm1(SB)
- MOVW $1122, R12
- B runtime·callbackasm1(SB)
- MOVW $1123, R12
- B runtime·callbackasm1(SB)
- MOVW $1124, R12
- B runtime·callbackasm1(SB)
- MOVW $1125, R12
- B runtime·callbackasm1(SB)
- MOVW $1126, R12
- B runtime·callbackasm1(SB)
- MOVW $1127, R12
- B runtime·callbackasm1(SB)
- MOVW $1128, R12
- B runtime·callbackasm1(SB)
- MOVW $1129, R12
- B runtime·callbackasm1(SB)
- MOVW $1130, R12
- B runtime·callbackasm1(SB)
- MOVW $1131, R12
- B runtime·callbackasm1(SB)
- MOVW $1132, R12
- B runtime·callbackasm1(SB)
- MOVW $1133, R12
- B runtime·callbackasm1(SB)
- MOVW $1134, R12
- B runtime·callbackasm1(SB)
- MOVW $1135, R12
- B runtime·callbackasm1(SB)
- MOVW $1136, R12
- B runtime·callbackasm1(SB)
- MOVW $1137, R12
- B runtime·callbackasm1(SB)
- MOVW $1138, R12
- B runtime·callbackasm1(SB)
- MOVW $1139, R12
- B runtime·callbackasm1(SB)
- MOVW $1140, R12
- B runtime·callbackasm1(SB)
- MOVW $1141, R12
- B runtime·callbackasm1(SB)
- MOVW $1142, R12
- B runtime·callbackasm1(SB)
- MOVW $1143, R12
- B runtime·callbackasm1(SB)
- MOVW $1144, R12
- B runtime·callbackasm1(SB)
- MOVW $1145, R12
- B runtime·callbackasm1(SB)
- MOVW $1146, R12
- B runtime·callbackasm1(SB)
- MOVW $1147, R12
- B runtime·callbackasm1(SB)
- MOVW $1148, R12
- B runtime·callbackasm1(SB)
- MOVW $1149, R12
- B runtime·callbackasm1(SB)
- MOVW $1150, R12
- B runtime·callbackasm1(SB)
- MOVW $1151, R12
- B runtime·callbackasm1(SB)
- MOVW $1152, R12
- B runtime·callbackasm1(SB)
- MOVW $1153, R12
- B runtime·callbackasm1(SB)
- MOVW $1154, R12
- B runtime·callbackasm1(SB)
- MOVW $1155, R12
- B runtime·callbackasm1(SB)
- MOVW $1156, R12
- B runtime·callbackasm1(SB)
- MOVW $1157, R12
- B runtime·callbackasm1(SB)
- MOVW $1158, R12
- B runtime·callbackasm1(SB)
- MOVW $1159, R12
- B runtime·callbackasm1(SB)
- MOVW $1160, R12
- B runtime·callbackasm1(SB)
- MOVW $1161, R12
- B runtime·callbackasm1(SB)
- MOVW $1162, R12
- B runtime·callbackasm1(SB)
- MOVW $1163, R12
- B runtime·callbackasm1(SB)
- MOVW $1164, R12
- B runtime·callbackasm1(SB)
- MOVW $1165, R12
- B runtime·callbackasm1(SB)
- MOVW $1166, R12
- B runtime·callbackasm1(SB)
- MOVW $1167, R12
- B runtime·callbackasm1(SB)
- MOVW $1168, R12
- B runtime·callbackasm1(SB)
- MOVW $1169, R12
- B runtime·callbackasm1(SB)
- MOVW $1170, R12
- B runtime·callbackasm1(SB)
- MOVW $1171, R12
- B runtime·callbackasm1(SB)
- MOVW $1172, R12
- B runtime·callbackasm1(SB)
- MOVW $1173, R12
- B runtime·callbackasm1(SB)
- MOVW $1174, R12
- B runtime·callbackasm1(SB)
- MOVW $1175, R12
- B runtime·callbackasm1(SB)
- MOVW $1176, R12
- B runtime·callbackasm1(SB)
- MOVW $1177, R12
- B runtime·callbackasm1(SB)
- MOVW $1178, R12
- B runtime·callbackasm1(SB)
- MOVW $1179, R12
- B runtime·callbackasm1(SB)
- MOVW $1180, R12
- B runtime·callbackasm1(SB)
- MOVW $1181, R12
- B runtime·callbackasm1(SB)
- MOVW $1182, R12
- B runtime·callbackasm1(SB)
- MOVW $1183, R12
- B runtime·callbackasm1(SB)
- MOVW $1184, R12
- B runtime·callbackasm1(SB)
- MOVW $1185, R12
- B runtime·callbackasm1(SB)
- MOVW $1186, R12
- B runtime·callbackasm1(SB)
- MOVW $1187, R12
- B runtime·callbackasm1(SB)
- MOVW $1188, R12
- B runtime·callbackasm1(SB)
- MOVW $1189, R12
- B runtime·callbackasm1(SB)
- MOVW $1190, R12
- B runtime·callbackasm1(SB)
- MOVW $1191, R12
- B runtime·callbackasm1(SB)
- MOVW $1192, R12
- B runtime·callbackasm1(SB)
- MOVW $1193, R12
- B runtime·callbackasm1(SB)
- MOVW $1194, R12
- B runtime·callbackasm1(SB)
- MOVW $1195, R12
- B runtime·callbackasm1(SB)
- MOVW $1196, R12
- B runtime·callbackasm1(SB)
- MOVW $1197, R12
- B runtime·callbackasm1(SB)
- MOVW $1198, R12
- B runtime·callbackasm1(SB)
- MOVW $1199, R12
- B runtime·callbackasm1(SB)
- MOVW $1200, R12
- B runtime·callbackasm1(SB)
- MOVW $1201, R12
- B runtime·callbackasm1(SB)
- MOVW $1202, R12
- B runtime·callbackasm1(SB)
- MOVW $1203, R12
- B runtime·callbackasm1(SB)
- MOVW $1204, R12
- B runtime·callbackasm1(SB)
- MOVW $1205, R12
- B runtime·callbackasm1(SB)
- MOVW $1206, R12
- B runtime·callbackasm1(SB)
- MOVW $1207, R12
- B runtime·callbackasm1(SB)
- MOVW $1208, R12
- B runtime·callbackasm1(SB)
- MOVW $1209, R12
- B runtime·callbackasm1(SB)
- MOVW $1210, R12
- B runtime·callbackasm1(SB)
- MOVW $1211, R12
- B runtime·callbackasm1(SB)
- MOVW $1212, R12
- B runtime·callbackasm1(SB)
- MOVW $1213, R12
- B runtime·callbackasm1(SB)
- MOVW $1214, R12
- B runtime·callbackasm1(SB)
- MOVW $1215, R12
- B runtime·callbackasm1(SB)
- MOVW $1216, R12
- B runtime·callbackasm1(SB)
- MOVW $1217, R12
- B runtime·callbackasm1(SB)
- MOVW $1218, R12
- B runtime·callbackasm1(SB)
- MOVW $1219, R12
- B runtime·callbackasm1(SB)
- MOVW $1220, R12
- B runtime·callbackasm1(SB)
- MOVW $1221, R12
- B runtime·callbackasm1(SB)
- MOVW $1222, R12
- B runtime·callbackasm1(SB)
- MOVW $1223, R12
- B runtime·callbackasm1(SB)
- MOVW $1224, R12
- B runtime·callbackasm1(SB)
- MOVW $1225, R12
- B runtime·callbackasm1(SB)
- MOVW $1226, R12
- B runtime·callbackasm1(SB)
- MOVW $1227, R12
- B runtime·callbackasm1(SB)
- MOVW $1228, R12
- B runtime·callbackasm1(SB)
- MOVW $1229, R12
- B runtime·callbackasm1(SB)
- MOVW $1230, R12
- B runtime·callbackasm1(SB)
- MOVW $1231, R12
- B runtime·callbackasm1(SB)
- MOVW $1232, R12
- B runtime·callbackasm1(SB)
- MOVW $1233, R12
- B runtime·callbackasm1(SB)
- MOVW $1234, R12
- B runtime·callbackasm1(SB)
- MOVW $1235, R12
- B runtime·callbackasm1(SB)
- MOVW $1236, R12
- B runtime·callbackasm1(SB)
- MOVW $1237, R12
- B runtime·callbackasm1(SB)
- MOVW $1238, R12
- B runtime·callbackasm1(SB)
- MOVW $1239, R12
- B runtime·callbackasm1(SB)
- MOVW $1240, R12
- B runtime·callbackasm1(SB)
- MOVW $1241, R12
- B runtime·callbackasm1(SB)
- MOVW $1242, R12
- B runtime·callbackasm1(SB)
- MOVW $1243, R12
- B runtime·callbackasm1(SB)
- MOVW $1244, R12
- B runtime·callbackasm1(SB)
- MOVW $1245, R12
- B runtime·callbackasm1(SB)
- MOVW $1246, R12
- B runtime·callbackasm1(SB)
- MOVW $1247, R12
- B runtime·callbackasm1(SB)
- MOVW $1248, R12
- B runtime·callbackasm1(SB)
- MOVW $1249, R12
- B runtime·callbackasm1(SB)
- MOVW $1250, R12
- B runtime·callbackasm1(SB)
- MOVW $1251, R12
- B runtime·callbackasm1(SB)
- MOVW $1252, R12
- B runtime·callbackasm1(SB)
- MOVW $1253, R12
- B runtime·callbackasm1(SB)
- MOVW $1254, R12
- B runtime·callbackasm1(SB)
- MOVW $1255, R12
- B runtime·callbackasm1(SB)
- MOVW $1256, R12
- B runtime·callbackasm1(SB)
- MOVW $1257, R12
- B runtime·callbackasm1(SB)
- MOVW $1258, R12
- B runtime·callbackasm1(SB)
- MOVW $1259, R12
- B runtime·callbackasm1(SB)
- MOVW $1260, R12
- B runtime·callbackasm1(SB)
- MOVW $1261, R12
- B runtime·callbackasm1(SB)
- MOVW $1262, R12
- B runtime·callbackasm1(SB)
- MOVW $1263, R12
- B runtime·callbackasm1(SB)
- MOVW $1264, R12
- B runtime·callbackasm1(SB)
- MOVW $1265, R12
- B runtime·callbackasm1(SB)
- MOVW $1266, R12
- B runtime·callbackasm1(SB)
- MOVW $1267, R12
- B runtime·callbackasm1(SB)
- MOVW $1268, R12
- B runtime·callbackasm1(SB)
- MOVW $1269, R12
- B runtime·callbackasm1(SB)
- MOVW $1270, R12
- B runtime·callbackasm1(SB)
- MOVW $1271, R12
- B runtime·callbackasm1(SB)
- MOVW $1272, R12
- B runtime·callbackasm1(SB)
- MOVW $1273, R12
- B runtime·callbackasm1(SB)
- MOVW $1274, R12
- B runtime·callbackasm1(SB)
- MOVW $1275, R12
- B runtime·callbackasm1(SB)
- MOVW $1276, R12
- B runtime·callbackasm1(SB)
- MOVW $1277, R12
- B runtime·callbackasm1(SB)
- MOVW $1278, R12
- B runtime·callbackasm1(SB)
- MOVW $1279, R12
- B runtime·callbackasm1(SB)
- MOVW $1280, R12
- B runtime·callbackasm1(SB)
- MOVW $1281, R12
- B runtime·callbackasm1(SB)
- MOVW $1282, R12
- B runtime·callbackasm1(SB)
- MOVW $1283, R12
- B runtime·callbackasm1(SB)
- MOVW $1284, R12
- B runtime·callbackasm1(SB)
- MOVW $1285, R12
- B runtime·callbackasm1(SB)
- MOVW $1286, R12
- B runtime·callbackasm1(SB)
- MOVW $1287, R12
- B runtime·callbackasm1(SB)
- MOVW $1288, R12
- B runtime·callbackasm1(SB)
- MOVW $1289, R12
- B runtime·callbackasm1(SB)
- MOVW $1290, R12
- B runtime·callbackasm1(SB)
- MOVW $1291, R12
- B runtime·callbackasm1(SB)
- MOVW $1292, R12
- B runtime·callbackasm1(SB)
- MOVW $1293, R12
- B runtime·callbackasm1(SB)
- MOVW $1294, R12
- B runtime·callbackasm1(SB)
- MOVW $1295, R12
- B runtime·callbackasm1(SB)
- MOVW $1296, R12
- B runtime·callbackasm1(SB)
- MOVW $1297, R12
- B runtime·callbackasm1(SB)
- MOVW $1298, R12
- B runtime·callbackasm1(SB)
- MOVW $1299, R12
- B runtime·callbackasm1(SB)
- MOVW $1300, R12
- B runtime·callbackasm1(SB)
- MOVW $1301, R12
- B runtime·callbackasm1(SB)
- MOVW $1302, R12
- B runtime·callbackasm1(SB)
- MOVW $1303, R12
- B runtime·callbackasm1(SB)
- MOVW $1304, R12
- B runtime·callbackasm1(SB)
- MOVW $1305, R12
- B runtime·callbackasm1(SB)
- MOVW $1306, R12
- B runtime·callbackasm1(SB)
- MOVW $1307, R12
- B runtime·callbackasm1(SB)
- MOVW $1308, R12
- B runtime·callbackasm1(SB)
- MOVW $1309, R12
- B runtime·callbackasm1(SB)
- MOVW $1310, R12
- B runtime·callbackasm1(SB)
- MOVW $1311, R12
- B runtime·callbackasm1(SB)
- MOVW $1312, R12
- B runtime·callbackasm1(SB)
- MOVW $1313, R12
- B runtime·callbackasm1(SB)
- MOVW $1314, R12
- B runtime·callbackasm1(SB)
- MOVW $1315, R12
- B runtime·callbackasm1(SB)
- MOVW $1316, R12
- B runtime·callbackasm1(SB)
- MOVW $1317, R12
- B runtime·callbackasm1(SB)
- MOVW $1318, R12
- B runtime·callbackasm1(SB)
- MOVW $1319, R12
- B runtime·callbackasm1(SB)
- MOVW $1320, R12
- B runtime·callbackasm1(SB)
- MOVW $1321, R12
- B runtime·callbackasm1(SB)
- MOVW $1322, R12
- B runtime·callbackasm1(SB)
- MOVW $1323, R12
- B runtime·callbackasm1(SB)
- MOVW $1324, R12
- B runtime·callbackasm1(SB)
- MOVW $1325, R12
- B runtime·callbackasm1(SB)
- MOVW $1326, R12
- B runtime·callbackasm1(SB)
- MOVW $1327, R12
- B runtime·callbackasm1(SB)
- MOVW $1328, R12
- B runtime·callbackasm1(SB)
- MOVW $1329, R12
- B runtime·callbackasm1(SB)
- MOVW $1330, R12
- B runtime·callbackasm1(SB)
- MOVW $1331, R12
- B runtime·callbackasm1(SB)
- MOVW $1332, R12
- B runtime·callbackasm1(SB)
- MOVW $1333, R12
- B runtime·callbackasm1(SB)
- MOVW $1334, R12
- B runtime·callbackasm1(SB)
- MOVW $1335, R12
- B runtime·callbackasm1(SB)
- MOVW $1336, R12
- B runtime·callbackasm1(SB)
- MOVW $1337, R12
- B runtime·callbackasm1(SB)
- MOVW $1338, R12
- B runtime·callbackasm1(SB)
- MOVW $1339, R12
- B runtime·callbackasm1(SB)
- MOVW $1340, R12
- B runtime·callbackasm1(SB)
- MOVW $1341, R12
- B runtime·callbackasm1(SB)
- MOVW $1342, R12
- B runtime·callbackasm1(SB)
- MOVW $1343, R12
- B runtime·callbackasm1(SB)
- MOVW $1344, R12
- B runtime·callbackasm1(SB)
- MOVW $1345, R12
- B runtime·callbackasm1(SB)
- MOVW $1346, R12
- B runtime·callbackasm1(SB)
- MOVW $1347, R12
- B runtime·callbackasm1(SB)
- MOVW $1348, R12
- B runtime·callbackasm1(SB)
- MOVW $1349, R12
- B runtime·callbackasm1(SB)
- MOVW $1350, R12
- B runtime·callbackasm1(SB)
- MOVW $1351, R12
- B runtime·callbackasm1(SB)
- MOVW $1352, R12
- B runtime·callbackasm1(SB)
- MOVW $1353, R12
- B runtime·callbackasm1(SB)
- MOVW $1354, R12
- B runtime·callbackasm1(SB)
- MOVW $1355, R12
- B runtime·callbackasm1(SB)
- MOVW $1356, R12
- B runtime·callbackasm1(SB)
- MOVW $1357, R12
- B runtime·callbackasm1(SB)
- MOVW $1358, R12
- B runtime·callbackasm1(SB)
- MOVW $1359, R12
- B runtime·callbackasm1(SB)
- MOVW $1360, R12
- B runtime·callbackasm1(SB)
- MOVW $1361, R12
- B runtime·callbackasm1(SB)
- MOVW $1362, R12
- B runtime·callbackasm1(SB)
- MOVW $1363, R12
- B runtime·callbackasm1(SB)
- MOVW $1364, R12
- B runtime·callbackasm1(SB)
- MOVW $1365, R12
- B runtime·callbackasm1(SB)
- MOVW $1366, R12
- B runtime·callbackasm1(SB)
- MOVW $1367, R12
- B runtime·callbackasm1(SB)
- MOVW $1368, R12
- B runtime·callbackasm1(SB)
- MOVW $1369, R12
- B runtime·callbackasm1(SB)
- MOVW $1370, R12
- B runtime·callbackasm1(SB)
- MOVW $1371, R12
- B runtime·callbackasm1(SB)
- MOVW $1372, R12
- B runtime·callbackasm1(SB)
- MOVW $1373, R12
- B runtime·callbackasm1(SB)
- MOVW $1374, R12
- B runtime·callbackasm1(SB)
- MOVW $1375, R12
- B runtime·callbackasm1(SB)
- MOVW $1376, R12
- B runtime·callbackasm1(SB)
- MOVW $1377, R12
- B runtime·callbackasm1(SB)
- MOVW $1378, R12
- B runtime·callbackasm1(SB)
- MOVW $1379, R12
- B runtime·callbackasm1(SB)
- MOVW $1380, R12
- B runtime·callbackasm1(SB)
- MOVW $1381, R12
- B runtime·callbackasm1(SB)
- MOVW $1382, R12
- B runtime·callbackasm1(SB)
- MOVW $1383, R12
- B runtime·callbackasm1(SB)
- MOVW $1384, R12
- B runtime·callbackasm1(SB)
- MOVW $1385, R12
- B runtime·callbackasm1(SB)
- MOVW $1386, R12
- B runtime·callbackasm1(SB)
- MOVW $1387, R12
- B runtime·callbackasm1(SB)
- MOVW $1388, R12
- B runtime·callbackasm1(SB)
- MOVW $1389, R12
- B runtime·callbackasm1(SB)
- MOVW $1390, R12
- B runtime·callbackasm1(SB)
- MOVW $1391, R12
- B runtime·callbackasm1(SB)
- MOVW $1392, R12
- B runtime·callbackasm1(SB)
- MOVW $1393, R12
- B runtime·callbackasm1(SB)
- MOVW $1394, R12
- B runtime·callbackasm1(SB)
- MOVW $1395, R12
- B runtime·callbackasm1(SB)
- MOVW $1396, R12
- B runtime·callbackasm1(SB)
- MOVW $1397, R12
- B runtime·callbackasm1(SB)
- MOVW $1398, R12
- B runtime·callbackasm1(SB)
- MOVW $1399, R12
- B runtime·callbackasm1(SB)
- MOVW $1400, R12
- B runtime·callbackasm1(SB)
- MOVW $1401, R12
- B runtime·callbackasm1(SB)
- MOVW $1402, R12
- B runtime·callbackasm1(SB)
- MOVW $1403, R12
- B runtime·callbackasm1(SB)
- MOVW $1404, R12
- B runtime·callbackasm1(SB)
- MOVW $1405, R12
- B runtime·callbackasm1(SB)
- MOVW $1406, R12
- B runtime·callbackasm1(SB)
- MOVW $1407, R12
- B runtime·callbackasm1(SB)
- MOVW $1408, R12
- B runtime·callbackasm1(SB)
- MOVW $1409, R12
- B runtime·callbackasm1(SB)
- MOVW $1410, R12
- B runtime·callbackasm1(SB)
- MOVW $1411, R12
- B runtime·callbackasm1(SB)
- MOVW $1412, R12
- B runtime·callbackasm1(SB)
- MOVW $1413, R12
- B runtime·callbackasm1(SB)
- MOVW $1414, R12
- B runtime·callbackasm1(SB)
- MOVW $1415, R12
- B runtime·callbackasm1(SB)
- MOVW $1416, R12
- B runtime·callbackasm1(SB)
- MOVW $1417, R12
- B runtime·callbackasm1(SB)
- MOVW $1418, R12
- B runtime·callbackasm1(SB)
- MOVW $1419, R12
- B runtime·callbackasm1(SB)
- MOVW $1420, R12
- B runtime·callbackasm1(SB)
- MOVW $1421, R12
- B runtime·callbackasm1(SB)
- MOVW $1422, R12
- B runtime·callbackasm1(SB)
- MOVW $1423, R12
- B runtime·callbackasm1(SB)
- MOVW $1424, R12
- B runtime·callbackasm1(SB)
- MOVW $1425, R12
- B runtime·callbackasm1(SB)
- MOVW $1426, R12
- B runtime·callbackasm1(SB)
- MOVW $1427, R12
- B runtime·callbackasm1(SB)
- MOVW $1428, R12
- B runtime·callbackasm1(SB)
- MOVW $1429, R12
- B runtime·callbackasm1(SB)
- MOVW $1430, R12
- B runtime·callbackasm1(SB)
- MOVW $1431, R12
- B runtime·callbackasm1(SB)
- MOVW $1432, R12
- B runtime·callbackasm1(SB)
- MOVW $1433, R12
- B runtime·callbackasm1(SB)
- MOVW $1434, R12
- B runtime·callbackasm1(SB)
- MOVW $1435, R12
- B runtime·callbackasm1(SB)
- MOVW $1436, R12
- B runtime·callbackasm1(SB)
- MOVW $1437, R12
- B runtime·callbackasm1(SB)
- MOVW $1438, R12
- B runtime·callbackasm1(SB)
- MOVW $1439, R12
- B runtime·callbackasm1(SB)
- MOVW $1440, R12
- B runtime·callbackasm1(SB)
- MOVW $1441, R12
- B runtime·callbackasm1(SB)
- MOVW $1442, R12
- B runtime·callbackasm1(SB)
- MOVW $1443, R12
- B runtime·callbackasm1(SB)
- MOVW $1444, R12
- B runtime·callbackasm1(SB)
- MOVW $1445, R12
- B runtime·callbackasm1(SB)
- MOVW $1446, R12
- B runtime·callbackasm1(SB)
- MOVW $1447, R12
- B runtime·callbackasm1(SB)
- MOVW $1448, R12
- B runtime·callbackasm1(SB)
- MOVW $1449, R12
- B runtime·callbackasm1(SB)
- MOVW $1450, R12
- B runtime·callbackasm1(SB)
- MOVW $1451, R12
- B runtime·callbackasm1(SB)
- MOVW $1452, R12
- B runtime·callbackasm1(SB)
- MOVW $1453, R12
- B runtime·callbackasm1(SB)
- MOVW $1454, R12
- B runtime·callbackasm1(SB)
- MOVW $1455, R12
- B runtime·callbackasm1(SB)
- MOVW $1456, R12
- B runtime·callbackasm1(SB)
- MOVW $1457, R12
- B runtime·callbackasm1(SB)
- MOVW $1458, R12
- B runtime·callbackasm1(SB)
- MOVW $1459, R12
- B runtime·callbackasm1(SB)
- MOVW $1460, R12
- B runtime·callbackasm1(SB)
- MOVW $1461, R12
- B runtime·callbackasm1(SB)
- MOVW $1462, R12
- B runtime·callbackasm1(SB)
- MOVW $1463, R12
- B runtime·callbackasm1(SB)
- MOVW $1464, R12
- B runtime·callbackasm1(SB)
- MOVW $1465, R12
- B runtime·callbackasm1(SB)
- MOVW $1466, R12
- B runtime·callbackasm1(SB)
- MOVW $1467, R12
- B runtime·callbackasm1(SB)
- MOVW $1468, R12
- B runtime·callbackasm1(SB)
- MOVW $1469, R12
- B runtime·callbackasm1(SB)
- MOVW $1470, R12
- B runtime·callbackasm1(SB)
- MOVW $1471, R12
- B runtime·callbackasm1(SB)
- MOVW $1472, R12
- B runtime·callbackasm1(SB)
- MOVW $1473, R12
- B runtime·callbackasm1(SB)
- MOVW $1474, R12
- B runtime·callbackasm1(SB)
- MOVW $1475, R12
- B runtime·callbackasm1(SB)
- MOVW $1476, R12
- B runtime·callbackasm1(SB)
- MOVW $1477, R12
- B runtime·callbackasm1(SB)
- MOVW $1478, R12
- B runtime·callbackasm1(SB)
- MOVW $1479, R12
- B runtime·callbackasm1(SB)
- MOVW $1480, R12
- B runtime·callbackasm1(SB)
- MOVW $1481, R12
- B runtime·callbackasm1(SB)
- MOVW $1482, R12
- B runtime·callbackasm1(SB)
- MOVW $1483, R12
- B runtime·callbackasm1(SB)
- MOVW $1484, R12
- B runtime·callbackasm1(SB)
- MOVW $1485, R12
- B runtime·callbackasm1(SB)
- MOVW $1486, R12
- B runtime·callbackasm1(SB)
- MOVW $1487, R12
- B runtime·callbackasm1(SB)
- MOVW $1488, R12
- B runtime·callbackasm1(SB)
- MOVW $1489, R12
- B runtime·callbackasm1(SB)
- MOVW $1490, R12
- B runtime·callbackasm1(SB)
- MOVW $1491, R12
- B runtime·callbackasm1(SB)
- MOVW $1492, R12
- B runtime·callbackasm1(SB)
- MOVW $1493, R12
- B runtime·callbackasm1(SB)
- MOVW $1494, R12
- B runtime·callbackasm1(SB)
- MOVW $1495, R12
- B runtime·callbackasm1(SB)
- MOVW $1496, R12
- B runtime·callbackasm1(SB)
- MOVW $1497, R12
- B runtime·callbackasm1(SB)
- MOVW $1498, R12
- B runtime·callbackasm1(SB)
- MOVW $1499, R12
- B runtime·callbackasm1(SB)
- MOVW $1500, R12
- B runtime·callbackasm1(SB)
- MOVW $1501, R12
- B runtime·callbackasm1(SB)
- MOVW $1502, R12
- B runtime·callbackasm1(SB)
- MOVW $1503, R12
- B runtime·callbackasm1(SB)
- MOVW $1504, R12
- B runtime·callbackasm1(SB)
- MOVW $1505, R12
- B runtime·callbackasm1(SB)
- MOVW $1506, R12
- B runtime·callbackasm1(SB)
- MOVW $1507, R12
- B runtime·callbackasm1(SB)
- MOVW $1508, R12
- B runtime·callbackasm1(SB)
- MOVW $1509, R12
- B runtime·callbackasm1(SB)
- MOVW $1510, R12
- B runtime·callbackasm1(SB)
- MOVW $1511, R12
- B runtime·callbackasm1(SB)
- MOVW $1512, R12
- B runtime·callbackasm1(SB)
- MOVW $1513, R12
- B runtime·callbackasm1(SB)
- MOVW $1514, R12
- B runtime·callbackasm1(SB)
- MOVW $1515, R12
- B runtime·callbackasm1(SB)
- MOVW $1516, R12
- B runtime·callbackasm1(SB)
- MOVW $1517, R12
- B runtime·callbackasm1(SB)
- MOVW $1518, R12
- B runtime·callbackasm1(SB)
- MOVW $1519, R12
- B runtime·callbackasm1(SB)
- MOVW $1520, R12
- B runtime·callbackasm1(SB)
- MOVW $1521, R12
- B runtime·callbackasm1(SB)
- MOVW $1522, R12
- B runtime·callbackasm1(SB)
- MOVW $1523, R12
- B runtime·callbackasm1(SB)
- MOVW $1524, R12
- B runtime·callbackasm1(SB)
- MOVW $1525, R12
- B runtime·callbackasm1(SB)
- MOVW $1526, R12
- B runtime·callbackasm1(SB)
- MOVW $1527, R12
- B runtime·callbackasm1(SB)
- MOVW $1528, R12
- B runtime·callbackasm1(SB)
- MOVW $1529, R12
- B runtime·callbackasm1(SB)
- MOVW $1530, R12
- B runtime·callbackasm1(SB)
- MOVW $1531, R12
- B runtime·callbackasm1(SB)
- MOVW $1532, R12
- B runtime·callbackasm1(SB)
- MOVW $1533, R12
- B runtime·callbackasm1(SB)
- MOVW $1534, R12
- B runtime·callbackasm1(SB)
- MOVW $1535, R12
- B runtime·callbackasm1(SB)
- MOVW $1536, R12
- B runtime·callbackasm1(SB)
- MOVW $1537, R12
- B runtime·callbackasm1(SB)
- MOVW $1538, R12
- B runtime·callbackasm1(SB)
- MOVW $1539, R12
- B runtime·callbackasm1(SB)
- MOVW $1540, R12
- B runtime·callbackasm1(SB)
- MOVW $1541, R12
- B runtime·callbackasm1(SB)
- MOVW $1542, R12
- B runtime·callbackasm1(SB)
- MOVW $1543, R12
- B runtime·callbackasm1(SB)
- MOVW $1544, R12
- B runtime·callbackasm1(SB)
- MOVW $1545, R12
- B runtime·callbackasm1(SB)
- MOVW $1546, R12
- B runtime·callbackasm1(SB)
- MOVW $1547, R12
- B runtime·callbackasm1(SB)
- MOVW $1548, R12
- B runtime·callbackasm1(SB)
- MOVW $1549, R12
- B runtime·callbackasm1(SB)
- MOVW $1550, R12
- B runtime·callbackasm1(SB)
- MOVW $1551, R12
- B runtime·callbackasm1(SB)
- MOVW $1552, R12
- B runtime·callbackasm1(SB)
- MOVW $1553, R12
- B runtime·callbackasm1(SB)
- MOVW $1554, R12
- B runtime·callbackasm1(SB)
- MOVW $1555, R12
- B runtime·callbackasm1(SB)
- MOVW $1556, R12
- B runtime·callbackasm1(SB)
- MOVW $1557, R12
- B runtime·callbackasm1(SB)
- MOVW $1558, R12
- B runtime·callbackasm1(SB)
- MOVW $1559, R12
- B runtime·callbackasm1(SB)
- MOVW $1560, R12
- B runtime·callbackasm1(SB)
- MOVW $1561, R12
- B runtime·callbackasm1(SB)
- MOVW $1562, R12
- B runtime·callbackasm1(SB)
- MOVW $1563, R12
- B runtime·callbackasm1(SB)
- MOVW $1564, R12
- B runtime·callbackasm1(SB)
- MOVW $1565, R12
- B runtime·callbackasm1(SB)
- MOVW $1566, R12
- B runtime·callbackasm1(SB)
- MOVW $1567, R12
- B runtime·callbackasm1(SB)
- MOVW $1568, R12
- B runtime·callbackasm1(SB)
- MOVW $1569, R12
- B runtime·callbackasm1(SB)
- MOVW $1570, R12
- B runtime·callbackasm1(SB)
- MOVW $1571, R12
- B runtime·callbackasm1(SB)
- MOVW $1572, R12
- B runtime·callbackasm1(SB)
- MOVW $1573, R12
- B runtime·callbackasm1(SB)
- MOVW $1574, R12
- B runtime·callbackasm1(SB)
- MOVW $1575, R12
- B runtime·callbackasm1(SB)
- MOVW $1576, R12
- B runtime·callbackasm1(SB)
- MOVW $1577, R12
- B runtime·callbackasm1(SB)
- MOVW $1578, R12
- B runtime·callbackasm1(SB)
- MOVW $1579, R12
- B runtime·callbackasm1(SB)
- MOVW $1580, R12
- B runtime·callbackasm1(SB)
- MOVW $1581, R12
- B runtime·callbackasm1(SB)
- MOVW $1582, R12
- B runtime·callbackasm1(SB)
- MOVW $1583, R12
- B runtime·callbackasm1(SB)
- MOVW $1584, R12
- B runtime·callbackasm1(SB)
- MOVW $1585, R12
- B runtime·callbackasm1(SB)
- MOVW $1586, R12
- B runtime·callbackasm1(SB)
- MOVW $1587, R12
- B runtime·callbackasm1(SB)
- MOVW $1588, R12
- B runtime·callbackasm1(SB)
- MOVW $1589, R12
- B runtime·callbackasm1(SB)
- MOVW $1590, R12
- B runtime·callbackasm1(SB)
- MOVW $1591, R12
- B runtime·callbackasm1(SB)
- MOVW $1592, R12
- B runtime·callbackasm1(SB)
- MOVW $1593, R12
- B runtime·callbackasm1(SB)
- MOVW $1594, R12
- B runtime·callbackasm1(SB)
- MOVW $1595, R12
- B runtime·callbackasm1(SB)
- MOVW $1596, R12
- B runtime·callbackasm1(SB)
- MOVW $1597, R12
- B runtime·callbackasm1(SB)
- MOVW $1598, R12
- B runtime·callbackasm1(SB)
- MOVW $1599, R12
- B runtime·callbackasm1(SB)
- MOVW $1600, R12
- B runtime·callbackasm1(SB)
- MOVW $1601, R12
- B runtime·callbackasm1(SB)
- MOVW $1602, R12
- B runtime·callbackasm1(SB)
- MOVW $1603, R12
- B runtime·callbackasm1(SB)
- MOVW $1604, R12
- B runtime·callbackasm1(SB)
- MOVW $1605, R12
- B runtime·callbackasm1(SB)
- MOVW $1606, R12
- B runtime·callbackasm1(SB)
- MOVW $1607, R12
- B runtime·callbackasm1(SB)
- MOVW $1608, R12
- B runtime·callbackasm1(SB)
- MOVW $1609, R12
- B runtime·callbackasm1(SB)
- MOVW $1610, R12
- B runtime·callbackasm1(SB)
- MOVW $1611, R12
- B runtime·callbackasm1(SB)
- MOVW $1612, R12
- B runtime·callbackasm1(SB)
- MOVW $1613, R12
- B runtime·callbackasm1(SB)
- MOVW $1614, R12
- B runtime·callbackasm1(SB)
- MOVW $1615, R12
- B runtime·callbackasm1(SB)
- MOVW $1616, R12
- B runtime·callbackasm1(SB)
- MOVW $1617, R12
- B runtime·callbackasm1(SB)
- MOVW $1618, R12
- B runtime·callbackasm1(SB)
- MOVW $1619, R12
- B runtime·callbackasm1(SB)
- MOVW $1620, R12
- B runtime·callbackasm1(SB)
- MOVW $1621, R12
- B runtime·callbackasm1(SB)
- MOVW $1622, R12
- B runtime·callbackasm1(SB)
- MOVW $1623, R12
- B runtime·callbackasm1(SB)
- MOVW $1624, R12
- B runtime·callbackasm1(SB)
- MOVW $1625, R12
- B runtime·callbackasm1(SB)
- MOVW $1626, R12
- B runtime·callbackasm1(SB)
- MOVW $1627, R12
- B runtime·callbackasm1(SB)
- MOVW $1628, R12
- B runtime·callbackasm1(SB)
- MOVW $1629, R12
- B runtime·callbackasm1(SB)
- MOVW $1630, R12
- B runtime·callbackasm1(SB)
- MOVW $1631, R12
- B runtime·callbackasm1(SB)
- MOVW $1632, R12
- B runtime·callbackasm1(SB)
- MOVW $1633, R12
- B runtime·callbackasm1(SB)
- MOVW $1634, R12
- B runtime·callbackasm1(SB)
- MOVW $1635, R12
- B runtime·callbackasm1(SB)
- MOVW $1636, R12
- B runtime·callbackasm1(SB)
- MOVW $1637, R12
- B runtime·callbackasm1(SB)
- MOVW $1638, R12
- B runtime·callbackasm1(SB)
- MOVW $1639, R12
- B runtime·callbackasm1(SB)
- MOVW $1640, R12
- B runtime·callbackasm1(SB)
- MOVW $1641, R12
- B runtime·callbackasm1(SB)
- MOVW $1642, R12
- B runtime·callbackasm1(SB)
- MOVW $1643, R12
- B runtime·callbackasm1(SB)
- MOVW $1644, R12
- B runtime·callbackasm1(SB)
- MOVW $1645, R12
- B runtime·callbackasm1(SB)
- MOVW $1646, R12
- B runtime·callbackasm1(SB)
- MOVW $1647, R12
- B runtime·callbackasm1(SB)
- MOVW $1648, R12
- B runtime·callbackasm1(SB)
- MOVW $1649, R12
- B runtime·callbackasm1(SB)
- MOVW $1650, R12
- B runtime·callbackasm1(SB)
- MOVW $1651, R12
- B runtime·callbackasm1(SB)
- MOVW $1652, R12
- B runtime·callbackasm1(SB)
- MOVW $1653, R12
- B runtime·callbackasm1(SB)
- MOVW $1654, R12
- B runtime·callbackasm1(SB)
- MOVW $1655, R12
- B runtime·callbackasm1(SB)
- MOVW $1656, R12
- B runtime·callbackasm1(SB)
- MOVW $1657, R12
- B runtime·callbackasm1(SB)
- MOVW $1658, R12
- B runtime·callbackasm1(SB)
- MOVW $1659, R12
- B runtime·callbackasm1(SB)
- MOVW $1660, R12
- B runtime·callbackasm1(SB)
- MOVW $1661, R12
- B runtime·callbackasm1(SB)
- MOVW $1662, R12
- B runtime·callbackasm1(SB)
- MOVW $1663, R12
- B runtime·callbackasm1(SB)
- MOVW $1664, R12
- B runtime·callbackasm1(SB)
- MOVW $1665, R12
- B runtime·callbackasm1(SB)
- MOVW $1666, R12
- B runtime·callbackasm1(SB)
- MOVW $1667, R12
- B runtime·callbackasm1(SB)
- MOVW $1668, R12
- B runtime·callbackasm1(SB)
- MOVW $1669, R12
- B runtime·callbackasm1(SB)
- MOVW $1670, R12
- B runtime·callbackasm1(SB)
- MOVW $1671, R12
- B runtime·callbackasm1(SB)
- MOVW $1672, R12
- B runtime·callbackasm1(SB)
- MOVW $1673, R12
- B runtime·callbackasm1(SB)
- MOVW $1674, R12
- B runtime·callbackasm1(SB)
- MOVW $1675, R12
- B runtime·callbackasm1(SB)
- MOVW $1676, R12
- B runtime·callbackasm1(SB)
- MOVW $1677, R12
- B runtime·callbackasm1(SB)
- MOVW $1678, R12
- B runtime·callbackasm1(SB)
- MOVW $1679, R12
- B runtime·callbackasm1(SB)
- MOVW $1680, R12
- B runtime·callbackasm1(SB)
- MOVW $1681, R12
- B runtime·callbackasm1(SB)
- MOVW $1682, R12
- B runtime·callbackasm1(SB)
- MOVW $1683, R12
- B runtime·callbackasm1(SB)
- MOVW $1684, R12
- B runtime·callbackasm1(SB)
- MOVW $1685, R12
- B runtime·callbackasm1(SB)
- MOVW $1686, R12
- B runtime·callbackasm1(SB)
- MOVW $1687, R12
- B runtime·callbackasm1(SB)
- MOVW $1688, R12
- B runtime·callbackasm1(SB)
- MOVW $1689, R12
- B runtime·callbackasm1(SB)
- MOVW $1690, R12
- B runtime·callbackasm1(SB)
- MOVW $1691, R12
- B runtime·callbackasm1(SB)
- MOVW $1692, R12
- B runtime·callbackasm1(SB)
- MOVW $1693, R12
- B runtime·callbackasm1(SB)
- MOVW $1694, R12
- B runtime·callbackasm1(SB)
- MOVW $1695, R12
- B runtime·callbackasm1(SB)
- MOVW $1696, R12
- B runtime·callbackasm1(SB)
- MOVW $1697, R12
- B runtime·callbackasm1(SB)
- MOVW $1698, R12
- B runtime·callbackasm1(SB)
- MOVW $1699, R12
- B runtime·callbackasm1(SB)
- MOVW $1700, R12
- B runtime·callbackasm1(SB)
- MOVW $1701, R12
- B runtime·callbackasm1(SB)
- MOVW $1702, R12
- B runtime·callbackasm1(SB)
- MOVW $1703, R12
- B runtime·callbackasm1(SB)
- MOVW $1704, R12
- B runtime·callbackasm1(SB)
- MOVW $1705, R12
- B runtime·callbackasm1(SB)
- MOVW $1706, R12
- B runtime·callbackasm1(SB)
- MOVW $1707, R12
- B runtime·callbackasm1(SB)
- MOVW $1708, R12
- B runtime·callbackasm1(SB)
- MOVW $1709, R12
- B runtime·callbackasm1(SB)
- MOVW $1710, R12
- B runtime·callbackasm1(SB)
- MOVW $1711, R12
- B runtime·callbackasm1(SB)
- MOVW $1712, R12
- B runtime·callbackasm1(SB)
- MOVW $1713, R12
- B runtime·callbackasm1(SB)
- MOVW $1714, R12
- B runtime·callbackasm1(SB)
- MOVW $1715, R12
- B runtime·callbackasm1(SB)
- MOVW $1716, R12
- B runtime·callbackasm1(SB)
- MOVW $1717, R12
- B runtime·callbackasm1(SB)
- MOVW $1718, R12
- B runtime·callbackasm1(SB)
- MOVW $1719, R12
- B runtime·callbackasm1(SB)
- MOVW $1720, R12
- B runtime·callbackasm1(SB)
- MOVW $1721, R12
- B runtime·callbackasm1(SB)
- MOVW $1722, R12
- B runtime·callbackasm1(SB)
- MOVW $1723, R12
- B runtime·callbackasm1(SB)
- MOVW $1724, R12
- B runtime·callbackasm1(SB)
- MOVW $1725, R12
- B runtime·callbackasm1(SB)
- MOVW $1726, R12
- B runtime·callbackasm1(SB)
- MOVW $1727, R12
- B runtime·callbackasm1(SB)
- MOVW $1728, R12
- B runtime·callbackasm1(SB)
- MOVW $1729, R12
- B runtime·callbackasm1(SB)
- MOVW $1730, R12
- B runtime·callbackasm1(SB)
- MOVW $1731, R12
- B runtime·callbackasm1(SB)
- MOVW $1732, R12
- B runtime·callbackasm1(SB)
- MOVW $1733, R12
- B runtime·callbackasm1(SB)
- MOVW $1734, R12
- B runtime·callbackasm1(SB)
- MOVW $1735, R12
- B runtime·callbackasm1(SB)
- MOVW $1736, R12
- B runtime·callbackasm1(SB)
- MOVW $1737, R12
- B runtime·callbackasm1(SB)
- MOVW $1738, R12
- B runtime·callbackasm1(SB)
- MOVW $1739, R12
- B runtime·callbackasm1(SB)
- MOVW $1740, R12
- B runtime·callbackasm1(SB)
- MOVW $1741, R12
- B runtime·callbackasm1(SB)
- MOVW $1742, R12
- B runtime·callbackasm1(SB)
- MOVW $1743, R12
- B runtime·callbackasm1(SB)
- MOVW $1744, R12
- B runtime·callbackasm1(SB)
- MOVW $1745, R12
- B runtime·callbackasm1(SB)
- MOVW $1746, R12
- B runtime·callbackasm1(SB)
- MOVW $1747, R12
- B runtime·callbackasm1(SB)
- MOVW $1748, R12
- B runtime·callbackasm1(SB)
- MOVW $1749, R12
- B runtime·callbackasm1(SB)
- MOVW $1750, R12
- B runtime·callbackasm1(SB)
- MOVW $1751, R12
- B runtime·callbackasm1(SB)
- MOVW $1752, R12
- B runtime·callbackasm1(SB)
- MOVW $1753, R12
- B runtime·callbackasm1(SB)
- MOVW $1754, R12
- B runtime·callbackasm1(SB)
- MOVW $1755, R12
- B runtime·callbackasm1(SB)
- MOVW $1756, R12
- B runtime·callbackasm1(SB)
- MOVW $1757, R12
- B runtime·callbackasm1(SB)
- MOVW $1758, R12
- B runtime·callbackasm1(SB)
- MOVW $1759, R12
- B runtime·callbackasm1(SB)
- MOVW $1760, R12
- B runtime·callbackasm1(SB)
- MOVW $1761, R12
- B runtime·callbackasm1(SB)
- MOVW $1762, R12
- B runtime·callbackasm1(SB)
- MOVW $1763, R12
- B runtime·callbackasm1(SB)
- MOVW $1764, R12
- B runtime·callbackasm1(SB)
- MOVW $1765, R12
- B runtime·callbackasm1(SB)
- MOVW $1766, R12
- B runtime·callbackasm1(SB)
- MOVW $1767, R12
- B runtime·callbackasm1(SB)
- MOVW $1768, R12
- B runtime·callbackasm1(SB)
- MOVW $1769, R12
- B runtime·callbackasm1(SB)
- MOVW $1770, R12
- B runtime·callbackasm1(SB)
- MOVW $1771, R12
- B runtime·callbackasm1(SB)
- MOVW $1772, R12
- B runtime·callbackasm1(SB)
- MOVW $1773, R12
- B runtime·callbackasm1(SB)
- MOVW $1774, R12
- B runtime·callbackasm1(SB)
- MOVW $1775, R12
- B runtime·callbackasm1(SB)
- MOVW $1776, R12
- B runtime·callbackasm1(SB)
- MOVW $1777, R12
- B runtime·callbackasm1(SB)
- MOVW $1778, R12
- B runtime·callbackasm1(SB)
- MOVW $1779, R12
- B runtime·callbackasm1(SB)
- MOVW $1780, R12
- B runtime·callbackasm1(SB)
- MOVW $1781, R12
- B runtime·callbackasm1(SB)
- MOVW $1782, R12
- B runtime·callbackasm1(SB)
- MOVW $1783, R12
- B runtime·callbackasm1(SB)
- MOVW $1784, R12
- B runtime·callbackasm1(SB)
- MOVW $1785, R12
- B runtime·callbackasm1(SB)
- MOVW $1786, R12
- B runtime·callbackasm1(SB)
- MOVW $1787, R12
- B runtime·callbackasm1(SB)
- MOVW $1788, R12
- B runtime·callbackasm1(SB)
- MOVW $1789, R12
- B runtime·callbackasm1(SB)
- MOVW $1790, R12
- B runtime·callbackasm1(SB)
- MOVW $1791, R12
- B runtime·callbackasm1(SB)
- MOVW $1792, R12
- B runtime·callbackasm1(SB)
- MOVW $1793, R12
- B runtime·callbackasm1(SB)
- MOVW $1794, R12
- B runtime·callbackasm1(SB)
- MOVW $1795, R12
- B runtime·callbackasm1(SB)
- MOVW $1796, R12
- B runtime·callbackasm1(SB)
- MOVW $1797, R12
- B runtime·callbackasm1(SB)
- MOVW $1798, R12
- B runtime·callbackasm1(SB)
- MOVW $1799, R12
- B runtime·callbackasm1(SB)
- MOVW $1800, R12
- B runtime·callbackasm1(SB)
- MOVW $1801, R12
- B runtime·callbackasm1(SB)
- MOVW $1802, R12
- B runtime·callbackasm1(SB)
- MOVW $1803, R12
- B runtime·callbackasm1(SB)
- MOVW $1804, R12
- B runtime·callbackasm1(SB)
- MOVW $1805, R12
- B runtime·callbackasm1(SB)
- MOVW $1806, R12
- B runtime·callbackasm1(SB)
- MOVW $1807, R12
- B runtime·callbackasm1(SB)
- MOVW $1808, R12
- B runtime·callbackasm1(SB)
- MOVW $1809, R12
- B runtime·callbackasm1(SB)
- MOVW $1810, R12
- B runtime·callbackasm1(SB)
- MOVW $1811, R12
- B runtime·callbackasm1(SB)
- MOVW $1812, R12
- B runtime·callbackasm1(SB)
- MOVW $1813, R12
- B runtime·callbackasm1(SB)
- MOVW $1814, R12
- B runtime·callbackasm1(SB)
- MOVW $1815, R12
- B runtime·callbackasm1(SB)
- MOVW $1816, R12
- B runtime·callbackasm1(SB)
- MOVW $1817, R12
- B runtime·callbackasm1(SB)
- MOVW $1818, R12
- B runtime·callbackasm1(SB)
- MOVW $1819, R12
- B runtime·callbackasm1(SB)
- MOVW $1820, R12
- B runtime·callbackasm1(SB)
- MOVW $1821, R12
- B runtime·callbackasm1(SB)
- MOVW $1822, R12
- B runtime·callbackasm1(SB)
- MOVW $1823, R12
- B runtime·callbackasm1(SB)
- MOVW $1824, R12
- B runtime·callbackasm1(SB)
- MOVW $1825, R12
- B runtime·callbackasm1(SB)
- MOVW $1826, R12
- B runtime·callbackasm1(SB)
- MOVW $1827, R12
- B runtime·callbackasm1(SB)
- MOVW $1828, R12
- B runtime·callbackasm1(SB)
- MOVW $1829, R12
- B runtime·callbackasm1(SB)
- MOVW $1830, R12
- B runtime·callbackasm1(SB)
- MOVW $1831, R12
- B runtime·callbackasm1(SB)
- MOVW $1832, R12
- B runtime·callbackasm1(SB)
- MOVW $1833, R12
- B runtime·callbackasm1(SB)
- MOVW $1834, R12
- B runtime·callbackasm1(SB)
- MOVW $1835, R12
- B runtime·callbackasm1(SB)
- MOVW $1836, R12
- B runtime·callbackasm1(SB)
- MOVW $1837, R12
- B runtime·callbackasm1(SB)
- MOVW $1838, R12
- B runtime·callbackasm1(SB)
- MOVW $1839, R12
- B runtime·callbackasm1(SB)
- MOVW $1840, R12
- B runtime·callbackasm1(SB)
- MOVW $1841, R12
- B runtime·callbackasm1(SB)
- MOVW $1842, R12
- B runtime·callbackasm1(SB)
- MOVW $1843, R12
- B runtime·callbackasm1(SB)
- MOVW $1844, R12
- B runtime·callbackasm1(SB)
- MOVW $1845, R12
- B runtime·callbackasm1(SB)
- MOVW $1846, R12
- B runtime·callbackasm1(SB)
- MOVW $1847, R12
- B runtime·callbackasm1(SB)
- MOVW $1848, R12
- B runtime·callbackasm1(SB)
- MOVW $1849, R12
- B runtime·callbackasm1(SB)
- MOVW $1850, R12
- B runtime·callbackasm1(SB)
- MOVW $1851, R12
- B runtime·callbackasm1(SB)
- MOVW $1852, R12
- B runtime·callbackasm1(SB)
- MOVW $1853, R12
- B runtime·callbackasm1(SB)
- MOVW $1854, R12
- B runtime·callbackasm1(SB)
- MOVW $1855, R12
- B runtime·callbackasm1(SB)
- MOVW $1856, R12
- B runtime·callbackasm1(SB)
- MOVW $1857, R12
- B runtime·callbackasm1(SB)
- MOVW $1858, R12
- B runtime·callbackasm1(SB)
- MOVW $1859, R12
- B runtime·callbackasm1(SB)
- MOVW $1860, R12
- B runtime·callbackasm1(SB)
- MOVW $1861, R12
- B runtime·callbackasm1(SB)
- MOVW $1862, R12
- B runtime·callbackasm1(SB)
- MOVW $1863, R12
- B runtime·callbackasm1(SB)
- MOVW $1864, R12
- B runtime·callbackasm1(SB)
- MOVW $1865, R12
- B runtime·callbackasm1(SB)
- MOVW $1866, R12
- B runtime·callbackasm1(SB)
- MOVW $1867, R12
- B runtime·callbackasm1(SB)
- MOVW $1868, R12
- B runtime·callbackasm1(SB)
- MOVW $1869, R12
- B runtime·callbackasm1(SB)
- MOVW $1870, R12
- B runtime·callbackasm1(SB)
- MOVW $1871, R12
- B runtime·callbackasm1(SB)
- MOVW $1872, R12
- B runtime·callbackasm1(SB)
- MOVW $1873, R12
- B runtime·callbackasm1(SB)
- MOVW $1874, R12
- B runtime·callbackasm1(SB)
- MOVW $1875, R12
- B runtime·callbackasm1(SB)
- MOVW $1876, R12
- B runtime·callbackasm1(SB)
- MOVW $1877, R12
- B runtime·callbackasm1(SB)
- MOVW $1878, R12
- B runtime·callbackasm1(SB)
- MOVW $1879, R12
- B runtime·callbackasm1(SB)
- MOVW $1880, R12
- B runtime·callbackasm1(SB)
- MOVW $1881, R12
- B runtime·callbackasm1(SB)
- MOVW $1882, R12
- B runtime·callbackasm1(SB)
- MOVW $1883, R12
- B runtime·callbackasm1(SB)
- MOVW $1884, R12
- B runtime·callbackasm1(SB)
- MOVW $1885, R12
- B runtime·callbackasm1(SB)
- MOVW $1886, R12
- B runtime·callbackasm1(SB)
- MOVW $1887, R12
- B runtime·callbackasm1(SB)
- MOVW $1888, R12
- B runtime·callbackasm1(SB)
- MOVW $1889, R12
- B runtime·callbackasm1(SB)
- MOVW $1890, R12
- B runtime·callbackasm1(SB)
- MOVW $1891, R12
- B runtime·callbackasm1(SB)
- MOVW $1892, R12
- B runtime·callbackasm1(SB)
- MOVW $1893, R12
- B runtime·callbackasm1(SB)
- MOVW $1894, R12
- B runtime·callbackasm1(SB)
- MOVW $1895, R12
- B runtime·callbackasm1(SB)
- MOVW $1896, R12
- B runtime·callbackasm1(SB)
- MOVW $1897, R12
- B runtime·callbackasm1(SB)
- MOVW $1898, R12
- B runtime·callbackasm1(SB)
- MOVW $1899, R12
- B runtime·callbackasm1(SB)
- MOVW $1900, R12
- B runtime·callbackasm1(SB)
- MOVW $1901, R12
- B runtime·callbackasm1(SB)
- MOVW $1902, R12
- B runtime·callbackasm1(SB)
- MOVW $1903, R12
- B runtime·callbackasm1(SB)
- MOVW $1904, R12
- B runtime·callbackasm1(SB)
- MOVW $1905, R12
- B runtime·callbackasm1(SB)
- MOVW $1906, R12
- B runtime·callbackasm1(SB)
- MOVW $1907, R12
- B runtime·callbackasm1(SB)
- MOVW $1908, R12
- B runtime·callbackasm1(SB)
- MOVW $1909, R12
- B runtime·callbackasm1(SB)
- MOVW $1910, R12
- B runtime·callbackasm1(SB)
- MOVW $1911, R12
- B runtime·callbackasm1(SB)
- MOVW $1912, R12
- B runtime·callbackasm1(SB)
- MOVW $1913, R12
- B runtime·callbackasm1(SB)
- MOVW $1914, R12
- B runtime·callbackasm1(SB)
- MOVW $1915, R12
- B runtime·callbackasm1(SB)
- MOVW $1916, R12
- B runtime·callbackasm1(SB)
- MOVW $1917, R12
- B runtime·callbackasm1(SB)
- MOVW $1918, R12
- B runtime·callbackasm1(SB)
- MOVW $1919, R12
- B runtime·callbackasm1(SB)
- MOVW $1920, R12
- B runtime·callbackasm1(SB)
- MOVW $1921, R12
- B runtime·callbackasm1(SB)
- MOVW $1922, R12
- B runtime·callbackasm1(SB)
- MOVW $1923, R12
- B runtime·callbackasm1(SB)
- MOVW $1924, R12
- B runtime·callbackasm1(SB)
- MOVW $1925, R12
- B runtime·callbackasm1(SB)
- MOVW $1926, R12
- B runtime·callbackasm1(SB)
- MOVW $1927, R12
- B runtime·callbackasm1(SB)
- MOVW $1928, R12
- B runtime·callbackasm1(SB)
- MOVW $1929, R12
- B runtime·callbackasm1(SB)
- MOVW $1930, R12
- B runtime·callbackasm1(SB)
- MOVW $1931, R12
- B runtime·callbackasm1(SB)
- MOVW $1932, R12
- B runtime·callbackasm1(SB)
- MOVW $1933, R12
- B runtime·callbackasm1(SB)
- MOVW $1934, R12
- B runtime·callbackasm1(SB)
- MOVW $1935, R12
- B runtime·callbackasm1(SB)
- MOVW $1936, R12
- B runtime·callbackasm1(SB)
- MOVW $1937, R12
- B runtime·callbackasm1(SB)
- MOVW $1938, R12
- B runtime·callbackasm1(SB)
- MOVW $1939, R12
- B runtime·callbackasm1(SB)
- MOVW $1940, R12
- B runtime·callbackasm1(SB)
- MOVW $1941, R12
- B runtime·callbackasm1(SB)
- MOVW $1942, R12
- B runtime·callbackasm1(SB)
- MOVW $1943, R12
- B runtime·callbackasm1(SB)
- MOVW $1944, R12
- B runtime·callbackasm1(SB)
- MOVW $1945, R12
- B runtime·callbackasm1(SB)
- MOVW $1946, R12
- B runtime·callbackasm1(SB)
- MOVW $1947, R12
- B runtime·callbackasm1(SB)
- MOVW $1948, R12
- B runtime·callbackasm1(SB)
- MOVW $1949, R12
- B runtime·callbackasm1(SB)
- MOVW $1950, R12
- B runtime·callbackasm1(SB)
- MOVW $1951, R12
- B runtime·callbackasm1(SB)
- MOVW $1952, R12
- B runtime·callbackasm1(SB)
- MOVW $1953, R12
- B runtime·callbackasm1(SB)
- MOVW $1954, R12
- B runtime·callbackasm1(SB)
- MOVW $1955, R12
- B runtime·callbackasm1(SB)
- MOVW $1956, R12
- B runtime·callbackasm1(SB)
- MOVW $1957, R12
- B runtime·callbackasm1(SB)
- MOVW $1958, R12
- B runtime·callbackasm1(SB)
- MOVW $1959, R12
- B runtime·callbackasm1(SB)
- MOVW $1960, R12
- B runtime·callbackasm1(SB)
- MOVW $1961, R12
- B runtime·callbackasm1(SB)
- MOVW $1962, R12
- B runtime·callbackasm1(SB)
- MOVW $1963, R12
- B runtime·callbackasm1(SB)
- MOVW $1964, R12
- B runtime·callbackasm1(SB)
- MOVW $1965, R12
- B runtime·callbackasm1(SB)
- MOVW $1966, R12
- B runtime·callbackasm1(SB)
- MOVW $1967, R12
- B runtime·callbackasm1(SB)
- MOVW $1968, R12
- B runtime·callbackasm1(SB)
- MOVW $1969, R12
- B runtime·callbackasm1(SB)
- MOVW $1970, R12
- B runtime·callbackasm1(SB)
- MOVW $1971, R12
- B runtime·callbackasm1(SB)
- MOVW $1972, R12
- B runtime·callbackasm1(SB)
- MOVW $1973, R12
- B runtime·callbackasm1(SB)
- MOVW $1974, R12
- B runtime·callbackasm1(SB)
- MOVW $1975, R12
- B runtime·callbackasm1(SB)
- MOVW $1976, R12
- B runtime·callbackasm1(SB)
- MOVW $1977, R12
- B runtime·callbackasm1(SB)
- MOVW $1978, R12
- B runtime·callbackasm1(SB)
- MOVW $1979, R12
- B runtime·callbackasm1(SB)
- MOVW $1980, R12
- B runtime·callbackasm1(SB)
- MOVW $1981, R12
- B runtime·callbackasm1(SB)
- MOVW $1982, R12
- B runtime·callbackasm1(SB)
- MOVW $1983, R12
- B runtime·callbackasm1(SB)
- MOVW $1984, R12
- B runtime·callbackasm1(SB)
- MOVW $1985, R12
- B runtime·callbackasm1(SB)
- MOVW $1986, R12
- B runtime·callbackasm1(SB)
- MOVW $1987, R12
- B runtime·callbackasm1(SB)
- MOVW $1988, R12
- B runtime·callbackasm1(SB)
- MOVW $1989, R12
- B runtime·callbackasm1(SB)
- MOVW $1990, R12
- B runtime·callbackasm1(SB)
- MOVW $1991, R12
- B runtime·callbackasm1(SB)
- MOVW $1992, R12
- B runtime·callbackasm1(SB)
- MOVW $1993, R12
- B runtime·callbackasm1(SB)
- MOVW $1994, R12
- B runtime·callbackasm1(SB)
- MOVW $1995, R12
- B runtime·callbackasm1(SB)
- MOVW $1996, R12
- B runtime·callbackasm1(SB)
- MOVW $1997, R12
- B runtime·callbackasm1(SB)
- MOVW $1998, R12
- B runtime·callbackasm1(SB)
- MOVW $1999, R12
- B runtime·callbackasm1(SB)
diff --git a/src/syscall/dirent_test.go b/src/syscall/dirent_test.go
index cfa5478feb1bc8..173ccc3ed28ef3 100644
--- a/src/syscall/dirent_test.go
+++ b/src/syscall/dirent_test.go
@@ -140,7 +140,7 @@ func TestDirentRepeat(t *testing.T) {
// Check results
slices.Sort(files)
slices.Sort(files2)
- if strings.Join(files, "|") != strings.Join(files2, "|") {
+ if !slices.Equal(files, files2) {
t.Errorf("bad file list: want\n%q\ngot\n%q", files, files2)
}
}
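// A hedged aside on the change above: comparing strings.Join results can
// report two different lists as equal when an element itself contains the
// separator, while slices.Equal compares element by element. A minimal,
// self-contained sketch (the values here are illustrative, not from the test):
//
//	package main
//
//	import (
//		"fmt"
//		"slices"
//		"strings"
//	)
//
//	func main() {
//		a := []string{"x|y"}
//		b := []string{"x", "y"}
//		fmt.Println(strings.Join(a, "|") == strings.Join(b, "|")) // true: a false match
//		fmt.Println(slices.Equal(a, b))                           // false: correct
//	}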
diff --git a/src/syscall/getdirentries_test.go b/src/syscall/getdirentries_test.go
index 5d401d8dd6fa2c..b5361ddaef7024 100644
--- a/src/syscall/getdirentries_test.go
+++ b/src/syscall/getdirentries_test.go
@@ -11,7 +11,6 @@ import (
"os"
"path/filepath"
"slices"
- "strings"
"syscall"
"testing"
"unsafe"
@@ -78,7 +77,7 @@ func testGetdirentries(t *testing.T, count int) {
names = append(names, ".", "..") // Getdirentries returns these also
slices.Sort(names)
slices.Sort(names2)
- if strings.Join(names, ":") != strings.Join(names2, ":") {
+ if !slices.Equal(names, names2) {
t.Errorf("names don't match\n names: %q\nnames2: %q", names, names2)
}
}
diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go
index 01c039cf287047..c1416b3731056b 100644
--- a/src/syscall/syscall_windows.go
+++ b/src/syscall/syscall_windows.go
@@ -14,7 +14,6 @@ import (
"internal/msan"
"internal/oserror"
"internal/race"
- "runtime"
"sync"
"unsafe"
)
@@ -525,18 +524,8 @@ func setFilePointerEx(handle Handle, distToMove int64, newFilePointer *int64, wh
if unsafe.Sizeof(uintptr(0)) == 8 {
_, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 4, uintptr(handle), uintptr(distToMove), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0, 0)
} else {
-	// Different 32-bit systems disagree about whether distToMove starts 8-byte aligned.
- switch runtime.GOARCH {
- default:
- panic("unsupported 32-bit architecture")
- case "386":
- // distToMove is a LARGE_INTEGER, which is 64 bits.
- _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 5, uintptr(handle), uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0)
- case "arm":
- // distToMove must be 8-byte aligned per ARM calling convention
- // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions#stage-c-assignment-of-arguments-to-registers-and-stack
- _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 6, uintptr(handle), 0, uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence))
- }
+ // distToMove is a LARGE_INTEGER, which is 64 bits.
+ _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 5, uintptr(handle), uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0)
}
if e1 != 0 {
return errnoErr(e1)
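// A hedged note on the simplification above: with the windows/arm port gone,
// 386 is the only remaining 32-bit Windows target, so the GOARCH switch
// collapses to the 386 path. That path passes the 64-bit LARGE_INTEGER to the
// syscall as two 32-bit words, low word first; splitLargeInteger below is an
// illustrative helper showing the split, not part of the patch:
//
//	func splitLargeInteger(v int64) (lo, hi uintptr) {
//		lo = uintptr(v)       // low 32 bits (uintptr is 32 bits on 386)
//		hi = uintptr(v >> 32) // high 32 bits
//		return
//	}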
diff --git a/src/syscall/types_windows_arm.go b/src/syscall/types_windows_arm.go
deleted file mode 100644
index e72e9f5ced2bd0..00000000000000
--- a/src/syscall/types_windows_arm.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syscall
-
-type WSAData struct {
- Version uint16
- HighVersion uint16
- Description [WSADESCRIPTION_LEN + 1]byte
- SystemStatus [WSASYS_STATUS_LEN + 1]byte
- MaxSockets uint16
- MaxUdpDg uint16
- VendorInfo *byte
-}
-
-type Servent struct {
- Name *byte
- Aliases **byte
- Port uint16
- Proto *byte
-}
diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
index 4d733135fe5a85..c28c3ea2002d21 100644
--- a/src/text/template/funcs.go
+++ b/src/text/template/funcs.go
@@ -62,26 +62,13 @@ func builtins() FuncMap {
}
}
-var builtinFuncsOnce struct {
- sync.Once
- v map[string]reflect.Value
-}
-
-// builtinFuncsOnce lazily computes & caches the builtinFuncs map.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtinFuncs() map[string]reflect.Value {
- builtinFuncsOnce.Do(func() {
- builtinFuncsOnce.v = createValueFuncs(builtins())
- })
- return builtinFuncsOnce.v
-}
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
+// builtinFuncs lazily computes & caches the map of builtin functions.
+var builtinFuncs = sync.OnceValue(func() map[string]reflect.Value {
+ funcMap := builtins()
+ m := make(map[string]reflect.Value, len(funcMap))
addValueFuncs(m, funcMap)
return m
-}
+})
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
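// For readers unfamiliar with the API used above: sync.OnceValue (added in
// Go 1.21) wraps a func() T so the wrapped function runs at most once and
// every later call returns the cached result, replacing the hand-rolled
// sync.Once-plus-field pattern deleted here. A minimal sketch, with the map
// literal standing in for builtins():
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	var cached = sync.OnceValue(func() map[string]int {
//		fmt.Println("computed once")
//		return map[string]int{"len": 1, "print": 2}
//	})
//
//	func main() {
//		_ = cached() // prints "computed once"
//		_ = cached() // served from cache; the function does not run again
//		fmt.Println(len(cached()))
//	}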
diff --git a/src/time/tick_test.go b/src/time/tick_test.go
index 416bef59ee91ee..d89d2dbdeadf2f 100644
--- a/src/time/tick_test.go
+++ b/src/time/tick_test.go
@@ -151,7 +151,7 @@ func TestTickerResetLtZeroDuration(t *testing.T) {
}
func TestLongAdjustTimers(t *testing.T) {
- if runtime.GOOS == "android" || runtime.GOOS == "ios" {
+ if runtime.GOOS == "android" || runtime.GOOS == "ios" || runtime.GOOS == "plan9" {
t.Skipf("skipping on %s - too slow", runtime.GOOS)
}
t.Parallel()
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index 9f400065bdb696..39a7986c7bc246 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -314,6 +314,18 @@ func MergeMuls5(a, n int) int {
return a*n - 19*n // (a-19)n
}
+// Multiplications with folded negation
+
+func FoldNegMul(a int) int {
+ // loong64:"MULV","MOVV\t[$]-11",-"SUBVU\tR[0-9], R0,"
+ return (-a) * 11
+}
+
+func Fold2NegMul(a, b int) int {
+ // loong64:"MULV",-"SUBVU\tR[0-9], R0,"
+ return (-a) * (-b)
+}
+
// -------------- //
// Division //
// -------------- //
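// The algebra behind the two tests above, as a hedged sketch: folding the
// negation into the multiply lets the compiler drop the separate negate
// (the SUBVU from R0 on loong64). foldIdentities is illustrative only:
//
//	func foldIdentities(a, b int) (int, int) {
//		x := a * (-11) // same value as (-a)*11: negation folded into the constant
//		y := a * b     // same value as (-a)*(-b): the paired negations cancel
//		return x, y
//	}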
diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go
index 79dd337dee2234..8d6ea3c5c74664 100644
--- a/test/codegen/fuse.go
+++ b/test/codegen/fuse.go
@@ -195,3 +195,24 @@ func ui4d(c <-chan uint8) {
for x := <-c; x < 126 || x >= 128; x = <-c {
}
}
+
+// ------------------------------------ //
+// regressions //
+// ------------------------------------ //
+
+func gte4(x uint64) bool {
+ return x >= 4
+}
+
+func lt20(x uint64) bool {
+ return x < 20
+}
+
+func issue74915(c <-chan uint64) {
+ // Check that the optimization is not blocked by function inlining.
+
+ // amd64:"CMPQ\t.+, [$]16","ADDQ\t[$]-4,"
+ // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]16","ADD\t[$]-4,"
+ for x := <-c; gte4(x) && lt20(x); x = <-c {
+ }
+}
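// What the codegen strings above are checking, sketched under the stated
// bounds: the two fused comparisons 4 <= x && x < 20 reduce to a single
// unsigned subtract-and-compare, hence the ADDQ $-4 followed by a compare
// against 16. inRangeFused is an illustrative equivalent, not test code:
//
//	func inRangeFused(x uint64) bool {
//		// x >= 4 && x < 20  ==  x-4 < 16 (x-4 wraps to a huge value when x < 4)
//		return x-4 < 16
//	}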
diff --git a/test/codegen/math.go b/test/codegen/math.go
index 87d9cd7b2715ba..4272e4ef887bff 100644
--- a/test/codegen/math.go
+++ b/test/codegen/math.go
@@ -160,6 +160,7 @@ func fromFloat64(f64 float64) uint64 {
// loong64:"MOVV\tF.*, R.*"
// ppc64x:"MFVSRD"
// mips64/hardfloat:"MOVV\tF.*, R.*"
+ // riscv64:"FMVXD"
return math.Float64bits(f64+1) + 1
}
@@ -168,6 +169,7 @@ func fromFloat32(f32 float32) uint32 {
// arm64:"FMOVS\tF.*, R.*"
// loong64:"MOVW\tF.*, R.*"
// mips64/hardfloat:"MOVW\tF.*, R.*"
+ // riscv64:"FMVXW"
return math.Float32bits(f32+1) + 1
}
@@ -177,6 +179,7 @@ func toFloat64(u64 uint64) float64 {
// loong64:"MOVV\tR.*, F.*"
// ppc64x:"MTVSRD"
// mips64/hardfloat:"MOVV\tR.*, F.*"
+ // riscv64:"FMVDX"
return math.Float64frombits(u64+1) + 1
}
@@ -185,6 +188,7 @@ func toFloat32(u32 uint32) float32 {
// arm64:"FMOVS\tR.*, F.*"
// loong64:"MOVW\tR.*, F.*"
// mips64/hardfloat:"MOVW\tR.*, F.*"
+ // riscv64:"FMVWX"
return math.Float32frombits(u32+1) + 1
}
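// Context for the new riscv64 checks: math.Float64bits and friends are pure
// bit reinterpretations, so they can compile to a single register-to-register
// move (FMVXD/FMVDX for float64, FMVXW/FMVWX for float32). A small,
// self-contained sketch:
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func main() {
//		bits := math.Float64bits(1.0)
//		fmt.Printf("%#x\n", bits)               // 0x3ff0000000000000
//		fmt.Println(math.Float64frombits(bits)) // 1
//	}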