diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..33318bc2
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: KurokuLabs
+open_collective: gosublime
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom:
+ - https://margo.sh/funding/
diff --git a/.github/workflows/gs-ci.yml b/.github/workflows/gs-ci.yml
new file mode 100644
index 00000000..3f913ef6
--- /dev/null
+++ b/.github/workflows/gs-ci.yml
@@ -0,0 +1,22 @@
+on: [push, pull_request]
+name: margo-ci
+jobs:
+ margo-ci:
+ strategy:
+ matrix:
+ go-version: [1.13.x, 1.14.x]
+ platform: [ubuntu-latest, macos-latest, windows-latest]
+ runs-on: ${{ matrix.platform }}
+ steps:
+ - name: Setup
+ uses: actions/setup-go@v1
+ with:
+ go-version: ${{ matrix.go-version }}
+ - name: Checkout
+ uses: actions/checkout@v2
+ with:
+ path: gs
+ - name: CI
+ env:
+ GOPATH: ${{ github.workspace }}/gs
+ run: go install -v gosublime/cmd/margo
diff --git a/.gitignore b/.gitignore
index 05139765..4616469e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+_before.py
+_after.py
bin/**
pkg/**
!.keep
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 2988433b..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-go:
- - 1.8
- - 1.x
-install: true
-script:
- - env GOPATH=$PWD go install -v gosublime/cmd/margo
diff --git a/Ariana.sublime-color-scheme b/Ariana.sublime-color-scheme
new file mode 100644
index 00000000..a8f707d2
--- /dev/null
+++ b/Ariana.sublime-color-scheme
@@ -0,0 +1,313 @@
+{
+ "name": "Ariana",
+ "author": "DisposaBoy, Sublime HQ Pty Ltd, Dmitri Voronianski",
+ "variables":
+ {
+ "black": "hsl(0, 0%, 0%)",
+ "blue": "hsl(210, 50%, 60%)",
+ "blue2": "hsl(215, 33%, 29%)",
+ "blue3": "hsl(215, 40%, 21%)",
+ "blue4": "hsl(210, 13%, 45%)",
+ "blue5": "hsl(180, 36%, 54%)",
+ "blue6": "hsl(221, 12%, 69%)",
+ "green": "hsl(114, 31%, 68%)",
+ "grey": "hsl(0, 0%, 20%)",
+ "orange": "hsl(32, 93%, 66%)",
+ "orange2": "hsl(32, 85%, 55%)",
+ "orange3": "hsl(40, 94%, 68%)",
+ "pink": "hsl(300, 30%, 68%)",
+ "red": "hsl(357, 79%, 65%)",
+ "red2": "hsl(13, 93%, 66%)",
+ "white": "hsl(0, 0%, 100%)",
+ "white2": "hsl(0, 0%, 97%)",
+ "white3": "hsl(219, 28%, 93%)"
+ },
+ "globals":
+ {
+ "foreground": "var(white3)",
+ "background": "var(blue3)",
+ "caret": "var(orange)",
+ "line_highlight": "var(blue2)",
+ "selection": "var(blue2)",
+ "selection_border": "var(blue4)",
+ "inactive_selection": "var(blue2)",
+ "misspelling": "var(red)",
+ "shadow": "color(var(black) alpha(0.25))",
+ "active_guide": "var(blue5)",
+ "stack_guide": "color(var(blue5) alpha(0.5))",
+ "highlight": "var(blue5)",
+ "find_highlight_foreground": "var(grey)",
+ "find_highlight": "var(orange3)",
+ "brackets_options": "underline",
+ "brackets_foreground": "var(orange)",
+ "bracket_contents_options": "underline",
+ "bracket_contents_foreground": "var(blue5)",
+ "tags_options": "stippled_underline",
+ "tags_foreground": "var(pink)"
+ },
+ "rules":
+ [
+ {
+ "name": "Comment",
+ "scope": "comment, punctuation.definition.comment",
+ "foreground": "var(blue6)"
+ },
+ {
+ "name": "String",
+ "scope": "string",
+ "foreground": "var(green)"
+ },
+ {
+ "name": "Punctuation",
+ "scope": "punctuation.definition",
+ "foreground": "var(blue5)"
+ },
+ {
+ "name": "Number",
+ "scope": "constant.numeric",
+ "foreground": "var(orange)"
+ },
+ {
+ "name": "Built-in constant",
+ "scope": "constant.language",
+ "foreground": "var(red)",
+ "font_style": "italic"
+ },
+ {
+ "name": "User-defined constant",
+ "scope": "constant.character, constant.other",
+ "foreground": "var(pink)"
+ },
+ {
+ "name": "Member Variable",
+ "scope": "variable.member",
+ "foreground": "var(red)"
+ },
+ {
+ "name": "Keyword",
+ "scope": "keyword - keyword.operator, keyword.operator.word",
+ "foreground": "var(pink)"
+ },
+ {
+ "name": "Operators",
+ "scope": "keyword.operator",
+ "foreground": "var(red2)"
+ },
+ {
+ "name": "Punctuation",
+ "scope": "punctuation.separator, punctuation.terminator",
+ "foreground": "var(blue6)"
+ },
+ {
+ "name": "Punctuation",
+ "scope": "punctuation.section",
+ "foreground": "var(white)"
+ },
+ {
+ "name": "Accessor",
+ "scope": "punctuation.accessor",
+ "foreground": "var(blue6)"
+ },
+ {
+ "name": "Annotation Punctuation",
+ "scope": "punctuation.definition.annotation",
+ "foreground": "var(blue5)"
+ },
+ {
+ "name": "JavaScript Dollar",
+ "scope": "variable.other.dollar.only.js, variable.other.object.dollar.only.js, variable.type.dollar.only.js, support.class.dollar.only.js",
+ "foreground": "var(blue5)"
+ },
+ {
+ "name": "Storage",
+ "scope": "storage",
+ "foreground": "var(red)"
+ },
+ {
+ "name": "Storage type",
+ "scope": "storage.type",
+ "foreground": "var(pink)",
+ "font_style": "italic"
+ },
+ {
+ "name": "Entity name",
+ "scope": "entity.name.function",
+ "foreground": "var(blue5)"
+ },
+ {
+ "name": "Entity name",
+ "scope": "entity.name - (entity.name.section | entity.name.tag | entity.name.label)",
+ "foreground": "var(orange)"
+ },
+ {
+ "name": "Inherited class",
+ "scope": "entity.other.inherited-class",
+ "foreground": "var(blue5)",
+ "font_style": "italic underline"
+ },
+ {
+ "name": "Function argument",
+ "scope": "variable.parameter",
+ "foreground": "var(orange)"
+ },
+ {
+ "name": "Language variable",
+ "scope": "variable.language",
+ "foreground": "var(red)",
+ "font_style": "italic"
+ },
+ {
+ "name": "Tag name",
+ "scope": "entity.name.tag",
+ "foreground": "var(red)"
+ },
+ {
+ "name": "Tag attribute",
+ "scope": "entity.other.attribute-name",
+ "foreground": "var(pink)"
+ },
+ {
+ "name": "Function call",
+ "scope": "variable.function, variable.annotation",
+ "foreground": "var(blue)"
+ },
+ {
+ "name": "Library function",
+ "scope": "support.function, support.macro",
+ "foreground": "var(blue)",
+ "font_style": "italic"
+ },
+ {
+ "name": "Library constant",
+ "scope": "support.constant",
+ "foreground": "var(pink)",
+ "font_style": "italic"
+ },
+ {
+ "name": "Library class/type",
+ "scope": "support.type, support.class",
+ "foreground": "var(blue)",
+ "font_style": "italic"
+ },
+ {
+ "name": "Invalid",
+ "scope": "invalid",
+ "foreground": "var(white2)",
+ "background": "var(red)"
+ },
+ {
+ "name": "Invalid deprecated",
+ "scope": "invalid.deprecated",
+ "foreground": "var(white2)",
+ "background": "var(orange2)"
+ },
+ {
+ "name": "YAML Key",
+ "scope": "entity.name.tag.yaml",
+ "foreground": "var(blue5)"
+ },
+ {
+ "name": "YAML String",
+ "scope": "source.yaml string.unquoted",
+ "foreground": "var(white3)"
+ },
+ {
+ "name": "markup headings",
+ "scope": "markup.heading",
+ "font_style": "bold"
+ },
+ {
+ "name": "markup headings",
+ "scope": "markup.heading punctuation.definition.heading",
+ "foreground": "var(red2)"
+ },
+ {
+ "name": "markup h1",
+ "scope": "markup.heading.1 punctuation.definition.heading",
+ "foreground": "var(red)"
+ },
+ {
+ "name": "markup links",
+ "scope": "string.other.link, markup.underline.link",
+ "foreground": "var(blue)"
+ },
+ {
+ "name": "markup bold",
+ "scope": "markup.bold",
+ "font_style": "bold"
+ },
+ {
+ "name": "markup italic",
+ "scope": "markup.italic",
+ "font_style": "italic"
+ },
+ {
+ "name": "markup bold/italic",
+ "scope": "markup.italic markup.bold | markup.bold markup.italic",
+ "font_style": "bold italic"
+ },
+ {
+ "name": "markup hr",
+ "scope": "punctuation.definition.thematic-break",
+ "foreground": "var(orange)"
+ },
+ {
+ "name": "markup numbered list bullet",
+ "scope": "markup.list.numbered.bullet",
+ "foreground": "var(green)"
+ },
+ {
+ "name": "markup blockquote",
+ "scope": "markup.quote punctuation.definition.blockquote, markup.list punctuation.definition.list_item",
+ "foreground": "var(orange)"
+ },
+ {
+ "name": "markup code",
+ "scope": "markup.raw",
+ "background": "color(var(blue2) alpha(0.38))"
+ },
+ {
+ "name": "markup code",
+ "scope": "markup.raw.inline",
+ "background": "color(var(blue2) alpha(0.5))"
+ },
+ {
+ "name": "markup punctuation",
+ "scope": "(text punctuation.definition.italic | text punctuation.definition.bold)",
+ "foreground": "var(pink)"
+ },
+ {
+ "name": "diff.header",
+ "scope": "meta.diff, meta.diff.header",
+ "foreground": "var(pink)"
+ },
+ {
+ "name": "diff.deleted",
+ "scope": "markup.deleted",
+ "foreground": "var(red)"
+ },
+ {
+ "name": "diff.inserted",
+ "scope": "markup.inserted",
+ "foreground": "var(green)"
+ },
+ {
+ "name": "diff.changed",
+ "scope": "markup.changed",
+ "foreground": "var(orange)"
+ },
+ {
+ "name": "CSS Properties",
+ "scope": "support.type.property-name",
+ "foreground": "var(white3)"
+ },
+ {
+ "scope": "constant.numeric.line-number.match",
+ "foreground": "var(red)"
+ },
+ {
+ "scope": "message.error",
+ "foreground": "var(red)"
+ }
+ ]
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 13691864..2d69810b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,1078 +1,555 @@
+## Links:
+https://margo.sh/donate - Help support future development of GoSublime.
-Help shape the future of margo and GoSublime.
-See https://margo.sh/gosublime-future for more details.
-
-Thank you all for the great feedback so far!!! :D
-Base on your feedback, we've decided to join https://margo.sh/s/patreon/ and https://margo.sh/s/twitter/.
-We plan to explore more direct funding through Stripe soon. More updates will be posted to Twitter.
+https://margo.sh/b/hello-margo - A short introduction to margo.
+https://margo.sh/b/motd - Get notified when GoSublime has a new release.
-**Donate:**
-
-If you find GoSublime useful and would like to support me and future development of GoSublime,
-please donate via one of the available methods on https://github.com/DisposaBoy/GoSublime#donations
+---
+## Changes
-**margo:**
+## 20.06.14
-The new version of margo is close to being ready for real usage.
-If you'd like to test it out, press `ctrl+.`,`ctrl+x` or `cmd+.`,`cmd+x`
-to open the extension file and then save it and restart sublime text.
-
-Please make sure to read all the comments, as enabling it will affect GoSublime features.
-
-**Changes:**
-
-## 18.05.19
- * Improve ligature support in the `GoSublime: Go` syntax
+This release contains a number of features and bug fixes that have been worked on over the last few months.
-## 18.04.30-1
- * fix margo build failure in some cases when the go compiler is up/down-graded
+_You will need to restart Sublime Text for all changes to take effect_
-## 18.04.19-1
- * fix margo build failure when `use_gs_gopath` is enabled
+- Add new GoCmd{} option `Humanize` to make `go test` and `go re/play` (in test mode) output more readable (using https://github.com/dustin/go-humanize).
-## 18.03.26-1
- * update gocode
- * fix gocode completion showing duplicate function parameter names
- * add support for restricting the files in which events are triggered
- * (hopefully) fix an issue where some users were experiencing laggy input
-
-## 18.03.23-1
- * GOPATH is automatically set to the internal GOPATH when editing margo source files
- this allows for things like linters and gocode to work when editing `margo.go`
-
- * margo is now only automatically restarted if `go test` succeeds
- this further supports linters by not restarting when `margo.go` compilation fails
-
- * add a default linter pattern for gometalinter
- the following reducer/linter should now work without any additional setup
-
- ```
- &golang.Linter{Name: "gometalinter", Args: []string{"--fast"}},
-
- ```
-
- * add support for sending settings to margo
- in the GoSublime settings, Sublime Text preferences or project settings
- the entry `"margo": {...}` will be send to margo where reducers can make use of it
- by accessing `mx.Editor.Settings` e.g.
-
- gosublime settings:
- ```
- "margo": {
- "message": "hello world from the GoSublime settings",
- }
- ```
-
- project settings:
- ```
- "margo": {
- "message": "hello world from the project settings",
- }
- ```
+ - large numbers are split up using commas
+ - 123456 ns/op is converted to µs/op, etc.
+ - 123456 B/op is converted to KiB/op, etc.
- margo.go
- ```
- mg.Reduce(func(mx *mg.Ctx) *mg.State {
- var settings struct {
- // due to limitations in the codec pkg we need to add struct tags
- // unless we use the exact (capitalized name) in the editor settings
- Status string `codec:"status"`
- }
- err := mx.Editor.Settings(&settings)
- switch err {
- case mg.ErrNoSettings:
- // when the `Started` action is dispatched, no client data is present
- // and we therefore have no settings
- return mx.State
- case nil:
- return mx.AddStatus(settings.Status)
- default:
- return mx.AddStatusf("cannot decode settings: %v", err)
- }
- }),
- ```
-## 18.03.21-1
- * fix exception due to missing import?
+ To enable it, use:
-## 18.03.20-1
- * fix a case where (old) margo compilation fails because the Go version cannot
- be determined, but the sanity check shows the correct version
- * move some initializations off the ui thread to avoid ui freezes on startup
- * fix the `golang.GoInstallDiscardBinaries` linter failing due to invalid dirname
+ &golang.GoCmd{
+ Humanize: true,
+ }
-## 18.03.19-1
- * disable the `autoinst` setting by default
- * fix a Python buffering issue that sometimes caused the ui to freeze
- * speedup sh-bootstrap/Sublime Text startup
+ e.g. output:
-## 18.03.16-1
- * support for context-aware snippets have now been added in the new version of margo.
- just add `golang.Snippets,` to your reducers to enable it.
+ goos: linux
+ goarch: amd64
+ pkg: margo.sh/vfs
+ BenchmarkPoke/Miss-8 388,868 2.952 µs/op
+ BenchmarkPoke/Hit-8 1,739,704 684 ns/op
+ PASS
- see https://github.com/disposablue/margo/blob/master/extension-example/extension-example.go
-
-## 18.03.05-1
- * if no status is set, the status markers (dots) are no longer shown
- * sh-bootstrap.go is now built with `go build` to improve startup time in go1.10
- * path handling on Windows was improved
- * linter support should now be almost complete.
- * add support for displaying issues via `ctrl+.`,`ctrl+e` or `cmd+.`,`cmd+e`
- * errors for all files in the package are tracked
- * the error count in the status bar is always visible if there are errors in other files
-
- see the example extension https://github.com/disposablue/margo/blob/master/extension-example/extension-example.go for examples of builtin linters.
-
-
-## 18.02.17-2
- * stop linking sqlite3 to avoid cgo-related compilation errors
-
-## 18.02.17-1
- * fix a compilation error introduced in r18.02.16-1
-
-## 18.02.16-1
- * The new version of margo is close to being ready for real usage.
- If you'd like to test it out, press `ctrl+.`,`ctrl+x` or `cmd+.`,`cmd+x`
- to open the extension file and then save it or restart sublime text
- * Highlights:
- * less dependence on Python, so development should be a lot easier going forward
- * it comes with integrated support for GoImports
- * gocode integration now supports more options like autobuild, showing function params and autocompleting packages that have not been imported
-
-## 18.01.17-1
- * update gocode
- * sync the settings when the active view changes to avoid them going out-of-sync when switching projects
- * add support for exporting env vars into ST.
- see docs for the setting `export_env_vars` (`ctrl+., ctrl+4`, `super+., super+4` on mac)
- * sync all project settings, not just `env`
-
-## 17.12.17-1
- * fix failure to list some packges in the imports palette
- * update gocode
-
-## 17.12.08-1
- * fix broken commenting when the Go package is disabled
-
-## 17.11.27-1
- * use the old GS syntax definitions instead of the new ones from ST to avoid regressions
-
-## 17.11.25-1
- * use the latest Sublime Text Go syntax
- * convert all our existing syntax definitions to .sublime-synta
- * keep track of the sh.bootstrap output and include it in the Sanity Check
-
-## 17.11.14-1
- * Fix failure to list individual Test|Benchmark|Example functions in the test palette
-
-## 17.11.13-1
- * Change the prefix for identifiers in `*_test.go` files to tilde(~)
- to prevent cluttering the declartion palette when searching for (un-)exported identifiers
-
- * Move sh.py/shell bootstrapping into Go and always run it through the shell.
-
- This should fix 2 bugs:
-
- 1. If you had multiple versions of Go installed, one chosen seemingly at random:
-
- * Go is installed via your package manager and is setup correctly
- * You then installed a custom version of Go and set GOROOT (in your shell)
- * It has no effect even though your custom binary appear first in $PATH
-
- This might also fix other cases where the sanity check shows correct settings but compilation fails.
-
- 2. multi-path GOPATH, PATH, etc. in Fish and similar shells are seemingly ignored
-
- In Fish and similar shells where PATH (and other colon separated vars) are lists;
- when these vars are echoed, they are output as `"dir1" "dir2"` instead of `"dir1":"dir2"`
- so when GS sees it, it thinks it's 1 non-existend dir `"dir1 dir2"`.
-
-## 17.10.15
- * update gocode
- * fix failure to display `time` (and other packages) in the imports list
+ Known bugs:
-## 17.08.23
+ - The output fields are not aligned
- * update gocode
+- Add new reducer golang.GoGenerate
-## 17.03.05
- * assign default GOPATH as is done in go1.8
- * don't follow symlinks when scanning for package lists
+ It adds a UserCmd (cord `ctrl/cmd+.`,`ctrl/cmd+c`) named `Go Generate` that calls `go generate` in the closest go package (current dir or parent dirs).
-## 17.02.16
- * update gocode
+ It can be enabled with:
-## 16.07.09-1
- * update gocode
+ &golang.GoGenerate{
+ Args: []string{"-v", "-x"},
+ },
-## 16.06.02-1
- * update gocode
- * if you're using Go 1.7 beta and experience issues with completion, you're advised to downgrade back to 1.6
+- Auto-completion now works when the line ends with a dot (.)
-## 16.05.07-1
- * Add initial support for MarGo extensions.
- Press ctrl+.,ctrl+./super+.,super+. and type "Edit MarGo Extension" to open the the extension file.
- If you your $GOPATH/src contains directories with lots of files or you'd otherwise like
- to skip when looking up import paths, you can do so by configuring the `ImportPaths` option:
-
- package gosublime
+- Add new reducer golang.AsmFmt
- import (
- "disposa.blue/margo"
- "disposa.blue/margo/meth/importpaths"
- "path/filepath"
- "strings"
- )
+ It does code fmt'ing for `.s` files using https://github.com/klauspost/asmfmt
- func init() {
- margo.Configure(func(o *margo.Opts) {
- o.ImportPaths = importpaths.MakeImportPathsFunc(func(path string) bool {
- // note: the default filter includes node_modules
+ It formats `.s` files when they are saved, or the fmt cord `ctrl+.`,`ctrl+f` is pressed.
- // use the default filter
- return importpaths.PathFilter(path) &&
- // don't descened into huge node_modules directory
- !strings.Contains(path, filepath.Base("node_modules"))
- })
- })
- }
+- Add new reducer &web.Prettier{}
+ It does code fmt'ing using https://github.com/prettier/prettier
+ By default it fmt's CSS, HTML, JS, JSON, JSX, SVG, TS, TSX and XML files.
-## 16.05.03-2
- * fallback to the internal MarGo fmt if `fmt_cmd` fails
+ To specify the list of langs to fmt set the `Langs` field:
-## 16.05.03-1
- * fix incomplete gocode update
+ &web.Prettier{
+ // Langs: []mg.Lang{mg.JS}, // only fmt .js files
+ Langs: web.PrettierDefaultLangs,
+ },
-## 16.05.01-1
- * update gocode
- * add struct fields to the declarations palettes
+ You might also need to `import "margo.sh/web"`.
-## 16.04.29-1
- * the imports list (ctrl+.,ctrl+p/super+.,super+p) is now sourced from source code packages only
- and recognises vendored packages
+ You will need to install prettier separately.
-## 16.04.08-2
- * If you use the `fmt_cmd` setting with `goimports` or any other slow command
- you should read and understand the `ipc_timeout` setting documented in `GoSublime.sublime-settings`
+- Add new Lang constants: mg.HTML, mg.SVG and mg.XML
-## 16.04.08-1
- * added a new SUPPORT.md file calrify what level of support can be expected from use of GoSublime
- * you are advised to reach and understand its contents
+- Add mgutil.SplitWriter, a writer that writes to an underlying writer in split chunks, e.g. lines, somewhat similar to bufio.Scanner
-## 16.03.22-1
- * add new pseudo env var _dir (`dirname($_fn)`) and do env var substitution on fmt_cmd
- * use `"fmt_cmd": ["goimports", "-srcdir", "$_dir"]` for newer version of goimports
+- `go.play` and `go.replay` (cord `ctrl/cmd+.`,`ctrl/cmd+r`) now works in unsaved `_test.go` files.
-## 16.01.09-1
- * Output GOROOT and GOPATH to the ST console when they change
+- `go.replay` now runs the Benchmark\* func surrounding the cursor.
-## 15.12.31-1
- * Update gocode (struct field completion, go15vendor completion, etc.)
+ Compared to `ctrl/cmd+shift+left-click`, it also runs tests.
-## 14.02.25-1
- * added setting `installsuffix`. this should help enabling completion and pkg importing
- for appengine. set this to `appengine` and add the appengine goroot to your GOPATH e.g.
- {
- "installsuffix": "appengine",
- "env": {
- "GOPATH": "$YOUR_OWN_GOPATH:$PATH_TO_APPENGINE/goroot"
- }
- }
+ Known bugs:
- * added setting `ipc_timeout`. if you're experiencing issues with code completion
- and the error is `Blocking Call(gocode_complete): Timeout`, set this setting to `2`, or `3`, etc..
- this value is the number of seconds to wait for ipc response from margo before timing out.
- **note: blocking ipc calls like code completion will freeze the ui if they take too long**
+ - It currently ignores the TestArgs and BenchmarkArgs options of the golang.TestCmds reducer.
-## 13.12.26-1
- * when the key binding `ctrl+dot`,`ctrl+r` is pressed, 9o no longer gains focus
+- mg.CmdCtx supports a new option `Verbose`,
-## 13.12.21-2
- * setting `autocomplete_live_hint` was renamed to `calltips` and enabled by default.
- this setting make functions signatures appear in the status bar when you place the
- cursor in a function call
- * the completion_options command was dropped from margo and therefore mg9.completion_options was removed
- * the shell_pathsep setting was removed
+ When `cx.Verbose = true` the commands that are run are printed to the output prefixed with `#`.
-## 13.12.21-1
- * make GoSublime's quick panels use a monospace font
- * add a prefix to the declarations panels:
- `+` indicates exported identifiers
- `-` indicates non-exported identifiers
- * in the declarations panels, init functions are now suffixed with ` (filename)`
- * in the declarations panels, const declarations are now suffixed with ` (value)` e.g. `const StatusTeapot (418)`
- * add syntax definitions for the template builtin functions
+ e.g. output:
-## 13.12.19-1
- * the OS X key bindings have been removed
+ [ `replay` | done ]
+ # go test -test.run=. -test.bench=^BenchmarkPoke$
+ goos: linux
+ [...]
- * a copy has been provided below. you may change the "keys" as you wish and place it inside
- your user key bindings (menu Preferences > Key bindings - User) to restore the functionality
+ It's enabled for `go.play` and `go.replay` (cord `ctrl/cmd+.`,`ctrl/cmd+r`).
- {
- "keys": ["shift+space"],
- "command": "auto_complete",
- "args": {"disable_auto_insert": true, "api_completions_only": true, "next_completion_if_showing": false},
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
- }
+- Issues without a valid tag are now defaulted to `mg.Error` instead of being ignored.
+ This fixes some cases where the error palette shows errors, but the status and HUD don't.
-## 13.12.17-1
- * give string decoding priority to utf-8 over the system's preferred encoding
+- Fix some cases where issues are reported in the wrong file or incorrectly anchored to the current file.
-## 13.12.15-1
- * remove the ctrl+s, etc. key bindings and fmt the file during the save event.
+- goutil.IsPkgDir() and other functions now use the VFS, so should touch the disk less.
-## 13.12.14-2
- * the autocompletion key bindings on OS X have been changed to shift+space
+## 20.03.09
-## 13.12.14-1
- * added new setting `fmt_cmd` to allow replacing margo's fmt with an external gofmt compatible command like like https://github.com/bradfitz/goimports. see the default config for documentation
- * as a last resort, GoSublime will now try to ignore (by replacement) any bytes that cannot be decoded as utf-8 in places that handle strings (like printing to the console)
- * fix the missing `Run {Test,Example,Benchmark}s` entries in the .t palette
+This release fixes a couple bugs:
-## 13.10.05-1
- * sync gocode
+- GO111MODULE=off is set after building, in cases where GO111MODULE wasn't already set by the user.
+- An update message is shown even when the local GoSublime version is greater than that reported by the server.
-## 13.09.07-1
- * remove error syntax highlighting of lone percentage signs in strings
+## 20.03.01
-## 13.07.29-1
- * the .p method of finding packages was reverted. as a result `use_named_imports` has no effect
+This release fixes a margo build failure when upgrading to go1.14.
-## 13.07.28-1
- * the behaviour of `$GS_GOPATH` has change, please see `Usage & Tips` `ctrl+dot,ctrl+2`
- section `Per-project settings & Project-based GOPATH` for details
+## 20.02.01
- * MarGo will now attempt to automatically install packages when you import a package that doesn't exist
- or when completion fails. see the default settings file, `ctrl+dot,ctrl+4` for more details
- about the `autoinst` setting
+This release focuses on fixing a performance issue caused by resetting all cached data prematurely.
- * a new setting was added to allow using `GS_GOPATH` exclusively. see the default settings file,
- `ctrl+dot,ctrl+4` for more details on the `use_gs_gopath` setting
+- Cache some files in memory to avoid re-reading from disk every time.
- * a new setting to allow importing packages with their package name was added.
- see the default settings file, `ctrl+dot,ctrl+4` for more details on the `use_named_imports` setting
+- The `&nodejs.PackageScripts{}` reducer now uses `yarn` instead of `npm` if the `yarn.lock` file is present.
+## 20.01.01
-## 13.07.23-1
- * update gocode
+This release mainly focuses on under-the-hood improvements for module support.
-## 13.07.22-1
- * update gocode
+- The default auto-completion import mode has been changed to `Kim-Porter`, our solution for auto-completion and package/module going forward.
-## 13.07.17-1
- * the behaviour of 9o output scrolling has changed. instead of attempting to show the end
- of the output, the start of the output will be shown instead.
- if you preferred the old behaviour, use the new setting `"9o_show_end": true`
+ One side-effect of this change is that unimported-packages support is less reliable but we feel this is a small drawback when compared to the much improved auto-completion support.
-## 13.07.14-1
- * fix comment toggling when the `Go` package is disabled
+ We plan to remove support for switching import modes in the future, but if you would like to revert to the previous default (bearing in mind auto-completion might stop working), configure the `MarGocodeCtl` reducer as follows:
-## 13.07.12-1
- * update gocode
+ ```go
+ &golang.MarGocodeCtl{
+ ImporterMode: golang.SrcImporterWithFallback,
+ }
+ ```
-## 13.07.06-2
- * the symbols [ ] ( ) { } , . are now treated as puctuation (they might be syntax highlighted)
+- The Go/TypeCheck linter is now more complete and should be able to type-check (without failure) all packages for which auto-completion is available.
+ This linter offers typechecking (like the gotype tool) but can work on unsaved files and while you type and is faster than a full `go install` lint.
-## 13.07.06-1
- * the various operator groups, in addition to semi-colons are now treated as `operators` so they should now be syntax highlighted
+ To enable add the following reducer to your `margo.go` file:
-## 13.07.03-1
- * log MarGo build failure
+ ```go
+ &golang.TypeCheck{},
+ ```
-## 13.07.01-1
- * add user aliases to the 9o completion
- * fix broken arrows keys in 9o completion
- * 9o completion no longer contains the history command prefix (^1 ^2 etc.) (the commands are still shown)
+- Some HTTP handler snippets have been added and are offered in files that `import "net/http"`.
-## 13.06.30-4
- * try not to init() GoSublime more than once
+## 19.10.22
-## 13.06.30-3
- * the `up` and `down` arrows keys now traverses the 9o history when the cursor is in the prompt
+- API BREAKAGE:
+ ParseWithMode and ParseWithMode now take a `*mg.Ctx` instead of a `mg.KVStore`.
-## 13.06.30-2
- * added support for aliases via the setting `9o_aliases`, see the default settings files for documentation
+- Add experimental support for auto-completion and type-checking in go modules.
-## 13.06.30-1
- This update brings with it a new `GoSublime: Go` syntax definition.
- If you get an error complaining about GoSublime .tmLanguage file,
- you should be able to fix it by closing all `.go` files and restarting Sublime Text.
- If you're using the `GoSublime-next.tmLanguage` please delete the file `Packages/User/GoSublime-next.sublime-settings` (if it exists).
- On update(and restart), all views using syntax files with the base-name `GoSublime.tmLanguage`
- or `GoSublime-next.tmLanguage` will be automatically changed to `GoSublime: Go`.
- Hopefully this change will go smoothly.
+- Add experimental reducer `&golang.TypeCheck{}`.
+ It's a linter that does a full type-check as you type (even in unsaved files).
+ It can be thought of as a replacement for the `gotype` binary of old.
- For all other bugs relating to the new syntax definition (e.g. completion stops working)
- please add a comment to https://github.com/DisposaBoy/GoSublime/issues/245
+ NOTE: This is purely an experiment used primarily for testing the package importer
+ and type-checking and will probably break randomly, if it works at all.
- For all other feature requests or bugs, please open a new issue.
+ With that said, the plan is to clean it up and develop it further in the future.
- additionally:
+- The Ariana color scheme has been tweaked to improve readability.
- * there is a new pre-defined variable _nm that is the base name of the current view
- * all pre-defind env vars (_fn, _wd, etc.) are now defined globally and will appear within the
- environment of all 9o command even when run through your shell
+- Add a `‣` prefix to status items and reduce the space between them.
+- Add langs `mg.GoMod` and `mg.GoSum` for `go.mod` and `go.sum` files, respectively.
+ For convenience, `goutil.Langs` now holds the list of all Go-related langs
+ and Go linters are now available in `go.mod` and `go.sum`.
+- The tasks count style has been changed to `Tasks ➊➋➌`.
+ The status animates between `Tasks ➊➋➌` and `Tasks ➀➁➂` while there are tasks less than 16s old.
-## 13.06.29-2
- * show the `go build` output when (re-)installing MarGo
- * show the `go version` output on startup
- * fix the main menu and command palette pointing to the wrong error log file
+- The issue count style has been changed to `Error ➊ꞏ🄋`.
-## 13.06.29-1
- * added 9o `echo` command
- * added two new env vars:
- `$_wd (or $PWD)` contains the 9o working directory
- `$_fn` contains the abs path to the current active view/file (if available)
- * env vars on the 9o command line are expanded before the command is run. see 9o `help`
+ NOTE: The meanings of the numbers have been reverted.
-## 13.06.22-1
- * NOTE: if you have your own GoSublime snippets, the meaning of `global` has changed.
- It will no longer be `true` unless a package is fully declared and the cursor
- is below the line on which the package was declared
+  Previously, given `1/2 Errors`, there was 1 issue with tag `Error` in this view, and there was a total of 2 errors in all views.
+  The new meaning of `Error ➊ꞏ🄋` is: ➊ is the number of issues in the current view and 🄋 is the number of issues in other views.
+  Only the first number is highlighted if there are issues in the current view.
+  Likewise, when there are issues, but none in the current view, only the second number is highlighted.
-## 13.06.16-2
- * added support for automatically setting the `GoSublime: HTML` syntax to certain extensions. See the default settings file (ctrl+dot,ctrl+4) for documentation on the `gohtml_extensions` setting
+- Don't show the `func` prefix in the calltip status. The parens already make it obviously a function.
-## 13.06.16-1
- * all undefined 9o commands are now run through your shell. As always, commands can manually be run through the with the `sh` command e.g. `sh echo 123` command
+## 19.06.16
-## 13.06.15-1
- * based on the feedback I recieved I integrated with the shell a little...
- * I added support for shells: bash, cygwin/msys/git bash, fish, zsh, rc, etc.
- * see https://github.com/DisposaBoy/GoSublime/blob/master/articles/shell.md for more details
+- Fix a deadlock/freeze (seen on Mac OS) when starting up with multiple windows open.
+- Fix an issue where the active window loses focus when starting up.
-## 13.06.05-1
- * added the shell env var and shell setting to the sanity check output
+## 18.11.28
+This release introduces the HUD and comes with many improvements to snippets and a tweaked version of the Mariana color scheme named Ariana.
-## 13.06.03-1
- * I added a small article about running [golint](https://github.com/golang/lint) and other user-commands for linting
- https://github.com/DisposaBoy/GoSublime/blob/master/articles/golint.md
+- The HUD is an output panel that's automatically populated with info traditionally found in the status bar and various pop-ups/tool-tips.
-## 13.06.02-1
- * changed GoSublime home dir to Packages/User/GoSublime/[PLATFORM]
- * changed margo exe name to gosublime.margo_[VERSION]_[GO_VERSION].exe
- * sorry for any breakages
+ Currently, the following info will be displayed there:
-## 13.06.01-1
- * fix missing method snippet when using GoSublime-next.tmLanguage
+ - The `Issues` status, including the error messages for the current line.
-## 13.05.27-3
- * make sure the output panel is always accessed from the main thread
+ - The `GocodeCalltips` status, including positional highlighting of params and return statement.
-## 13.05.27-2
- * document the `fn_exclude_prefixes` setting
+ It's bound to the keys `ctrl+.`,`ctrl+0`.
-## 13.05.27-1
- * added basic syntax highlighting for go templates (embedded within `{{` and `}}`)
- * inside .go files, `raw` strings(only) now has highlighting for go templates
- *note* this is only available on the GoSublime-next syntax which will be set to the default
- for .go files soon, see https://github.com/DisposaBoy/GoSublime/issues/245
- * for html files, the extension .gohtml will yield html highlighting, snippets, etc.
- as normal .html files do, with addition of go template highighting within `{{` and `}}`
- see https://github.com/DisposaBoy/GoSublime/issues/252
+ You can manually bind it to another key. e.g. via `Preferences > Key Bindings`:
-## 13.05.26-2
- * 9o: `tskill` without args, now opens the `pending tasks` palette
+ ```json
+ {
+ "keys": ["ctrl+0"],
+ "command": "margo_show_hud",
+ },
+ ```
-## 13.05.26-1
- * fix `mg -env` in st3 on windows
+- Several new snippets have been added and old ones improved.
-## 13.05.12-7
- * add basic support for injecting commands into 9o.
- contact me if you intend to make use of this feature
+ - AppendSnippet: `name = |` suggests:
-## 13.05.12-6
- * 9o `hist` now honours `9o_instance`
+ - `name = append(name, ...)`
+ - `slice = append(slice[:len(slice):len(slice)], ...)`
-## 13.05.12-5
- * more 9o `cd`, always chdir
+ - DeferSnippet:
-## 13.05.12-4
- * fix not being able to properly cd to relative parent directories
+ - `defer func() {}()`
+ - `defer f()`
-## 13.05.12-3
- * add a basic `cd` command to 9o. see 9o `help` for documentation
+ - ReturnSnippet:
-## 13.05.12-2
- * mg/sh now handless binary output
- * mg/sh now accepts a string `Cmd.Input` that allows passing input the command
+ - `return`
-## 13.05.12-1
- * improved GoSublime-next syntax highlighting.
- see https://github.com/DisposaBoy/GoSublime/issues/245
+ - MutexSnippet: `mu.|` suggests:
-## 13.05.06-4
- * display 9o wd in a simplified manner
- * impl 9o_instance setting: note: this does not work yet
+ - `mu.Lock(); defer mu.Unlock(); |`
+ - `mu.Lock(); |; mu.Unlock()`
-## 13.05.06-3
- * add support for setting the 9o color scheme with `9o_color_scheme`
- * fix completion being shown in gs-next tmlang when the cursor it at the end of the line
+ - PackageNameSnippet:
-## 13.05.06-2
- * disable completion in gs-next: strings, runes, comments
+ - `package main; func main() { | }`
-## 13.05.06-1
- * A new syntax definition has been created to fix all the short-comings of the existing
- syntax highlighting. if you're interested in testing it, please take a look at
- https://github.com/DisposaBoy/GoSublime/issues/245
+ - DocSnippet: suggest function names, field names, etc. inside the corresponding _documentation_ comment
-## 13.05.04-4
- * add new `9o_instance` and `9o_color_scheme`. note: these are not yet implemented
- see https://github.com/DisposaBoy/GoSublime/issues/243
+- Fix the golang.Gocode{} reducer changing View.Pos
-## 13.05.04-3
- * add new `lint_enbaled` and `linters` settings. note: the new linter has not yet been implemented
- see https://github.com/DisposaBoy/GoSublime/issues/220
+- The `(Add)UnimportedPackages` feature no longer adds circular imports
-## 13.05.04-2
- * removed setting: `margo_addr`, if you have a *MarGo* binary in your $PATH, delete it.
+- Show the tasks animation after 500ms instead of 1s to (hopefully) make things feel more responsive
-## 13.05.04-1
- * don't sort imports when adding/removing
+- GOROOT and GOPATH are scanned in parallel at startup
-## 13.05.01-2
- * fix mg9 request leak
+- All packages in GOROOT and GOPATH are available for the unimported packages feature, not just those in GOROOT and the package cache.
-## 13.05.01-1
- * give PATH preference to Go related bin directories
- this has the side-effect that if you set e.g. GOROOT in your project settings(e.g. GAE),
- then $GOROOT/bin/go should be found first, even if you have a normal Go binary at /usr/bin/gos
+- Add a better-named alias `pkg-list` for `unimported-packages` and show the directory where the package is
-## 13.04.27-1
- * fix failure to load GoSublime.tmLanguage in st3
+- API BREAKAGE:
+ Most users should not be affected, but there were some API changes/breakages.
-## 13.04.24-1
- * fix gs.which treating directories `$PATH` named `go` as the `go` executable
+ - removed CursorScope.Any and CursorScope.All
-## 13.04.21-1
- ** WARNING **
- **
- ** the linter system is being redone
- ** this means comp_lint and all lint-related settings will be removed or renamed
- ** see https://github.com/DisposaBoy/GoSublime/issues/220
- **
+ - make CursorCtx.Pos the int offset instead of token.Pos
- * only show calltip if the call is on the same line as the cursor:
- this avoids displaying a calltip for fmt.Println() in the following snippet
+ - `CursorNode` and `ParseCursorNode` were removed
- fmt.|
- fmt.Println("done")
+ - `CursorCtx.Ctx` is no longer embedded
-## 13.04.14-2
- * fix failing to find a calltip for b() in p.a(p.b())
+## 18.11.06
-## 13.04.14-1
- * calltips are now implemented in margo
+- Fix `UnimportedPackages` support for stdlib packages in go1.10 and earlier versions.
-## 13.04.13-1
- * pre-compile margo on update (before restart)
- * detect the go binary's path instead of relying on python
- * try to work-around odd scrolling in 9o
+## 18.11.04
-## 13.04.01-1
- * when replaying unsaved views you are now able to navigate to src lines inside 9o
+- API BREAKAGE: Rename `mg.Reducer.Reducer*` to `mg.Reducer.R*`.
-## 13.03.31-2
- * add GOBIIN to PATH
- * set default package snippet to `package main` if the filename is main.go
+ Most users should be unaffected.
+ If you've _called_ any of these methods _directly_,
+ you will need to rename the following method calls:
-## 13.03.31-1
- * use relative paths when setting syntax files: should fix any errors about not being able to load e.g. 9o.hidden-tmLanguage
+ - ReducerLabel -> RLabel
+ - ReducerInit -> RInit
+ - ReducerConfig -> RConfig
+ - ReducerCond -> RCond
+ - ReducerMount -> RMount
+ - Reducerduce -> Rduce
+ - ReducerUnmount -> RUnmount
-## 13.03.30-3
- * update gocode to https://github.com/nsf/gocode/commit/86e62597306bc1a07d6e64e7d22cd0bb0de78fc3
+- API BREAKAGE: mg.RunCmdData has been un-exported
-## 13.03.30-2
- * restore py3k compat: execfile was removed
+- The following fields in the `&golang.GoCode{}` and `&golang.GocodeCalltips{}` reducers are now ignored.
-## 13.03.30-1
- * work-around show_call_tip hang when the file starts with a comment
+ - Source: this is now the default
+ - ProposeBuiltins: this is now the default
+ - ProposeTests: use `&golang.MarGocodeCtl{}`
+ - Autobuild: we now use the source code so there are no plans to implement this
+ - UnimportedPackages: this is now the default
-## 13.03.29-4
- * impl a basic oom killer in MarGo. If MarGo's memory usage reaches 1000m, she? will die
- you can configure this limit in the user settings ctrl+dot,ctrl+5
- e.g. to limit the memory use to 500m use:
+ See `&golang.MarGocodeCtl{}` (below).
- "margo_oom": 500
+- Add support for 'unimported' packages.
-## 13.03.29-3
- * add support for showing function call tip live in the status bar
- to enable to add the setting:
- "autocomplete_live_hint": true
- to your user settings in ctrl+dot,ctrl+5
+ - auto-completing `json.` will now try to import `encoding/json`
+ - known bugs: when adding the import, the view will scroll
+ - known limitation: we don't scan GOPATH and we don't support the vendor directory
- note: the old keybinding ctrl+dot,ctrl+space works as normal
+ Use `NoUnimportedPackages` (below) to disable this.
-## 13.03.29-2
- * properly detect when the source(about.py) changes
- * notify the user of an update if the on-disk vesion differs from the live version
+- Add support for preloading imported packages when a view is activated.
-## 13.03.29-1
- * add bindings for default setting(ctrl+dot,ctrl+4) and user settings(ctrl+dot,ctrl+5)
+ - This aims to keep the cache warm to speed up auto-completion.
-## 13.03.28-2
- * more python hacks
+ Use `NoPreloading` (below) to disable this.
-## 13.03.28-1
- * make the sanity check output more verbose
- * add key bindings for:
- (replace ctrl with super on os x)
- README.md: ctrl+dot,ctrl+1
- USAGE.md: ctrl+dot,ctrl+2
- run sanity check: ctrl+dot,ctrl+3
+* Add support for adding `unimported` packages to the file.
-## r13.03.25-1
- * abort blocking calls(completion, fmt) early if the install stage isn't set to "done"
+  - Use `AddUnimportedPackages` (below) to enable this
-## r13.03.24-3
- * wait for mg9.install to finish before attempting to send any request to margo.
- fixes a false-positive error about the mg binary being missing before installtion completes
+* All the above can be configured using the `&golang.MarGocodeCtl{}` reducer
-## a13.03.24-2
- * fix call to getcwdu(not in py3k)
+ ```Go
+ &golang.MarGocodeCtl{
+ // whether or not to include Test*, Benchmark* and Example* functions in the auto-completion list
+ // gs: this replaces the `autocomplete_tests` setting
+ ProposeTests: false,
-## r13.03.24-1
- * communicate a tag/version between gs and mg so the case where they're out-of-sync can be detected
+    // Don't try to automatically import packages when auto-completion fails
+ // e.g. when `json.` is typed, if auto-complete fails
+ // "encoding/json" is imported and auto-complete attempted on that package instead
+ // See AddUnimportedPackages
+ NoUnimportedPackages: false,
+ // If a package was imported internally for use in auto-completion,
+ // insert it in the source code
+ // See NoUnimportedPackages
+ // e.g. after `json.` is typed, `import "encoding/json"` added to the code
+ AddUnimportedPackages: false,
-## r13.03.23-2
- * use getcwdu instead of getcwd in hopes of avoiding python issues
+ // Don't preload packages to speed up auto-completion, etc.
+ NoPreloading: false,
-## r13.03.23-1
- * foreign platform binaries will no longer be cleaned up
- e.g. where the current platform is linux-x64, a linux-x32 binary (gosublime.margo.r13.03.23-1.linux-x32.exe)
- will not be cleaned up until you load st2 on linux-x32
+ // Don't suggest builtin types and functions
+ // gs: this replaces the `autocomplete_builtins` setting
+ NoBuiltins: false,
+ },
+ ```
-## r13.03.20-1
- * MarGo EXE name has changed to gosublime.margo.[VERSION].[platform]-[arch].exe
- e.g. gosublime.margo.r13.03.20-1.linux-x64.exe
+* Add new lang constants for `mg.JSX`, `mg.TS`, `mg.TSX` and rename `R` to `Rlang`
-## r13.03.16-2
- * use the first action on a line if an action is triggered in the wrong place
- e.g. if there is a filename and an error message, clicking on the error message will
- 9o will now try find the filename
+* Don't treat an empty non-nil slice as matching in `LangIs()` and `ActionIs()`
-## r13.03.16-1
- * add imports to the top of the block. this causes them to be a part of the first group of imports
- in cases where imports are group by separating them with a space
+* Fix an infinite loop when auto-completing inside packages with cyclic dependencies
-## r13.03.03-1
- * reduce false-positives in gs.flag linter
- * fix margo help/flags (remove gocode flags from the output)
+## 18.10.06
-## r13.03.02-1
- * cap the number of concurrent requests margo will process
+- restore support for running individual test functions by pressing `ctrl+.`,`ctrl+g` or `ctrl+shift + left/right-click` on the function declaration's name
-## r13.03.01-1
- * add go/types (like the old gotype) linter
- * disable go/types linter by default:
- to enabled it, set an empty filter list in your user settings e.g. `"lint_filter": []`
+- add support for having multiple builtins with the same name
-## r13.02.24-1
- * add new setting `lint_filter`. see the default settings for documentation
+- API breakage:
+ `mg.ExecRunFunc()` was replaced with the pattern `CmdCtx.WithCmd().Run()`
+ the former bypasses builtins so running `go install` has no linting support
-## r13.02.09-1
- *impl 9o `hist` command
+## 18.09.30
-## r13.02.08-2
- * add THANKS.md there are several other donors who weren't added since your donations were
- done anonymously. I'd like to add you as well :) if you want your name added please let me know
- either thank you all!
+- Improve autocompletion scope detection
-## r13.02.08-1
- * initial(incomplete) Sublime Text 3 support
- * gsshell ui and gs_shell command removed
- * anything that imported or used gs* commands is probably broken
+  - snippets should now be shown when there is a comment above the package statement
+ - completion should no longer be shown when there is no package statement
-## r13.02.03-3
- * impl `go share` as 9o command `share`. see ctrl+9 "help" for more details
+- misc tweaks to the method snippets
+ - for pointer method receivers, only the `*` is selected for the initial method definition
+ - when there are syntax errors in the file, methods should no longer be suggested for the invalid type `_`
-## r13.02.03-2
- * add new setting `build_command` to allow changing what command is run when you press ctrl+dot,ctrl+b
- see the default settings for documentation (ctrl+dot,ctrl+dot "default settings")
+## 18.09.25
-## r13.02.03-1
- * fmt verbs are now highlighted in raw strings
- * fix race between fmt_save and 9o
- * allow action'ing (super/ctrl+g only) seletions in 9o
+- Switch golang.Gocode and golang.GocodeCalltips to new mode SrcImporterWithFallback by default
-## r13.01.27-2
- * (by default) only save 9o history if the command was manually executed
+ This should improve the experience a lot:
-## r13.01.27-1
- * correctly handle hist indexing when there's only one command in the history (last command not recalled on ctrl+dot,ctrl+b)
+ - in the old `Source: true` mode, CGO packages often failed
+ - in the old `Source: false` mode, you had to make sure the package was installed
+ and up-to-date
+ - in this new mode, we try the more reliable source mode and fallback
+ to the binary mode if it fails
-## r13.01.26-1
- * fix broken package snippet (inserting blank package name)
+ As a result, the `Source: bool` fields are now ignored.
+ To restore the old behaviour, use the golang.MarGocodeCtl reducer:
-## r13.01.25-2
- * set .go files to use the `GoSublime` syntax definition instead of `Go` as it's more complete
- * hide GsDoc and 9o sytax definitions from the command palette
+ ```Go
+ &golang.MarGocodeCtl{
+ ImporterMode: golang.SrcImporterOnly,
+ // or
+ ImporterMode: golang.BinImporterOnly,
+ }
+ ```
-## r13.01.25-1
- * fix 9o command history indexing (caused wrong command to be expanded for ^1, ^2 etc)
+- replace margocodectl `cache-list-by-key` and `cache-list-by-dur` with `cache-list`
+ see `margocodectl cache-list --help`
-## r13.01.24-2
- * add $HOME/go/bin to $PATH
+- Improve FmtCmd's error message
-## r13.01.24-1
- * add $HOME/bin to $PATH
+ When goimports fails due to a syntax error, the parse error should now be shown as well
+ and not just the meaningless `exit 2` error message
-## r13.01.23-1
- * fix broken 9o-related keybindings (ctrl+dot,ctrl+r etc.)
+## 18.09.18
-## r13.01.22-1
- * fix missing declarations in unsaved files
+- fix a case where margo exits due to IPC shutdown
+ _you will need to restart Sublime Text_
+- return all possible completions in gocode to allow the editor to do filtering.
+ this restores the old behaviour where typing `abc.X` proposes `abc.XYX123`
-## r13.01.21-1
- **majour refactoring - watch out for bugs**
+## 18.09.14
- * fix handling of binary data in the run/replay commands
- * misc tweaks+fixes
- * remove gsdepends
- * remove all rpc calls to margo.py
- * remove margo0
+- This release adds a new experimental update notifier.
-## r13.01.20-1
- **IMPORTANT**
- this update marks the complete transition of all keybindings away from GsShell.
- `ctrl+b` `ctrl+dot`,`ctrl+b` `ctrl+dot`,`ctrl+t` and `ctrl+dot`,`ctrl+r`
- all uses 9o now. for more information about the GsShell replacement 9o please press ctrl+9 and type help
+ MOTD keeps you updated about new versions and important announcements
-## r13.01.19-2
- **NOTICE**
- The transition to 9o has begun. press ctrl+9 or super+9 and type `help` for more details on 9o.
- 9o will evntually completely replace all GoSublime's interaction with the OS' shell.
- This includes GsShell(ctrl+dot,ctrl+b).
+ It adds a new command `motd.sync` available via the UserCmd palette as `Sync MOTD`
- As of this update, `ctrl+dot`,`ctrl+r` and `ctrl+dot`,`ctrl+t` has been remapped
+ `Interval` can be set in order to enable automatic update fetching.
-## r13.01.19-1
- * impl 9o command history
+ When new updates are found, it displays the message in the status bar
+ e.g. `★ margo.sh/cl/18.09.14 ★` a url where you see the upcoming changes before updating
-## r13.01.17-1
- * add keybindings in 9o for committing autocompletion instead of executing the prompt when auto_complete_commit_on_tab is false
+ It sends the following data to the url https://api.margo.sh/motd.json:
-## r13.01.14-1
- * added pledgie badge http://www.pledgie.com/campaigns/19078
+ - current editor plugin name e.g. `?client=gosublime`
+ this tells us which editor plugin's changelog to check
+ - current editor plugin version e.g. `?tag=r18.09.14-1`
+    this allows us to determine if there are any updates
+ - whether or not this is the first request of the day e.g. `?firstHit=1`
+ this allows us to get an estimated count of active users without storing
+ any personally identifiable data
-## r13.01.12-1
+ No other data is sent. For more info contact privacy at kuroku.io
- **WARNING**
+  To enable it, add the following reducer:
- GoSublime will soon switch to 9o `ctrl+9` or `super+9`.
- It will replace GsShell `ctrl+dot`,`ctrl+b` (maybe `ctrl+b`).
- GsShell has reached its EOL and as a result no GsShell specific bugs will be fixed, old or new.
- The code (gsshell.py) will remain for a little while so if you use code that interacts
- with it, now is the time to make let me know so necessary features can implemented in 9o
+ ```Go
+ &mg.MOTD{
+ // Interval, if set, specifies how often to automatically fetch messages from Endpoint
+ // Interval: 3600e9, // automatically fetch updates every hour
+ },
+ ```
-## r13.01.06-1
- * add two new 9o command `env` and `settings` see 9o `help` for more details
- * 9o now supports a new scheme `gs.packages` e.g. `ctrl+shft`, left-click on gs.packages://GoSublime/9o.md will open the 9o docs
+ You will need to restart Sublime Text.
+ Unless you uncomment/set `Interval`, you will need to manually check for updates
+ using the `Sync MOTD` command from the usercmd palette
+ `ctrl+.`,`ctrl+c` / `super+.`,`super+c`
-## r13.01.05-2
- * added two task aliases to tskill
- `tskill replay` will kill/cancel the last replay command
+- The `GoSublime: Go` syntax was switched to a new syntax based on the Go syntax shipped in Sublime Text
- `tskill go` will kill the last go command (go test, etc.). as a consequence,
- the 9o `go` command now acts like the `replay` command in that kills any previous instance
+ - if you find any breakages, please file an issue at margo.sh/gs/i
+ - if you prefer the colouring of the previous version, you can switch back to the old syntax
+ via `Menu > View > Syntax > Open all with current extension as... > GoSublime > GoSublime: Go (Deprecated)`
+    please note that this version is buggy and will not receive any fixes
- * added new setting autosave:
- controls whether or not pkg files should be automatically saved when necessary
- (e.g. when running 9o `replay` or `go test` commands)
+- golang.Gocode, golang.GocodeCalltips:
-## r13.01.05-1
- * impl click-tests. i.e `ctrl+shift`,`left-click` on words that start with Test,Benchmark or Example
- will run go corresponding test or bench. `ctrl+shift`,`right-click` will do the same but using only the prefix
- e.g.
- `ctrl+shift`,`left-click` on `BenchmarkNewFunc` will run only `BenchmarkNew`:
- `go test -test.run=none -test.bench="^BenchmarkNew$"`
+ - reduce memory use with `Source: true`
+ - support syscall/js
- `ctrl+shift`,`right-click` on `BenchmarkNewFunc` will run all benchmarks:
- `go test -test.run=none -test.bench="^Benchmark.*"`
+- golang.Guru gained support for syscall/js
+ guru is now called with `-tags "js wasm"` if `syscall/js` is imported in the package
-## r12.12.29-1
- * impl 9o tskill command. see 9o(ctrl+9) "help" for more info
+## 18.08.31
-## r12.12.27-2
- * impl `go test` in 9o run and replay
+- Switch the `ctrl+.`,`ctrl+t` / `cmd+.`,`cmd+t` keybinding to the new &golang.TestCmds{} reducer:
-## r12.12.27-1
- * introducing 9o, the new command-shell. press `ctrl+9` or `super+9` to activate it.
- WARNING: in the near future 9o will replace GsShell
+ ```Go
+ &golang.TestCmds{
+ // additional args to add to the command when running tests and examples
+ TestArgs: []string{},
-## r12.12.26-1
- * sync gocode: Windows-specific config_dir/config_file implementation.
+ // additional args to add to the command when running benchmarks
+ BenchArgs: []string{"-benchmem"},
+ },
+ ```
-## r12.12.13-2
- * add a new setting: `autocomplete_filter_name`
- you may set this to a regexp which will be used to filter entries in the auto-completion list
- e.g. `"autocomplete_filter_name": "^autogenerated_"` will prevent any type or function
- whose name begins with "autogenerated_" from appearing in the auto-completion list
+## 18.08.29
-## r12.12.13-1
- * implement `9 replay` command that will `9 play` (build + run) the current package, after killing any previous instances.
- Until it goes live, you can override the existing `ctrl+dot`,`ctrl+r` binding or bind it to something else by adding
- the following key binding to your user key bindings via menu `Preferences > Key Bindings - User`
+- implement more aggressive gocode caching.
+ behind the scenes, imported/type-checked packages are cached until the respective package is edited.
- {
- "keys": ["ctrl+.", "ctrl+r"],
- "command": "gs_commander_open",
- "args": {"run": ["9", "replay"]},
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
- }
+ - it should now be ok to use `Source: true` option without slowdowns.
+ - as a bonus, `go modules` should now have completion with `Source: true`
+ - please note that `Source: true` uses a lot more memory (see below for details about cache pruning)
+ - if both &golang.Gocode{Source: true} and &golang.GocodeCalltips{Source: true}
+ use `Source: true`, they will share the cache (less memory use)
-## r12.12.2-3
- * setting `margo_cmd` has been removed
-
-## r12.12.2-2
- * setting `gocode_cmd` has been removed
-
-## r12.12.2-1
- * setting `complete_builtins` has been renamed to `autocomplete_builtins`
+- add new reducer `&golang.MarGocodeCtl{}`
+ this allows manual cache management using the new `margocodectl` command
-## r12.11.28-1
- * If you have issues with env vars, particularly on OS X, consider setting the
- `shell` setting. See `Packages/User/GoSublime.sublime-settings` for more details
+ - to clear the cache use the command `margocodectl cache-prune`
+ run `margocodectl` for info about how to use the command
+ - automated cache pruning will be implemented in the future
-## r12.11.15-1
- * MarGo (margo0) and gocode are now bundled with GoSublime and should be in active use.
- Feel free to remove any old source from $GOPATH*/github.com/{nsf/gocode,DisposaBoy/MarGo}
- if you have no use for them in additiion to their respective binaries
+## 18.08.22
-## r12.11.04-1
- * added new setting `complete_builtins`
- set this to `true` to show builtin type and functions in the completion menu
+- merge all shell env vars named `^(MARGO|GO|CGO)\w+` into the GoSublime environment
+ this ensures new env vars like `GOPROXY` and `GO111MODULE` work correctly
-## r12.11.03-1
- * BREAKING CHANGES ARE COMING: in the next GoSublime update support for windows-style
- environment variables will be removed.
- If you have environment variables that are not expanded before GS sees them and they are
- of the form `%HOME%`, `%GOPATH%` etc. they will no longer be expanded.
- You should transition to *nix-style env vars.
+- try to prevent `GO111MODULE` leaking into the agent build process
- i.e `%GOPATH%` etc. should be changed to `$GOPATH`. `$$` can be used to escape to escape`$` characters
+- add support for UserCmd prompts
-## r12.09.22-1
- * the experimental gsshell replacement codename shelly is no more.
- it has been replaced with gscommander which operates from within the output panel
- a separate instance per directory
-
- to activate it press `ctrl+#` or `super+#`
- proper documentation will appear as things progress but for now it works as follows:
- paths are highlighted(usually bold), pressing `ctrl+shift+[left click]` will open it.
- if the path is a file it will open in ST2 otherwise if it's a url it will be opened
- in your web browser
-
- typing `#` followed by a command and pressing `enter` will run that command
-
- auto-completion and implementation of common commands such as `cd` and `go play` will follow soon
-
-
-## r12.09.16-1
- * add typename-aware method definition snippets for types declared in the current file
-
-## r12.09.08-2
- * add new setting `comp_lint_commands` that allows you specify what commands comp-lint should run
- e.g to run `go vet` followed by `go install`, add the following to your user settings.
- by default the only command run for comp-lint is `go install`
-
- "comp_lint_enabled": true, // enable comp-lint
- "comp_lint_commands": [
- {"cmd": ["go", "install"]}, // first run go install
- {"cmd": ["go", "vet"]} // followed by go vet,
- ],
- "on_save": [
- {"cmd": "gs_comp_lint"} // setup comp-lint to run after you save a file
- ],
-
- see `Package/GoSublime/GoSublime.sublime-settings` for details
-
-## r12.09.08-1
- * add support snippets (an alternative to Sublime Text's Native snippets)
- see `Package/GoSublime/GoSublime.sublime-settings` for details
-
-## r12.08.26-1
- * make gs_browse_files (`ctrl+dot`,`ctrl+m`) act more like a file browser.
- it now lists all files in the current directory tree excluding known binary files:
- (.exe, .a, files without extension, etc.) and an entry to go to the parent directory
-
-## r12.08.23-1
- * add experimental support for post-save commands
- a new entry `on_save` is supported in `GoSublime.sublime-settings`, it takes a list of commands
- in the form of an object {"cmd": "...", "args": {...}} where cmd can be any TextCommand
- * add experimental support for `go install` on save in the form of another linter.
- to activate it add the following to your `GoSublime.sublime-settings`
-
- "comp_lint_enabled": true,
- "on_save": [
- {"cmd": "gs_comp_lint"}
- ]
-
- note: enabling this will override(disable) the regular GsLint
-
-## r12.08.10-3
- * `ctrl+dot`,`ctrl+a` is now accessible globally
-
-## r12.08.10-2
- * `ctrl+dot`,`ctrl+o` now presents a file list instead of opening a file
-
-## r12.08.10-1
- * `ctrl+dot`,`ctrl+m` now list all relevant files (.go, .c, etc.)
- as well all files in the directory tree recursively (sub-packages)
- it also now works globally
-
-## r12.08.08-1
- * fix a bug which could cause MarGo to take a long time to respond (when accidentally parsing binary files)
- update MarGo
-
-## r12.07.31-1
- * add platform info e.g (linux, amd64) to pkg declarations list (`ctrl+dot`,`ctrl+l`)
-
-## r12.07.28-2
- * add command palette entry to show the build output
- press `ctrl+dot`,`ctrl+dot` and start typing `build output`
-
-## r12.07.28-1
- * update gocode: nsf fixed a bug that could cause gocode to hang on invalid input
-
-## r12.07.21-2
- * fix: handle filename for browse files correctly
- update MarGo
-
-## r12.07.21-1
- * add support for browsing/listing the files in a the current package
- press `ctrl+dot`,`ctrl+m`
- update MarGo
-
-## r12.07.15-2
- * add basic call-tip? support
- * press `ctrl+dot`,`ctrl+space` inside a function parameter list to show its declaration
-
-## r12.07.15-1
- * update gocode: nsf recently added improved support for variables declared in the head of `if` and `for` statements
-
-## r12.07.12-1
- * fix: imports not sorted on fmt/save
- * fix: GsDoc doesn't work correctly in unsaved files
- * various presentation tweaks
- * documentation comments are now displayed for types
- * package documentation is now displayed
- * goto definition of packages is now enabled
- * various keybindings now available in non .go files
- `ctrl+dot`,`ctrl+dot` - open the command palette with only GoSublime entries
- `ctrl+dot`,`ctrl+n` - create a new .go file
- `ctrl+dot`,`ctrl+o` - browse packages
- * update MarGo
-
-## r12.07.08-2
- * new quick panel for go test
- allows easily running `Test.*`, `Example.*`, `Benchmark.*` or individual tests, examples and benchmarks
- press `ctrl+dot`,`ctrl+t` to access the quick panel
-
-## r12.07.08-1
- * you can now browse packages
- press `ctrl+dot`,`ctrl+o` to open the first file found in the select pkg dir
- * new key binding added `ctrl+dot`,`ctrl+l` to list the declarations in the current pkg in a single step
- it does the same thing as `ctrl+dot`,`ctrl+a` and then selecting 'Current Package'
-
-## r12.07.07-2
- * you can now browse declarations in the current package(beyond file-scope)
- as well as all other packages
- press `ctrl+dot`,`ctrl+a` to browser packages via a quick panel
- listing the declarations in the current is still `ctrl+dot+`,`ctrl+d`
- * update MarGo
-
-## r12.07.07-1
- * improve GsLint detection of un-called flag.Parse()
- * listing declarations now works in unsaved files
- * please update MarGo
-
-## r12.06.29-2
- * GsDoc documentation now shows example functions and blocks are now collapsed
- * update MarGo
-
-## r12.06.29-1
- * fix: threading that caused gslint to crash
- *
- * added initial support for per-project settings
- * a settings object named `GoSublime` in your project settings will override values
- * specified in the `Gosublime.sublime-settings` file
- *
- * added new dynamic pseudo-environment variable `GS_GOPATH` will contain an auto-detected GOPATH
- * e.g. if you file name is `/tmp/go/src/hello/main.go` it will contain the value `/tmp/go`
- * it can safely added to your regular `GOPATH` `env` setting e.g.
- * `"env": { "GOPATH": "$HOME/go:$GS_GOPATH" }`
- * this allows for seemless use of project-based GOPATHs without explicit configuration
- *
- * added ctrl+click binding for GsDoc
- * `ctrl+shift+left-click` acts as alias for `ctrl+dot,ctrl+g` a.k.a goto definition
- * `ctrl+shift+right-click` acts as alias for `ctrl+dot,ctrl+h` a.k.a show documentation hint
- * as always, `super` replace `ctrl` on OS X
-
-## r12.06.26-2
- * GsDoc now supports local, package-global and imported package variables and functions
- (MarGo/doc is still incomplete, however: types(structs, etc.) are not resolved yet)
- I've changed the way GsDoc works. Both mode are unified, ctrl+dot,ctrl+g will take you to
- the definition but the hint( ctrl+dot,ctrl+h ) now displays the src along with any comments
- attached to it (this is usually pure documentation)
- * MarGo needs updating
-
-## r12.06.26-1
- * fix: file saving in gsshell
- * fix: duplicating comment that follows imports when imports are modified
- * fix: adding duplicate entries to the package list due to filename case-insensitivity
- * the new_go_file command now automatically fills out the package declaration
- * add binding to create a new go file ( ctrl+dot,ctrl+n )
-
-## r12.06.17-1
- * add support for running(play) the current file without saving it (`ctrl+dot`, `ctrl+r`)
- * add support for sharing the contents of the current on play.golang.org
- press `ctrl+dot`, `ctrl+dot` for a list of all commands and their key bindings as well sharing functionality
-
-## r12.06.09-2
- * MarGo now supports warning about calling flag.String() etc and forgetting to call flag.Parse() afterwards
-
-## r12.06.09-1
- * removed ctrl+shift+g keybinding, please use `ctrl+dot`,`ctrl+dot` to show the list of available commands and their kebindings
- * complete implementation of imports:
- use `ctrl+dot`,`ctrl+p` to add or remove packages
- use `ctrl+dot`,`ctrl+i` to quickly jump to the last imported package where you can assign an alias, etc.
- use `ctrl+dot`,`ctrl+[` to go back to where you were before
- * MarGo needs updating and a restart of ST2 is recommended
-
-## r12.06.05-1
- * add support for configuring the fmt tab settings - see GoSublime.sublime-settings (fmt_tab_width and fmt_tab_indent)
-
-## r12.06.02-1
- * Add initial stub implementation of goto-definition and show-documentation
- * this requires the latest version of MarGo
- * new key bindings and commands: press `ctrl+.`, `ctrl+.`
- * (control or super on OS X, followed by .(dot) twice)
- * or open the command palette(`ctrl+shift+p`) and type `GoSublime:`
- * to show a list of available commands and their respective key bindings
- * note: currently only the pkgname.Function is supported, so types, methods or constants, etc.
-
-## r12.05.30-1
- * fix completion only offering the 'import snippet' if there aren't any imports in the file
-
-## r12.05.29-1
- * update MarGo
-
-## r12.05.26-2
- * re-enable linting
-
-## r12.05.26-1
- * start using margo.fmt, no more dependecy on `gofmt` and `diff`
-
-## r12.05.05-1
- * add support for installing/updating Gocode and MarGo
+ this enables the creation of UserCmds like the following, without dedicated support from margo:
+
+ ```Go
+ mg.UserCmd{
+ Title: "GoRename",
+ Name: "gorename",
+ Args: []string{"-offset={{.View.Filename}}:#{{.View.Pos}}", "-to={{index .Prompts 0}}"},
+ Prompts: []string{"New Name"},
+ }
+ ```
+
+- fix #853 a build failure when using snap packaged go1.10
+
+- fix caching of packages in GOPATH when doing gocode completion
+ this _might_ slow completion, but there should no longer be any stale non-GOROOT package completions
+
+- add new `Source` option to use source code for gocode completions
+ _this will most likely be very slow_
+
+ ```Go
+ &golang.Gocode{ Source: true }
+ &golang.GocodeCalltips{ Source: true }
+ ```
+
+## 18.08.15
+
+- fix missing `go` command integration by default
+
+- you may need to add the reducer `&golang.GoCmd{}`
+
+- this adds new commands (callable through 9o):
+
+ - `go`: Wrapper around the go command, adding linter support
+
+ - `go.play`: Automatically build and run go commands or run go test for packages
+ with support for linting and unsaved files
+
+ - `go.replay`: Wrapper around go.play limited to a single instance
+ by default this command is bound to `ctrl+.,ctrl+r` or `cmd+.,cmd+r`
+
+ UserCmds (`ctrl+.,ctrl+c` / `cmd+.,cmd+c`) are also added for `Go Play` and `Go RePlay`
diff --git a/Default (Linux).sublime-keymap b/Default (Linux).sublime-keymap
index 24fae8e6..b4fdca90 100644
--- a/Default (Linux).sublime-keymap
+++ b/Default (Linux).sublime-keymap
@@ -49,6 +49,10 @@
"keys": ["ctrl+.", "ctrl+e"],
"command": "margo_issues",
},
+ {
+ "keys": ["ctrl+.", "ctrl+c"],
+ "command": "margo_user_cmds",
+ },
{
"keys": ["ctrl+.", "ctrl+["],
"command": "gs_palette",
@@ -70,12 +74,11 @@
"keys": ["ctrl+.", "ctrl+r"],
"command": "gs9o_open",
"args": {"run": ["replay"], "focus_view": false},
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
},
{
"keys": ["ctrl+.", "ctrl+g"],
- "command": "gs_doc",
- "args": {"mode": "goto"},
+ "command": "gs9o_open",
+ "args": {"run": [".actuate"], "focus_view": false, "show_view": false},
"context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
},
{
@@ -122,8 +125,8 @@
},
{
"keys": ["ctrl+.", "ctrl+t"],
- "command": "gs_test",
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
+ "command": "margo_user_cmds",
+ "args": {"action": "QueryTestCmds"},
},
{
"keys": ["ctrl+.", "ctrl+space"],
@@ -132,7 +135,15 @@
},
{
"keys": ["ctrl+9"],
- "command": "gs9o_open"
+ "command": "gs9o_win_open"
+ },
+ {
+ "keys": ["ctrl+.","ctrl+9"],
+ "command": "gs9o_win_open"
+ },
+ {
+ "keys": ["ctrl+.","ctrl+0"],
+ "command": "margo_show_hud"
},
{
"keys": ["ctrl+space"],
diff --git a/Default (OSX).sublime-keymap b/Default (OSX).sublime-keymap
index 3b42fab6..acc41b23 100644
--- a/Default (OSX).sublime-keymap
+++ b/Default (OSX).sublime-keymap
@@ -41,9 +41,12 @@
},
{
"keys": ["super+.", "super+e"],
- "command": "gs_palette",
"command": "margo_issues",
},
+ {
+ "keys": ["super+.", "super+c"],
+ "command": "margo_user_cmds",
+ },
{
"keys": ["super+.", "super+["],
"command": "gs_palette",
@@ -65,12 +68,11 @@
"keys": ["super+.", "super+r"],
"command": "gs9o_open",
"args": {"run": ["replay"], "focus_view": false},
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
},
{
"keys": ["super+.", "super+g"],
- "command": "gs_doc",
- "args": {"mode": "goto"},
+ "command": "gs9o_open",
+ "args": {"run": [".actuate"], "focus_view": false, "show_view": false},
"context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
},
{
@@ -117,8 +119,8 @@
},
{
"keys": ["super+.", "super+t"],
- "command": "gs_test",
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
+ "command": "margo_user_cmds",
+ "args": {"action": "QueryTestCmds"},
},
{
"keys": ["super+.", "shift+space"],
@@ -127,7 +129,15 @@
},
{
"keys": ["super+9"],
- "command": "gs9o_open"
+ "command": "gs9o_win_open"
+ },
+ {
+ "keys": ["super+.","super+9"],
+ "command": "gs9o_win_open"
+ },
+ {
+ "keys": ["super+.","super+0"],
+ "command": "margo_show_hud"
},
{
"keys": ["shift+space"],
diff --git a/Default (Windows).sublime-keymap b/Default (Windows).sublime-keymap
index 6f27e236..4be25420 100644
--- a/Default (Windows).sublime-keymap
+++ b/Default (Windows).sublime-keymap
@@ -47,9 +47,12 @@
},
{
"keys": ["ctrl+.", "ctrl+e"],
- "command": "gs_palette",
"command": "margo_issues",
},
+ {
+ "keys": ["ctrl+.", "ctrl+c"],
+ "command": "margo_user_cmds",
+ },
{
"keys": ["ctrl+.", "ctrl+["],
"command": "gs_palette",
@@ -71,12 +74,11 @@
"keys": ["ctrl+.", "ctrl+r"],
"command": "gs9o_open",
"args": {"run": ["replay"], "focus_view": false},
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
},
{
"keys": ["ctrl+.", "ctrl+g"],
- "command": "gs_doc",
- "args": {"mode": "goto"},
+ "command": "gs9o_open",
+ "args": {"run": [".actuate"], "focus_view": false, "show_view": false},
"context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
},
{
@@ -123,8 +125,8 @@
},
{
"keys": ["ctrl+.", "ctrl+t"],
- "command": "gs_test",
- "context": [{ "key": "selector", "operator": "equal", "operand": "source.go" }]
+ "command": "margo_user_cmds",
+ "args": {"action": "QueryTestCmds"},
},
{
"keys": ["ctrl+.", "ctrl+space"],
@@ -133,7 +135,15 @@
},
{
"keys": ["ctrl+9"],
- "command": "gs9o_open"
+ "command": "gs9o_win_open"
+ },
+ {
+ "keys": ["ctrl+.","ctrl+9"],
+ "command": "gs9o_win_open"
+ },
+ {
+ "keys": ["ctrl+.","ctrl+0"],
+ "command": "margo_show_hud"
},
{
"keys": ["ctrl+space"],
diff --git a/GoSublime.py b/GoSublime.py
index a151dcf7..afe86b71 100644
--- a/GoSublime.py
+++ b/GoSublime.py
@@ -22,16 +22,20 @@
print("GoSublime: %s" % execErr)
def loadable_mods():
+ from . import _before
+ from . import _after
from .gosubl import gs
from .gosubl import sh
from .gosubl import margo
from .gosubl import mg9
return [
+ ('_before', _before),
('gs', gs),
('sh', sh),
('margo', margo),
('mg9', mg9),
+ ('_after', _after),
]
def plugin_loaded():
diff --git a/GoSublime.sublime-commands b/GoSublime.sublime-commands
index 8969f5c4..fcb26aec 100644
--- a/GoSublime.sublime-commands
+++ b/GoSublime.sublime-commands
@@ -26,6 +26,10 @@
"caption": "GoSublime: Show Issues",
"command": "margo_issues",
},
+ {
+ "caption": "GoSublime: User Commands",
+ "command": "margo_user_cmds",
+ },
{
"caption": "GoSublime: Go to last bookmark",
"command": "gs_palette",
@@ -60,8 +64,8 @@
},
{
"caption": "GoSublime: Goto Definition",
- "command": "gs_doc",
- "args": {"mode": "goto"}
+ "command": "gs9o_open",
+ "args": {"run": ["goto.definition"], "focus_view": false, "show_view": false},
},
{
"caption": "GoSublime: Show documentation hint",
diff --git a/GoSublime.sublime-settings b/GoSublime.sublime-settings
index d51d6333..deef0c28 100644
--- a/GoSublime.sublime-settings
+++ b/GoSublime.sublime-settings
@@ -30,10 +30,10 @@
"autosave": true,
// Whether or not gscomplete(gocode) is enabled
- "gscomplete_enabled": true,
+ "gscomplete_enabled": false,
// Whether or not gsfmt is enabled
- "fmt_enabled": true,
+ "fmt_enabled": false,
// whether or not to indent with tabs (alignment is always done using spaces)
"fmt_tab_indent": true,
@@ -57,7 +57,7 @@
"ipc_timeout": 1,
// Whether or not gslint is enabled
- "gslint_enabled": true,
+ "gslint_enabled": false,
// filter the kinds of lint checks that are done. supported kinds:
//
@@ -94,7 +94,7 @@
// Not Implemented
// Whether or not gslint is enabled
- "lint_enabled": true,
+ "lint_enabled": false,
// Not Implemented
// list of linters to run
@@ -181,7 +181,7 @@
// whether or not to show function call tip in the status bar
// the same can be achieved ctrl+dot,ctrl+space using an output panel
- "calltips": true,
+ "calltips": false,
// whether or not to use named imports when the basename of the import path doesn't match the pkg name
// e.g. gosubli.me/go-foo would be imported as:
@@ -280,7 +280,9 @@
// if set, 9o will run in single-instance mode instead of per-pkg
// the name can be any string, so you can e.g. set it per-project and maintain project-specific
// command history
- "9o_instance": "",
+ //
+ // setting it to the string "auto" will automatically assign it a name based on the current directory/package
+ "9o_instance": "9o",
// if set 9o will use the specified color scheme.
// the path must relative to `Packages` e.g. `Packages/My/9o Specific.tmTheme`
@@ -310,14 +312,12 @@
//
"9o_aliases": {},
- // what 9o command to run when (super or )ctrl+dot,ctrl+b us pressed
+ // what 9o command to run when (super or )ctrl+dot,ctrl+b is pressed
// e.g. ["go", "build"]
// the 9o command ^1 recalls the last command you ran manually
// see 9o help(ctrl+9 "help") for more details about what commands are supported
"build_command": ["^1"],
- "auto_complete_triggers": [ {"selector": "source.go", "characters": "."} ],
-
// exclude files with the listed prefixes from the file browsing palette (ctrl+dot,ctrl+m)
"fn_exclude_prefixes": [".", "_"],
@@ -325,7 +325,7 @@
// `GoSublime: HTML` files are html files with the template delimiters `{{` and `}}` tailored to
// Go templates (text/template, html/template)
// (`.gohtml` files are automatically set by the syntax definition)
- "gohtml_extensions": [".html.go"],
+ "gohtml_extensions": [],
// Export the listed environment variables to Sublime Text when the GoSublime settings change
// so the env vars GS loads through your shell and project are available to other plugins
diff --git a/Preferences.sublime-settings b/Preferences.sublime-settings
index 7630fb82..37716af6 100644
--- a/Preferences.sublime-settings
+++ b/Preferences.sublime-settings
@@ -1,3 +1,3 @@
{
- "auto_complete_triggers": [ {"selector": "source.go", "characters": "."} ]
-}
\ No newline at end of file
+ "auto_complete_triggers": [ {"selector": "source.go - comment - string", "characters": "."} ],
+}
diff --git a/README.md b/README.md
index 7d27538e..d80431ab 100644
--- a/README.md
+++ b/README.md
@@ -1,105 +1,53 @@
-
+# GoSublime [](#backers) [](#sponsors) [](https://travis-ci.org/DisposaBoy/GoSublime)
-
+## Intro
+GoSublime is an IDE-like plugin for [Sublime Text 3](http://www.sublimetext.com/) mainly, but not limited to, providing integration for most of your Go/Golang development tools.
-[](#backers) [](#sponsors) [](https://travis-ci.org/DisposaBoy/GoSublime)
-
+See https://margo.sh/b/hello-margo/ for a brief introduction to margo, the engine behind GoSublime.
-GoSublime
-=========
+## Installation & Support
+See https://margo.sh/b/migrate/ for instructions on how to install GoSublime.
+See [SUPPORT.md](SUPPORT.md) for details about what level of support you can expect while using GoSublime.
-Intro
------
+## Features
-GoSublime is a Golang plugin collection for the text editor [Sublime Text](http://www.sublimetext.com/) providing code completion and other IDE-like features. Only Sublime Text **3** is supported.
+- code completion from Gocode (fork);
+- context aware snippets via the code-completion popup;
+- sublime build system(ctrl+b) integrating with GoSublime 9o command prompt with live command output;
+- lint/syntax check as you type or on save;
+- quickly jump to any linter error reported in any open file or package;
+- quickly fmt your source or automatically on save to conform with your coding standards;
+- easily create a new go file and run it without needing to save it first (9o `replay`);
+- share your snippets (anything in the loaded file) on play.golang.org;
+- list declarations in the current file or package;
+- automatically add/remove package imports;
+- quickly jump your import section(automatically goes to the last import) where you can easily edit the pkg alias and return to where you were before;
+- go to definition of a package function or constant, etc.;
+- create your own margo extensions in Go to e.g. add context-aware commands to the command palette.
-Before using GoSublime you should read and understand [SUPPORT.md](https://github.com/DisposaBoy/GoSublime/blob/master/SUPPORT.md)
+## Demo
-Features
---------
+- Old demo http://vimeo.com/disposaboy/gosublime-demo2
-* code completion from [Gocode](https://github.com/nsf/gocode)
-* context aware snippets via the code-completion popup to complement the existing SublimeText Go package.
-* sublime build system(ctrl+b) integrating with GoSublime 9o command prompt
-* lint/syntax check as you type
-* quickly jump to any syntax error reported (and jump back to where you were before (across files))
-* quickly fmt your source or automatically on save to conform with the Go standards
-* easily create a new go file and run it without needing to save it first (9o `replay`)
-* share your snippets (anything in the loaded file) on play.golang.org
-* list declarations in the current file
-* automatically add/remove package imports
-* quickly jump your import section(automatically goes to the last import) where you can easily edit the pkg alias and return to where you were before
-* go to definition of a package function or constant, etc.
-* show the source(and thus documentation) of a variable without needing to change views
+## Copyright, License & Contributors
-Demo
-----
-
-* Old demo http://vimeo.com/disposaboy/gosublime-demo2
-
-
-
-
-Installation
-------------
-
-It is assumed that you have a working installation of [Git](https://git-scm.com/) and know how to use it to clone and update repositories.
-
-Run the command `git clone https://github.com/DisposaBoy/GoSublime` from within the Sublime Text `Packages` directory.
-The location of your Sublime Text Packages directory can be found by clicking the menu: `Preferences` > `Browse Packages...`.
-
-Usage
------
-
-Please see [USAGE.md](USAGE.md) and [9o.md](9o.md) for general usage and other tips for effective usage of GoSublime
-
-**NOTE** GoCode is entirely integrated into GoSublime/MarGo. If you see any bugs related to completion,
-assume they are GoSublime's bugs and I will forward bug reports as necessary.
-
-Settings
---------
-
-You can customize the behaviour of GoSublime by creating a settings file in your `User` package. This can be accessed from within SublimeText by going to the menu `Preferences > Browse Packages...`. Create a file named `GoSublime.sublime-settings` or alternatively copy the default settings file `Packages/GoSublime/GoSublime.sublime-settings` to your `User` package and edit it to your liking.
-
-Note: File names are case-sensitive on some platforms (e.g. Linux) so the file name should be exactly `GoSublime.sublime-settings` with capitalization preserved.
-
-
-Copyright, License & Contributors
-=================================
-
-GoSublime and MarGo are released under the MIT license. See [LICENSE.md](LICENSE.md)
-
-GoSublime is the copyrighted work of *The GoSublime Authors* i.e me ([https://github.com/DisposaBoy/GoSublime](DisposaBoy)) and *all* contributors. If you submit a change, be it documentation or code, so long as it's committed to GoSublime's history I consider you a contributor. See [AUTHORS.md](AUTHORS.md) for a list of all the GoSublime authors/contributors.
+margo and GoSublime are released under the MIT license. See [LICENSE.md](LICENSE.md)
Thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
-Supporters
-==========
-
-GoSublime has received support from many kind individuals and as a thank you I've added most to [THANKS.md](THANKS.md) file as a way of saying *Thank You*. Some donors donated anonymously and so are not listed, however. If you have donated and would like to add an entry to this file, feel free to open a pull request.
-
-Donations
-=========
-
-Supporting me means I can spend more time working on GoSublime and other Open Source projects, hopefully leading to more consistent and regular development.
-
-Donate using Liberapay
-
-
-
-
-
-Donate using PayPal
-
-
+### Supporters
+GoSublime has received support from many kind individuals and as a thank you I've added most to [THANKS.md](THANKS.md) file as a way of saying _Thank You_. Some donors donated anonymously and so are not listed, however. If you have donated and would like to add an entry to this file, feel free to open a pull request.
+### Donations
+See https://margo.sh/funding/ for ways in which you can help support future development of margo and GoSublime.
+
Become a backer or a sponsor on OpenCollective
@@ -109,7 +57,6 @@ Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com
-
### Sponsors
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/gosublime#sponsor)]
@@ -125,7 +72,6 @@ Support this project by becoming a sponsor. Your logo will show up here with a l
-
-
+
diff --git a/SUPPORT.md b/SUPPORT.md
index a5596063..1173111f 100644
--- a/SUPPORT.md
+++ b/SUPPORT.md
@@ -1,62 +1,57 @@
-This document aims clarify what level of support you can expect when using GoSublime.
-Use of GoSublime assumes you've read and understood *all* the points herein.
+# Support
-Discussion of support and this file in particular are tracked here: https://github.com/DisposaBoy/GoSublime/issues/689
+This document aims to clarify what level of support you can expect while using GoSublime.
+Use of GoSublime assumes you've read and understood _all_ the points herein.
+## Sublime Text
-# Sublime Text
+- All versions of Sublime Text **3** should be supported.
+- For versions before the official 3.0 release in September 2017, graceful fall-backs are in place.
+- Testing is only done for the current non-beta version and only on Linux.
-* **Sublime Text 2 is *not* supported.**
-* It's several years old at this point and Sublime HQ does not respond to my support requests.
-* Furthermore, they've changed the default download link to Sublime Text 3 implying they do not support it either.
+## Experience
-If you have a *good* reason to not upgrade to Sublime Text 3,
-discuss it here https://github.com/DisposaBoy/GoSublime/issues/689
+- It is assumed that you are experienced with Sublime Text, basic key bindings, its settings system, etc.
+- It is assumed that you already have a working Go installation: https://golang.org/doc/install.
+## Package Control
+- Package Control is not supported
-# Experience
+## Go
-* It is assumed that you are experienced with Sublime Text, basic key bindings, its settings system, etc.
-* It is assumed that you already have a working Go installation: https://golang.org/doc/install
-* You are expect to have read and understand the contents of the files: GoSublime.sublime-settings, USAGE.md and 9o.md
+GoSublime is backed by https://margo.sh/ to which the following points apply:
-# Sublime Text's Go package
+- Like the official Go [release policy](https://golang.org/doc/devel/release.html#policy), only the current and previous released versions of Go are supported.
+- Only the main `gc` tool-chain distributed by https://golang.org/ is supported.
+- margo should not require a cgo-enabled Go installation, but non-cgo builds i.e. `CGO_ENABLED=0` are not tested.
-* I disable the built-in Go package so I do not test for compatibility or conflicts with GoSublime.
+## Go Modules
-# Package Control
+At this time, Go modules are only partially supported.
+Auto-completion and other features built directly into margo should work in most cases,
+but features backed by external tools e.g. `goto definition` might not work.
-* I do not use Package Control and therefore not able to offer support for any issue related to it.
-* As a user, *you* are expected take care when updating GoSublime.
-* You are advised *not* to utomatically update GoSublime.
+We plan to implement most of these features internally as our (type-check) importer matures.
-# Go
+## Operating Systems
-Please note that GoSublime is backed by a Go program named MarGo to which the following points apply.
+- Testing is only done on Arch Linux.
+- Windows and macOS should work without issue, but no testing is done on them.
-* The minimum supported version of Go is go1.6.
-* Older versions of Go might be able to compile MarGo without issue, but I will not test these older versions.
-* I also do not test the gccgo, llvm, etc. tool-chains. Only the main `gc` tool-chain is supported.
-* MarGo should not require a cgo-enabled Go installation, but I do not test installations with it disabled.
+## Tools
-# Operating Systems
+Please note:
-* I only test Linux.
-* Windows and OS X should work without issue, but I do *not* test anything on them.
+- By default `fmt` is achieved through direct use of the packages in the stdlib and not the binaries on your system.
-# Tools
+## Sponsors & Backers
-Please note:
+While we will make an effort to respond to all issues, we have only a limited amount of time and have chosen to give higher priority to our sponsors and backers (including those who donate outside of Open Collective and Patreon).
-* GoSublime uses its own fork of `gocode` so any installation on your system is ignored.
-* By default `fmt` is achieved through direct use of the packages in the stdlib and not the binaries on your system.
+If an urgent response is required, or an issue has gone without response for more than a few days, our sponsors and backers are welcome to send an email to support@margo.sh.
-I do not use the following tools and do *not* test for compatibility with them:
+## Issues with sensitive details
-* GVM or any other Go version manager
-* GB or any other other alternative to the `go` tool
-* `goimports`, the `gofmt`/`go fmt` *binary* or any other `gofmt` alternative
-* If you use the `fmt_cmd` setting with `goimports` or any other slow command
- you should read and understand the `ipc_timeout` setting documented in `GoSublime.sublime-settings`
+If your issue contains sensitive details or you would otherwise prefer not to post it publicly, you're welcome to send an email to support@margo.sh.
diff --git a/_after.py b/_after.py
new file mode 100644
index 00000000..47858a87
--- /dev/null
+++ b/_after.py
@@ -0,0 +1,3 @@
+
+def gs_init(m={}):
+ pass
diff --git a/_before.py b/_before.py
new file mode 100644
index 00000000..47858a87
--- /dev/null
+++ b/_before.py
@@ -0,0 +1,3 @@
+
+def gs_init(m={}):
+ pass
diff --git a/gosubl/_dbg.py b/gosubl/_dbg.py
index 3ce52be7..267e75ca 100644
--- a/gosubl/_dbg.py
+++ b/gosubl/_dbg.py
@@ -1,30 +1,59 @@
+import inspect
import sys
import time
+pf_enabled = False
+print_enabled = False
+gt_default = 0.020
+
class pf(object):
- def __init__(self, print='{name}: {dur}', name='', dot='', gt=0.020):
+ def __init__(self, *, format='slow op: {name}: {dur}', name='', dot='', gt=gt_default):
self.start = time.time()
self.end = 0
self.duration = 0
self.dur = ''
- self.print = print
+ self.format = format
self.gt = gt
- try:
- self.caller = sys._getframe(1).f_code.co_name
- except ValueError:
- self.caller = 'unknown'
+ self.caller = self._caller_name()
self.name = name or self.caller
self.dot = dot
if self.dot:
- self.name += '.' + self.dot
+ self.name = '%s..%s' % (self.name, self.dot)
+
+ def _caller_name(self):
+ if not pf_enabled:
+ return ''
+
+ try:
+ frame = sys._getframe(2)
+ except AttributeError:
+ return ''
+
+ try:
+ klass = frame.f_locals['self'].__class__
+ except KeyError:
+ klass = ''
+
+ return '%s.%s' % (klass, frame.f_code.co_name)
def __del__(self):
self.end = time.time()
self.duration = self.end - self.start
self.dur = '%0.3fs' % self.duration
+ if not pf_enabled:
+ return
+
if self.duration <= self.gt:
return
- if self.print:
- print(self.print.format(**self.__dict__))
+ if self.format:
+ _println(self.format.format(**self.__dict__))
+
+def _println(*a):
+ print('GoSublime_DBG:', *a)
+
+def println(*a):
+ if print_enabled:
+ _println(*a)
+
diff --git a/gosubl/about.py b/gosubl/about.py
index fac929bb..5c31f605 100644
--- a/gosubl/about.py
+++ b/gosubl/about.py
@@ -1,8 +1,9 @@
import re
import sublime
-ANN = 'a18.07.17-1'
-VERSION = 'r18.05.19-1'
+TAG = '20.06.14-1'
+ANN = 'a'+TAG
+VERSION = 'r'+TAG
VERSION_PAT = re.compile(r'\d{2}[.]\d{2}[.]\d{2}-\d+', re.IGNORECASE)
DEFAULT_GO_VERSION = 'go?'
GO_VERSION_OUTPUT_PAT = re.compile(r'go\s+version\s+(\S+(?:\s+[+]\w+|\s+\([^)]+)?)', re.IGNORECASE)
diff --git a/gosubl/gs.py b/gosubl/gs.py
index f2cc165d..a3947dee 100644
--- a/gosubl/gs.py
+++ b/gosubl/gs.py
@@ -70,7 +70,7 @@
"comp_lint_enabled": False,
"comp_lint_commands": [],
"gslint_timeout": 0,
- "calltips": True,
+ "calltips": False,
"autocomplete_snippets": False,
"autocomplete_tests": False,
"autocomplete_closures": False,
@@ -84,7 +84,7 @@
"autosave": True,
"build_command": [],
"lint_filter": [],
- "lint_enbled": True,
+ "lint_enbled": False,
"linters": [],
"9o_instance": "",
"9o_color_scheme": "",
@@ -154,8 +154,8 @@
'constant.other.rune.go',
])
-VFN_ID_PAT = re.compile(r'^(?:gs\.)?view(?:#|://)(\d+)(.*?)$', re.IGNORECASE)
-ROWCOL_PAT = re.compile(r'^[:]*(\d+)(?:[:](\d+))?[:]*$')
+VFN_ID_PAT = re.compile(r'(?:gs\.)?view(?:#|@|://)(\d+)(.*?)$', re.IGNORECASE)
+ROWCOL_PAT = re.compile(r'[:]+(\d+)(?:[:](\d+))?[:]*$')
USER_DIR = os.path.expanduser('~')
USER_DIR_PAT = re.compile(r'^%s/' % (re.escape(USER_DIR.replace('\\', '/').rstrip('/'))))
@@ -503,12 +503,17 @@ def active_view(win=None, view=None):
return win.active_view()
+def active_wd(win=None, view=None):
+ v = active_view(win=win, view=view)
+ return basedir_or_cwd(v.file_name() if v else '')
+
def win_view(vfn=None, win=None):
- if not win:
- win = sublime.active_window()
+ wins = [win]
+ if win is None:
+ wins = sublime.windows()
view = None
- if win:
+ for win in wins:
m = VFN_ID_PAT.match(vfn or '')
if m:
try:
@@ -519,10 +524,13 @@ def win_view(vfn=None, win=None):
break
except Exception:
gs.error_traceback(NAME)
- elif not vfn or vfn == "":
+
+ if view is None:
+ if not vfn or vfn == "":
view = win.active_view()
else:
view = win.open_file(vfn)
+
return (win, view)
def do_focus(fn, row, col, win, focus_pat, cb):
@@ -531,18 +539,32 @@ def do_focus(fn, row, col, win, focus_pat, cb):
notify(NAME, 'Cannot find file position %s:%s:%s' % (fn, row, col))
if cb:
cb(False)
- elif view.is_loading():
- focus(fn, row=row, col=col, win=win, focus_pat=focus_pat, cb=cb)
- else:
- win.focus_view(view)
- if row <= 0 and col <= 0 and focus_pat:
- r = view.find(focus_pat, 0)
- if r:
- row, col = view.rowcol(r.begin())
- view.run_command("gs_goto_row_col", { "row": row, "col": col })
+
+ return
+
+ def run():
+ r, c = row, col
+
+ if r <= 0 and c <= 0 and focus_pat:
+ reg = view.find(focus_pat, 0)
+ if reg:
+ r, c = view.rowcol(reg.begin())
+
+ if r < 0:
+ r, c = rowcol(view)
+
+ view.run_command("gs_goto_row_col", { "row": r, "col": c })
+
if cb:
cb(True)
+ win.focus_view(view)
+ if view.is_loading():
+ sublime.set_timeout(run, 500)
+ else:
+ run()
+
+
def focus(fn, row=0, col=0, win=None, timeout=100, focus_pat='^package ', cb=None):
sublime.set_timeout(lambda: do_focus(fn, row, col, win, focus_pat, cb), timeout)
@@ -737,6 +759,7 @@ def tm_path(name):
'doc': 'syntax/GoSublime-GsDoc.sublime-syntax',
'go': 'syntax/GoSublime-Go.sublime-syntax',
'gohtml': 'syntax/GoSublime-HTML.sublime-syntax',
+ 'hud': 'syntax/GoSublime-HUD.sublime-syntax',
}
return 'Packages/GoSublime/%s' % d[name]
@@ -789,9 +812,13 @@ def home_path(*a):
def json_decode(s, default):
try:
+ if isinstance(s, bytes):
+ s = s.decode('utf-8')
+
res = json.loads(s)
- if is_a(res, default):
+ if default is None or is_a(res, default):
return (res, '')
+
return (res, 'Unexpected value type')
except Exception as ex:
return (default, 'Decode Error: %s' % ex)
diff --git a/gosubl/margo.py b/gosubl/margo.py
index 1d5e0d71..4ea27b9c 100644
--- a/gosubl/margo.py
+++ b/gosubl/margo.py
@@ -1,27 +1,68 @@
from . import _dbg
from . import gs, gsq, sh
from .margo_agent import MargoAgent
-from .margo_common import OutputLogger, TokenCounter
-from .margo_render import render, render_src
-from .margo_state import State, actions, Config, _view_scope_lang
+from .margo_common import OutputLogger, TokenCounter, Mutex
+from .margo_render import render
+from .margo_state import State, actions, client_actions, Config, _view_scope_lang, view_is_9o, MgView
+from base64 import b64decode
from collections import namedtuple
import glob
import os
+import shlex
import sublime
import time
+import webbrowser
class MargoSingleton(object):
def __init__(self):
- self.package_dir = os.path.dirname(os.path.abspath(__file__))
+ self._ready = False
self.out = OutputLogger('margo')
self.agent_tokens = TokenCounter('agent', format='{}#{:03d}', start=6)
+ self.run_tokens = TokenCounter('9o.run')
self.agent = None
- self.enabled_for_langs = []
+ self.enabled_for_langs = ['*']
self.state = State()
self.status = []
+ self.output_handler = None
+ self._client_actions_handlers = {
+ client_actions.Activate: self._handle_act_activate,
+ client_actions.Restart: self._handle_act_restart,
+ client_actions.Shutdown: self._handle_act_shutdown,
+ client_actions.CmdOutput: self._handle_act_output,
+ client_actions.DisplayIssues: self._handle_DisplayIssues,
+ }
+ self.file_ids = []
+ self._hud_state = {}
+ self._hud_state_lock = Mutex(name='margo.MargoSingleton._hud_state_lock')
+ self.hud_name = 'GoSublime/HUD'
+ self.hud_id = self.hud_name.replace('/','-').lower()
+ self._views = {}
+ self._view_lock = Mutex(name='margo.MargoSingleton._view_lock')
+ self._gopath = ''
+
+ def _sync_settings(self):
+ old, new = self._gopath, sh.getenv('GOPATH')
+
+ if not new or new == old:
+ return
+
+ self._gopath = new
+
+ ag = self.agent
+ if not ag or new == ag.gopath:
+ return
+
+ self.out.println('Stopping agent. GOPATH changed: `%s` -> `%s`' % (ag.gopath, new))
+ self.stop(ag=ag)
def render(self, rs=None):
+ # ST has some locking issues due to its "thread-safe" API
+ # don't access things like sublime.active_view() directly
+
if rs:
+ for err in rs.state.errors:
+ self.out.println('Error: %s' % err)
+
self.state = rs.state
cfg = rs.state.config
@@ -30,21 +71,47 @@ def render(self, rs=None):
if cfg.override_settings:
gs._mg_override_settings = cfg.override_settings
- render(view=gs.active_view(), state=self.state, status=self.status)
+ def _render():
+ render(
+ mg=mg,
+ view=gs.active_view(),
+ state=self.state,
+ status=self.status,
+ )
+
+ if rs:
+ self._handle_client_actions(rs)
+
+ if rs.agent and rs.agent is not self.agent:
+ rs.agent.stop()
+
+ sublime.set_timeout(_render)
- if rs:
- if rs.agent is self.agent:
- sublime.set_timeout_async(lambda: self._handle_client_actions(rs.state.client_actions), 0)
- if rs.agent and rs.agent is not self.agent:
- rs.agent.stop()
+ def _handle_act_activate(self, rs, act):
+ gs.focus(act.name or act.path, row=act.row, col=act.col, focus_pat='')
- def _handle_client_actions(self, client_actions):
- for a in client_actions:
- if a.name == 'restart':
- self.restart()
- elif a.name == 'shutdown':
- self.stop()
+ def _handle_act_restart(self, rs, act):
+ self.restart()
+
+ def _handle_act_shutdown(self, rs, act):
+ self.stop()
+
+ def _handle_act_output(self, rs, act):
+ h = self.output_handler
+ if h:
+ h(rs, act)
+
+ def _handle_DisplayIssues(self, rs, act):
+ gs.active_view().run_command('margo_display_issues')
+
+ def _handle_client_actions(self, rs):
+ for act in rs.state.client_actions:
+ f = self._client_actions_handlers.get(act.action_name)
+ if f:
+ f(rs, act)
+ else:
+ self.out.println('Unknown client-action: %s: %s' % (act.action_name, act))
def render_status(self, *a):
self.status = list(a)
@@ -64,12 +131,17 @@ def restart(self):
self.agent = MargoAgent(self)
self.agent.start()
- def stop(self):
- a, self.agent = self.agent, None
- if a:
- a.stop()
+ def stop(self, ag=None):
+ if not ag or ag is self.agent:
+ ag, self.agent = self.agent, None
+
+ if ag:
+ ag.stop()
def enabled(self, view):
+ if not self._ready:
+ return False
+
if '*' in self.enabled_for_langs:
return True
@@ -77,8 +149,7 @@ def enabled(self, view):
return lang in self.enabled_for_langs
def can_trigger_event(self, view, allow_9o=False):
- if not self.enabled(view):
- return False
+ _pf=_dbg.pf()
if view is None:
return False
@@ -86,27 +157,190 @@ def can_trigger_event(self, view, allow_9o=False):
if view.is_loading():
return False
- vs = view.settings()
- if allow_9o and vs.get('9o'):
+ if not self.enabled(view):
+ return False
+
+ mgv = self.view(view.id(), view=view)
+ if allow_9o and mgv.is_9o:
return True
- if vs.get('is_widget'):
+ if not mgv.is_file:
return False
return True
+ def _gs_init(self):
+ self._sync_settings()
+ gs.sync_settings_callbacks.append(self._sync_settings)
+
+ for w in sublime.windows():
+ for v in w.views():
+ if v is not None:
+ self.view(v.id(), view=v)
+
+ mg._ready = True
+ mg.start()
+
+ def _hud_create_panel(self, win):
+ view = win.create_output_panel(self.hud_name)
+ if win == sublime.active_window():
+ win.focus_view(win.active_view())
+ syntax = gs.tm_path('hud')
+ settings = view.settings()
+ if settings.get('syntax') == syntax:
+ return view
+
+ view.set_syntax_file(syntax)
+ view.set_read_only(True)
+ view.set_name(self.hud_name)
+ opts = {
+ 'line_numbers': False,
+ 'gutter': False,
+ 'margin': 0,
+ 'highlight_line': False,
+ 'rulers': [],
+ 'fold_buttons': False,
+ 'scroll_past_end': False,
+ }
+ settings.erase('color_scheme')
+ for k, v in opts.items():
+ settings.set(k, v)
+
+ return view
+
+ def is_hud_view(self, view):
+ if view is None:
+ return False
+
+ return view.settings().get('syntax') == gs.tm_path('hud')
+
+ def _hud_win_state(self, win):
+ default = (None, None)
+ if win is None:
+ return default
+
+ return self._hud_state.get(win.id()) or default
+
+ def hud_panel(self, win):
+ with self._hud_state_lock:
+ view, phantoms = self._hud_win_state(win)
+ wid = win.id()
+ m = self._hud_state
+
+ if view is None:
+ view = self._hud_create_panel(win)
+ m[wid] = (view, phantoms)
+
+ if phantoms is None:
+ phantoms = sublime.PhantomSet(view, self.hud_name)
+ m[wid] = (view, phantoms)
+
+ if len(m) > 1:
+ wids = [w.id() for w in sublime.windows()]
+ for id in list(m.keys()):
+ if id not in wids:
+ del m[id]
+
+ return (view, phantoms)
+
+ def view(self, id, view=None):
+ with self._view_lock:
+ mgv = self._views.get(id)
+
+ if view is not None:
+ if mgv is None:
+ mgv = MgView(mg=self, view=view)
+ self._views[mgv.id] = mgv
+ else:
+ mgv.sync(view=view)
+
+ return mgv
+
+ def _sync_view(self, event, view):
+ if event in ('pre_close', 'close'):
+ with self._view_lock:
+ self._views.pop(view.id(), None)
+
+ return
+
+ _pf=_dbg.pf(dot=event)
+
+ file_ids = []
+ for w in sublime.windows():
+ for v in w.views():
+ file_ids.append(v.id())
+
+ self.file_ids = file_ids
+ self.view(view.id(), view=view)
+
+ with self._view_lock:
+ m = self._views
+ self._views = {k: m[k] for k in set(file_ids).intersection(set(m.keys()))}
+
def event(self, name, view, handler, args):
- allow_9o = name in (
+ if view is None:
+ return None
+
+ _pf=_dbg.pf(dot=name)
+
+
+ win = view.window()
+ if self.is_hud_view(view):
+ view = gs.active_view(win=win)
+ win.focus_view(view)
+
+ def handle_event(gt=0):
+ if gt > 0:
+ _pf.gt=gt
+
+ self._sync_view(name, view)
+
+ if not self.can_trigger_event(view):
+ return None
+
+ try:
+ return handler(*args)
+ except Exception:
+ gs.error_traceback('mg.event:%s' % handler)
+ return None
+
+ blocking = (
+ 'pre_save',
'query_completions',
)
- if not self.can_trigger_event(view, allow_9o=allow_9o):
- return None
- try:
- return handler(*args)
- except Exception:
- gs.error_traceback('mg.event:%s' % handler)
- return None
+ if name in blocking:
+ return handle_event(gt=0.100)
+
+ sublime.set_timeout(handle_event)
+
+ def _is_str(self, s):
+ return isinstance(s, str)
+
+ def _is_act(self, m):
+ return isinstance(m, dict) and self._is_str(m.get('Name'))
+
+ def _lst_of(self, l, f):
+ return isinstance(l, list) and l and len(list(filter(f, l))) == len(l)
+
+ def navigate(self, href, *, view=None, win=None):
+ if href.startswith('https://') or href.startswith('http://'):
+ gsq.launch('mg.navigate', lambda: webbrowser.open_new_tab(href))
+ return
+
+ dataPfx = 'data:application/json;base64,'
+ data = b64decode(href[len(dataPfx):]) if href.startswith(dataPfx) else href
+
+ view = gs.active_view(view=view, win=win)
+ x, err = gs.json_decode(data, None)
+ if self._is_act(x):
+ self.queue(actions=[x], view=view, delay=0.100)
+ elif self._lst_of(x, self._is_act):
+ self.queue(actions=x, view=view, delay=0.100)
+ elif self._lst_of(x, self._is_str):
+ view.window().run_command('gs9o_open', {'run': x, 'focus_view': False})
+ else:
+ self.out.println('mg.navigate: Invalid href `%s`, expected `http(s)://` or data:json`{Name: action}|[command args...]`, error: %s' % (href, err))
def agent_starting(self, ag):
if ag is not self.agent:
@@ -132,52 +366,129 @@ def _send_start(self):
if not self.agent:
self.start()
- def send(self, action={}, cb=None, view=None):
+ def queue(self, *, actions=[], view=None, delay=-1):
+ self._send_start()
+ self.agent.queue(actions=actions, view=view, delay=delay)
+
+ def send(self, *, actions=[], cb=None, view=None):
self._send_start()
- return self.agent.send(action=action, cb=cb, view=view)
+ return self.agent.send(actions=actions, cb=cb, view=view)
+
+ def on_new(self, view):
+ pass
+
+ def on_pre_close(self, view):
+ pass
def on_query_completions(self, view, prefix, locations):
- action = actions.QueryCompletions.copy()
- rs = self.send(view=view, action=action).wait(0.300)
+ _, lang = _view_scope_lang(view, 0)
+ if not lang:
+ return None
+
+ act = actions.QueryCompletions
+ if lang == 'cmd-prompt':
+ act = self._cmd_completions_act(view, prefix, locations)
+ if not act:
+ return None
+
+ view = gs.active_view(win=view.window())
+ if view is None:
+ return None
+
+ rq = self.send(view=view, actions=[act])
+ rs = rq.wait(0.500)
if not rs:
self.out.println('aborting QueryCompletions. it did not respond in time')
return None
+ if rs.error:
+ self.out.println('completion error: %s: %s' % (act, rs.error))
+ return
+
+ if rs.state.view.src:
+ self._fmt_rs(
+ view=view,
+ event='query_completions',
+ rq=rq,
+ rs=rs,
+ )
+
cl = [c.entry() for c in rs.state.completions]
opts = rs.state.config.auto_complete_opts
return (cl, opts) if opts != 0 else cl
- def on_hover(self, view, point, hover_zone):
- if hover_zone != sublime.HOVER_TEXT:
- return
+ def _cmd_completions_act(self, view, prefix, locations):
+ pos = locations[0]
+ line = view.line(pos)
+ src = view.substr(line)
+ if '#' not in src:
+ return None
+
+ i = src.index('#')
+ while src[i] == ' ' or src[i] == '#':
+ i += 1
+
+ src = src[i:]
+ pos = pos - line.begin() - i
+ name = ''
+ args = shlex.split(src)
+ if args:
+ name = args[0]
+ args = args[1:]
+
+ act = actions.QueryCmdCompletions.copy()
+ act['Data'] = {
+ 'Pos': pos,
+ 'Src': src,
+ 'Name': name,
+ 'Args': args,
+ }
+
+ return act
+
+ def on_hover(self, view, pt, zone):
+ act = actions.QueryTooltips.copy()
+ row, col = view.rowcol(pt)
+ act['Data'] = {
+ 'Row': row,
+ 'Col': col,
+ }
+ self.queue(view=view, actions=[act])
def on_activated(self, view):
- self.send(view=view, action=actions.ViewActivated)
+ self.queue(view=view, actions=[actions.ViewActivated])
def on_modified(self, view):
- self._send_start()
- self.agent.view_modified(view)
+ self.queue(view=view, actions=[actions.ViewModified])
def on_selection_modified(self, view):
- self._send_start()
- self.agent.view_pos_changed(view)
+ self.queue(view=view, actions=[actions.ViewPosChanged])
def fmt(self, view):
- return self._fmt_save(view=view, action=actions.ViewFmt, name='fmt', timeout=5.000)
+ return self._fmt_save(view=view, actions=[actions.ViewFmt], event='fmt', timeout=5.000)
def on_pre_save(self, view):
- return self._fmt_save(view=view, action=actions.ViewPreSave, name='pre-save', timeout=2.000)
+ return self._fmt_save(view=view, actions=[actions.ViewPreSave], event='pre_save', timeout=2.000)
- def _fmt_save(self, *, view, action, name, timeout):
- id_nm = '%d: %s' % (view.id(), view.file_name() or view.name())
- rq = self.send(view=view, action=action)
+ def _fmt_save(self, *, view, actions, event, timeout):
+ rq = self.send(view=view, actions=actions)
rs = rq.wait(timeout)
+ self._fmt_rs(
+ view=view,
+ event=event,
+ rq=rq,
+ rs=rs,
+ )
+
+ def _fmt_rs(self, *, view, event, rq, rs):
+ id_nm = '%d: %s' % (view.id(), view.file_name() or view.name())
+
if not rs:
- self.out.println('%s timedout on view %s' % (name, id_nm))
+ self.out.println('%s timedout on view %s' % (event, id_nm))
return
if rs.error:
- self.out.println('%s error in view %s: %s' % (name, id_nm, rs.error))
+ self.out.println('%s error in view %s: %s' % (event, id_nm, rs.error))
return
req = rq.props.get('View', {})
@@ -200,16 +511,13 @@ def _fmt_save(self, *, view, action, name, timeout):
view.run_command('margo_render_src', {'src': res_src})
def on_post_save(self, view):
- self.send(view=view, action=actions.ViewSaved)
+ self.queue(view=view, actions=[actions.ViewSaved])
def on_load(self, view):
- self.send(view=view, action=actions.ViewLoaded)
-
- def on_close(self, view):
- self.send(view=view, action=actions.ViewClosed)
+ self.on_activated(view)
def example_extension_file(self):
- return gs.dist_path('src/disposa.blue/margo/extension-example/extension-example.go')
+ return gs.dist_path('src/margo.sh/extension-example/extension-example.go')
def extension_file(self, install=False):
src_dir = gs.user_path('src', 'margo')
@@ -224,8 +532,8 @@ def ext_fn():
try:
gs.mkdirp(src_dir)
- with open('%s/margo.go' % src_dir, 'x') as f:
- s = open(self.example_extension_file(), 'r').read()
+ with open('%s/margo.go' % src_dir, 'xb') as f:
+ s = open(self.example_extension_file(), 'rb').read()
f.write(s)
except FileExistsError:
pass
@@ -238,7 +546,7 @@ def ext_fn():
mg = MargoSingleton()
def gs_init(_):
- mg.start()
+ sublime.set_timeout(mg._gs_init)
def gs_fini(_):
mg.stop()
diff --git a/gosubl/margo_agent.py b/gosubl/margo_agent.py
index 53e9e0a3..b4e01fb7 100644
--- a/gosubl/margo_agent.py
+++ b/gosubl/margo_agent.py
@@ -1,7 +1,8 @@
from . import _dbg
from . import sh, gs, gsq
-from .margo_common import TokenCounter, OutputLogger, Chan
+from .margo_common import TokenCounter, OutputLogger, Chan, Mutex
from .margo_state import State, make_props, actions
+from datetime import datetime
import os
import sublime
import subprocess
@@ -9,17 +10,24 @@
import time
ipc_codec = 'msgpack'
-
+ipc_silent_exceptions = (
+ EOFError,
+ BrokenPipeError,
+ ValueError,
+)
if ipc_codec == 'msgpack':
from .vendor import umsgpack
- ipc_dec = umsgpack.load
+ ipc_loads = umsgpack.loads
+ ipc_dec = lambda fp: umsgpack.load(fp, allow_invalid_utf8=True)
ipc_enc = umsgpack.dump
- ipc_ignore_exceptions = (umsgpack.InsufficientDataException, BrokenPipeError)
+ ipc_silent_exceptions += (
+ umsgpack.InsufficientDataException,
+ )
elif ipc_codec == 'cbor':
from .vendor.cbor_py import cbor
+ ipc_loads = cbor.loads
ipc_dec = cbor.load
ipc_enc = cbor.dump
- ipc_ignore_exceptions = (BrokenPipeError)
else:
raise Exception('impossibru')
@@ -32,7 +40,7 @@ def __init__(self, mg):
_, self.domain = mg.agent_tokens.next()
self.cookies = TokenCounter('%s,request' % self.domain)
self.proc = None
- self.lock = threading.Lock()
+ self.lock = Mutex(name='margo.MargoAgent.lock')
self.out = OutputLogger(self.domain, parent=mg.out)
self.global_handlers = {}
self.req_handlers = {}
@@ -41,30 +49,39 @@ def __init__(self, mg):
self.starting.set()
self.started = threading.Event()
self.stopped = threading.Event()
+ self._queue_ch = Chan(discard=1)
self.ready = threading.Event()
- gopaths = [
- os.path.join(sublime.packages_path(), 'User', 'margo'),
- mg.package_dir,
- ]
- psep = os.pathsep
- self._env = {
- 'GOPATH': psep.join(gopaths),
+ gopaths = (gs.user_path(), gs.dist_path())
+ psep = sh.psep
+ self.gopath = sh.getenv('GOPATH')
+ self.data_dir = gs.user_path('margo.data')
+ self._default_env = {
+ 'GOPATH': self.gopath,
+ 'MARGO_DATA_DIR': self.data_dir,
+ 'MARGO_AGENT_GO111MODULE': 'off',
+ 'MARGO_AGENT_GOPATH': psep.join(gopaths),
'PATH': psep.join([os.path.join(p, 'bin') for p in gopaths]) + psep + os.environ.get('PATH'),
}
+ gs.mkdirp(self.data_dir)
- self._mod_ev = threading.Event()
- self._mod_view = None
- self._pos_view = None
+ self._acts_lock = Mutex(name='margo.MargoAgent._acts_lock')
+ self._acts = []
def __del__(self):
self.stop()
+ def _env(self, m):
+ e = self._default_env.copy()
+ e.update(m)
+ return e
+
def stop(self):
if self.stopped.is_set():
return
self.starting.clear()
self.stopped.set()
+ self._queue_ch.close()
self.req_chan.close()
self._stop_proc()
self._release_handlers()
@@ -85,46 +102,51 @@ def run(self):
self._start_proc()
def _start_proc(self):
+ _dbg.pf(dot=self.domain)
+
self.mg.agent_starting(self)
self.out.println('starting')
- gs_gopath = sh.psep.join((gs.user_path(), gs.dist_path()))
gs_gobin = gs.dist_path('bin')
- install_cmd = ['go', 'install', '-v', 'disposa.blue/margo/cmd/margo']
+ mg_exe = 'margo.sh'
+ install_cmd = ['go', 'install', '-v','-x', mg_exe]
cmd = sh.Command(install_cmd)
- cmd.env = {
- 'GOPATH': gs_gopath,
+ cmd.env = self._env({
+ 'GOPATH': self._default_env['MARGO_AGENT_GOPATH'],
+ 'GO111MODULE': self._default_env['MARGO_AGENT_GO111MODULE'],
'GOBIN': gs_gobin,
- }
+ 'MARGO_AGENT_GOBIN': gs_gobin,
+ })
cr = cmd.run()
for v in (cr.out, cr.err, cr.exc):
if v:
self.out.println('%s:\n%s' % (install_cmd, v))
mg_cmd = [
- sh.which('margo', m={'PATH': gs_gobin}) or 'margo',
- 'sublime', '-codec', ipc_codec,
+ sh.which(mg_exe, m={'PATH': gs_gobin}) or mg_exe,
+ 'start', 'margo.sublime', '-codec', ipc_codec,
]
self.out.println(mg_cmd)
cmd = sh.Command(mg_cmd)
- cmd.env = {
- 'GOPATH': gs_gopath,
+ cmd.env = self._env({
'PATH': gs_gobin,
- }
+ })
pr = cmd.proc()
if not pr.ok:
self.stop()
self.out.println('Cannot start margo: %s' % pr.exc)
return
+ stderr = pr.p.stderr
self.proc = pr.p
gsq.launch(self.domain, self._handle_send)
- gsq.launch(self.domain, self._handle_send_mod)
+ gsq.launch(self.domain, self._handle_queue)
gsq.launch(self.domain, self._handle_recv)
gsq.launch(self.domain, self._handle_log)
self.started.set()
self.starting.clear()
self.proc.wait()
+ self._close_file(stderr)
def _stop_proc(self):
self.out.println('stopping')
@@ -132,11 +154,19 @@ def _stop_proc(self):
if not p:
return
- for f in (p.stdin, p.stdout, p.stderr):
- try:
- f.close()
- except Exception as exc:
- self.out.println(exc)
+ # stderr is closed after .wait() returns
+ for f in (p.stdin, p.stdout):
+ self._close_file(f)
+
+ def _close_file(self, f):
+ if f is None:
+ return
+
+ try:
+ f.close()
+ except Exception as exc:
+ self.out.println(exc)
+ gs.error_traceback(self.domain)
def _handle_send_ipc(self, rq):
with self.lock:
@@ -145,8 +175,6 @@ def _handle_send_ipc(self, rq):
try:
ipc_enc(rq.data(), self.proc.stdin)
exc = None
- except ipc_ignore_exceptions as e:
- exc = e
except Exception as e:
exc = e
if not self.stopped.is_set():
@@ -158,8 +186,39 @@ def _handle_send_ipc(self, rq):
rq.done(AgentRes(error='Exception: %s' % exc, rq=rq, agent=self))
- def send(self, action={}, cb=None, view=None):
- rq = AgentReq(self, action, cb=cb, view=view)
+ def _queued_acts(self, view):
+ if view is None:
+ return []
+
+ with self._acts_lock:
+ q, self._acts = self._acts, []
+
+ acts = []
+ for act, vid in q:
+ if vid == view.id():
+ acts.append(act)
+
+ return acts
+
+ def queue(self, *, actions=[], view=None, delay=-1):
+ with self._acts_lock:
+ for act in actions:
+ p = (act, view.id())
+ try:
+ self._acts.remove(p)
+ except ValueError:
+ pass
+
+ self._acts.append(p)
+
+ self._queue_ch.put(delay)
+
+ def send(self, *, actions=[], cb=None, view=None):
+ view = gs.active_view(view=view)
+ if not isinstance(actions, list):
+ raise Exception('actions must be a list, not %s' % type(actions))
+ acts = self._queued_acts(view) + actions
+ rq = AgentReq(self, acts, cb=cb, view=view)
timeout = 0.200
if not self.started.wait(timeout):
rq.done(AgentRes(error='margo has not started after %0.3fs' % (timeout), timedout=timeout, rq=rq, agent=self))
@@ -170,36 +229,16 @@ def send(self, action={}, cb=None, view=None):
return rq
- def view_modified(self, view):
- self._mod_view = view
- self._mod_ev.set()
-
- def view_pos_changed(self, view):
- self._pos_view = view
- self._mod_ev.set()
-
- def _send_mod(self):
- mod_v, self._mod_view = self._mod_view, None
- pos_v, self._pos_view = self._pos_view, None
- if mod_v is None and pos_v is None:
- return
+ def _send_acts(self):
+ view = gs.active_view()
+ acts = self._queued_acts(view)
+ if acts:
+ self.send(actions=acts, view=view).wait()
- view = pos_v
- action = actions.ViewPosChanged
- if mod_v is not None:
- action = actions.ViewModified
- view = mod_v
-
- self.send(action=action, view=view).wait()
-
- def _handle_send_mod(self):
- delay = 0.500
- while not self.stopped.is_set():
- self._mod_ev.wait(delay)
- if self._mod_ev.is_set():
- self._mod_ev.clear()
- time.sleep(delay * 1.5)
- self._send_mod()
+ def _handle_queue(self):
+ for n in self._queue_ch:
+ time.sleep(n if n >= 0 else 0.600)
+ self._send_acts()
def _handle_send(self):
for rq in self.req_chan:
@@ -247,10 +286,11 @@ def _handle_recv(self):
v = ipc_dec(self.proc.stdout) or {}
if v:
self._handle_recv_ipc(v)
- except ipc_ignore_exceptions:
+ except ipc_silent_exceptions:
pass
except Exception as e:
self.out.println('ipc: recv: %s: %s' % (e, v))
+ gs.error_traceback(self.domain)
finally:
self.stop()
@@ -286,26 +326,23 @@ def __init__(self, v={}, error='', timedout=0, rq=None, agent=None):
def set_rq(self, rq):
if self.error and rq:
- act = rq.action
- if act and act.get('Name'):
- self.error = 'action: %s, error: %s' % (act.get('Name'), self.error)
- else:
- self.error = 'error: %s' % self.error
+ self.error = 'actions: %s, error: %s' % (rq.actions_str, self.error)
def get(self, k, default=None):
return self.state.get(k, default)
class AgentReq(object):
- def __init__(self, agent, action, cb=None, view=None):
+ def __init__(self, agent, actions, cb=None, view=None):
self.start_time = time.time()
+ self.actions = actions
+ self.actions_str = ' ~> '.join(a['Name'] for a in actions)
_, cookie = agent.cookies.next()
- self.cookie = 'action:%s(%s)' % (action['Name'], cookie)
+ self.cookie = 'actions(%s),%s' % (self.actions_str, cookie)
self.domain = self.cookie
- self.action = action
self.cb = cb
self.props = make_props(view=view)
self.rs = DEFAULT_RESPONSE
- self.lock = threading.Lock()
+ self.lock = Mutex(name='margo.AgentReq.lock')
self.ev = threading.Event()
self.view = view
@@ -333,7 +370,8 @@ def data(self):
return {
'Cookie': self.cookie,
'Props': self.props,
- 'Action': self.action,
+ 'Actions': self.actions,
+ 'Sent': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f'),
}
DEFAULT_RESPONSE = AgentRes(error='default agent response')
diff --git a/gosubl/margo_common.py b/gosubl/margo_common.py
index 3f3a07fa..92dd5891 100644
--- a/gosubl/margo_common.py
+++ b/gosubl/margo_common.py
@@ -5,6 +5,23 @@
import sublime
import time
+class Mutex(object):
+ def __init__(self, *, name=''):
+ self.name = name
+ self.lck = threading.Lock()
+
+ def __enter__(self):
+ self.lock()
+
+ def __exit__(self, type, value, traceback):
+ self.unlock()
+
+ def lock(self):
+ self.lck.acquire(True)
+
+ def unlock(self):
+ self.lck.release()
+
class OutputLogger(object):
def __init__(self, domain, parent=None):
self.domain = domain
@@ -38,10 +55,10 @@ def next(self):
return self.n, self.format.format(self.name, self.n)
class Chan(object):
- def __init__(self, zero=None):
- self.lock = threading.Lock()
+ def __init__(self, *, zero=None, discard=None):
+ self.lock = Mutex(name='margo.Chan')
self.ev = threading.Event()
- self.dq = deque()
+ self.dq = deque([], maxlen=discard)
self.closed = False
self.zero = zero
diff --git a/gosubl/margo_render.py b/gosubl/margo_render.py
index 5918d824..09362e5e 100644
--- a/gosubl/margo_render.py
+++ b/gosubl/margo_render.py
@@ -1,20 +1,24 @@
+from . import about
from . import _dbg
from . import gs
from . import gspatch
-from .margo_state import view_name, view_path
+from .margo_state import ViewPathName
import sublime
+
STATUS_KEY = '#mg.Status'
-STATUS_PFX = '• '
-STATUS_SFX = ' •'
-STATUS_SEP = ' •• '
+STATUS_PFX = ' '
+STATUS_SFX = ' '
+STATUS_SEP = ' '
-def render(view, state, status=[]):
- sublime.set_timeout_async(lambda: _render(view, state, status), 0)
+def render(*, mg, view, state, status):
+ def cb():
+ _render_tooltips(view, state.tooltips)
+ _render_status(view, status + state.status)
+ _render_issues(view, state.issues)
+ _render_hud(mg=mg, state=state, view=view)
-def _render(view, state, status):
- _render_status(view, status + state.status)
- _render_issues(view, state.issues)
+ sublime.set_timeout(cb)
def _render_status(view, status):
if status:
@@ -46,28 +50,34 @@ def __init__(self, *, key, scope, icon, flags):
issue_key_pfx = '#mg.Issue.'
issue_cfg_error = IssueCfg(
key = issue_key_pfx + 'error',
- scope = 'keyword sublimelinter.mark.error region.redish',
+ scope = 'region.redish',
icon = 'Packages/GoSublime/images/issue.png',
flags = sublime.DRAW_SQUIGGLY_UNDERLINE | sublime.DRAW_NO_OUTLINE | sublime.DRAW_NO_FILL,
)
issue_cfg_warning = IssueCfg(
key = issue_key_pfx + 'warning',
- scope = 'entity sublimelinter.mark.warning region.orangish',
+ scope = 'region.orangish',
+ icon = issue_cfg_error.icon,
+ flags = issue_cfg_error.flags,
+)
+issue_cfg_notice = IssueCfg(
+ key = issue_key_pfx + 'notice',
+ scope = 'region.greenish',
icon = issue_cfg_error.icon,
flags = issue_cfg_error.flags,
)
-issue_cfg_default = issue_cfg_warning
+issue_cfg_default = issue_cfg_error
issue_cfgs = {
'error': issue_cfg_error,
'warning': issue_cfg_warning,
+ 'notice': issue_cfg_notice,
}
def _render_issues(view, issues):
regions = {cfg.key: (cfg, []) for cfg in issue_cfgs.values()}
- path = view_path(view)
- name = view_name(view)
+ vp = ViewPathName(view)
for isu in issues:
- if path == isu.path or name == isu.name:
+ if isu.match(vp):
cfg = issue_cfgs.get(isu.tag) or issue_cfg_default
regions[cfg.key][1].append(_render_issue(view, isu))
@@ -96,3 +106,124 @@ def _render_issue(view, isu):
return sublime.Region(sp, ep)
+def _render_tooltips(view, tooltips):
+ if not tooltips:
+ return
+
+ def ren(t):
+ return '''%s
''' % (t.content)
+
+ content = '''
+
+
+ %s
+
+ ''' % ''.join(ren(t) for t in tooltips)
+
+ flags = sublime.COOPERATE_WITH_AUTO_COMPLETE | sublime.HIDE_ON_MOUSE_MOVE_AWAY
+ location = -1
+ max_width = 640
+ max_height = 480
+
+ def on_navigate(href):
+ pass
+
+ def on_hide():
+ pass
+
+ view.show_popup(
+ content,
+ flags=flags,
+ location=location,
+ max_width=max_width,
+ max_height=max_height,
+ on_navigate=on_navigate,
+ on_hide=on_hide
+ )
+
+def _render_hud(*, mg, state, view):
+ html = '''
+
+
+
+
+
+
+ %s
+
+
+
+ ''' % (
+ mg.hud_id,
+ about.VERSION,
+ about.VERSION,
+ ''.join(state.hud.articles),
+ )
+ def ren(win):
+ v, phantoms = mg.hud_panel(win)
+ phantom = sublime.Phantom(
+ sublime.Region(v.size()),
+ html,
+ sublime.LAYOUT_INLINE,
+ lambda href: mg.navigate(href, view=view),
+ )
+ vp = v.viewport_position()
+ phantoms.update([phantom])
+ v.set_viewport_position(vp)
+
+ for w in sublime.windows():
+ ren(w)
diff --git a/gosubl/margo_state.py b/gosubl/margo_state.py
index 144f3b40..e225c5b2 100644
--- a/gosubl/margo_state.py
+++ b/gosubl/margo_state.py
@@ -1,14 +1,18 @@
from . import _dbg
-from . import gs, sh
+from . import gs, sh, about
from .margo_common import NS
+from os.path import basename, splitext
import os
import re
import sublime
actions = NS(**{k: {'Name': k} for k in (
'QueryCompletions',
+ 'QueryCmdCompletions',
'QueryTooltips',
'QueryIssues',
+ 'QueryUserCmds',
+ 'QueryTestCmds',
'ViewActivated',
'ViewModified',
'ViewPosChanged',
@@ -16,13 +20,56 @@
'ViewPreSave',
'ViewSaved',
'ViewLoaded',
- 'ViewClosed',
+ 'RunCmd',
)})
+client_actions = NS(**{k: k for k in (
+ 'Activate',
+ 'Restart',
+ 'Shutdown',
+ 'CmdOutput',
+ 'DisplayIssues',
+)})
+
+class MgView(sublime.View):
+ def __init__(self, *, mg, view):
+ self.mg = mg
+ self.is_9o = False
+ self.is_file = False
+ self.is_widget = False
+ self.sync(view=view)
+
+ def sync(self, *, view):
+ if view is None:
+ return
+
+ _pf=_dbg.pf(dot=self.id)
+ self.id = view.id()
+ self.view = view
+ self.name = view_name(view)
+ self.is_file = self.id in self.mg.file_ids
+ self.is_widget = not self.is_file
+
+ def __eq__(self, v):
+ return self.view == v
+
+ def __hash__(self):
+ return self.id
+
+ def __repr__(self):
+ return repr(vars(self))
+
+ def name(self):
+ return view_name(self.view)
+
class Config(object):
def __init__(self, m):
+ efl = m.get('EnabledForLangs')
+ if m and (not isinstance(efl, list) or len(efl) == 0):
+ print('MARGO BUG: EnabledForLangs is invalid.\nIt must be a non-empty list, not `%s: %s`\nconfig data: %s' % (type(efl), efl, m))
+
self.override_settings = m.get('OverrideSettings') or {}
- self.enabled_for_langs = m.get('EnabledForLangs') or []
+ self.enabled_for_langs = efl or ['*']
self.inhibit_explicit_completions = m.get('InhibitExplicitCompletions') is True
self.inhibit_word_completions = m.get('InhibitWordCompletions') is True
self.auto_complete_opts = 0
@@ -37,19 +84,61 @@ def __repr__(self):
class State(object):
def __init__(self, v={}):
self.config = Config(v.get('Config') or {})
+ self.errors = v.get('Errors') or []
self.status = v.get('Status') or []
self.view = ResView(v=v.get('View') or {})
- self.client_actions = [ClientAction(v=a) for a in (v.get('ClientActions') or [])]
self.completions = [Completion(c) for c in (v.get('Completions') or [])]
self.tooltips = [Tooltip(t) for t in (v.get('Tooltips') or [])]
self.issues = [Issue(l) for l in (v.get('Issues') or [])]
+ self.user_cmds = [UserCmd(c) for c in (v.get('UserCmds') or [])]
+ self.hud = HUD(v=v.get('HUD') or {})
+
+ self.client_actions = []
+ for ca in (v.get('ClientActions') or []):
+ CA = client_action_creators.get(ca.get('Name') or '') or ClientAction
+ self.client_actions.append(CA(v=ca))
def __repr__(self):
return repr(self.__dict__)
class ClientAction(object):
def __init__(self, v={}):
- self.name = v.get('Name') or ''
+ self.action_name = v.get('Name') or ''
+ self.action_data = v.get('Data') or {}
+
+ def __repr__(self):
+ return repr(vars(self))
+
+class ClientAction_Output(ClientAction):
+ def __init__(self, v):
+ super().__init__(v=v)
+ ad = self.action_data
+
+ self.fd = ad.get('Fd') or ''
+ self.output = ad.get('Output') or ''
+ self.close = ad.get('Close') or False
+ self.fd = ad.get('Fd') or ''
+
+ def __repr__(self):
+ return repr(vars(self))
+
+class ClientAction_Activate(ClientAction):
+ def __init__(self, v):
+ super().__init__(v=v)
+ ad = self.action_data
+
+ self.path = ad.get('Path') or ''
+ self.name = ad.get('Name') or ''
+ self.row = ad.get('Row') or 0
+ self.col = ad.get('Col') or 0
+
+ def __repr__(self):
+ return repr(vars(self))
+
+client_action_creators = {
+ client_actions.CmdOutput: ClientAction_Output,
+ client_actions.Activate: ClientAction_Activate,
+}
class Completion(object):
def __init__(self, v):
@@ -69,15 +158,41 @@ def __repr__(self):
class Tooltip(object):
def __init__(self, v):
- pass
+ self.content = v.get('Content') or ''
def __repr__(self):
return repr(self.__dict__)
-class Issue(object):
+class PathName(object):
+ def __init__(self, *, path, name):
+ self.path = path or ''
+ self.name = name or ''
+
+ def match(self, p):
+ if self.path and self.path == p.path:
+ return True
+
+ if self.name and self.name == p.name:
+ return True
+
+ return False
+
+ def __repr__(self):
+ return repr(vars(self))
+
+class ViewPathName(PathName):
+ def __init__(self, view):
+ super().__init__(
+ path = view_path(view),
+ name = view_name(view),
+ )
+
+class Issue(PathName):
def __init__(self, v):
- self.path = v.get('Path') or ''
- self.name = v.get('Name') or ''
+ super().__init__(
+ path = v.get('Path') or '',
+ name = v.get('Name') or '',
+ )
self.hash = v.get('Hash') or ''
self.row = v.get('Row') or 0
self.col = v.get('Col') or 0
@@ -98,6 +213,12 @@ def relpath(self, dir):
return os.path.relpath(self.path, dir)
+ def basename(self):
+ if not self.path:
+ return self.name
+
+ return os.path.basename(self.path)
+
class ResView(object):
def __init__(self, v={}):
self.name = v.get('Name') or ''
@@ -105,9 +226,22 @@ def __init__(self, v={}):
if isinstance(self.src, bytes):
self.src = self.src.decode('utf-8')
+class UserCmd(object):
+ def __init__(self, v={}):
+ self.title = v.get('Title') or ''
+ self.desc = v.get('Desc') or ''
+ self.name = v.get('Name') or ''
+ self.args = v.get('Args') or []
+ self.dir = v.get('Dir') or ''
+ self.prompts = v.get('Prompts') or []
+
+class HUD(object):
+ def __init__(self, v={}):
+ self.articles = v.get('Articles') or []
+
# in testing, we should be able to push 50MiB+ files constantly without noticing a performance problem
# but keep this number low (realistic source files sizes) at least until we optimize things
-MAX_VIEW_SIZE = 512 << 10
+MAX_VIEW_SIZE = 8 << 20
# TODO: only send the content when it actually changes
# TODO: do chunked copying i.e. copy e.g. 1MiB at a time
@@ -115,11 +249,11 @@ def __init__(self, v={}):
# if we attempt to copy large files because it has to convert into utf*
# which could use up to x4 to convert into the string it gives us
# and then we have to re-encode that into bytes to send it
-def make_props(view=None):
+def make_props(view=None, wd=''):
props = {
'Editor': _editor_props(view),
'Env': sh.env(),
- 'View': _view_props(view),
+ 'View': _view_props(view, wd=wd),
}
return props
@@ -132,55 +266,53 @@ def _editor_props(view):
return {
'Name': 'sublime',
'Version': sublime.version(),
+ 'Client': {
+ 'Name': 'gosublime',
+ 'Tag': about.TAG,
+ },
'Settings': sett,
}
-def _view_props(view):
- view = gs.active_view(view=view)
+def view_is_9o(view):
+ return view is not None and view.settings().get('9o')
+
+def _view_props(view, wd=''):
+ was_9o = view_is_9o(view)
+ if was_9o:
+ view = gs.active_view()
+ else:
+ view = gs.active_view(view=view)
+
if view is None:
return {}
pos = gs.sel(view).begin()
- row, col = view.rowcol(pos)
scope, lang, fn, props = _view_header(view, pos)
- wd = gs.basedir_or_cwd(fn)
-
- if lang == '9o':
- if 'prompt.9o' in scope:
- r = view.extract_scope(pos)
- pos -= r.begin()
- s = view.substr(r)
- src = s.lstrip().lstrip('#').lstrip()
- pos -= len(s) - len(src)
- src = src.rstrip()
- else:
- pos = 0
- src = ''
-
- wd = view.settings().get('9o.wd') or wd
- props['Path'] = '_.9o'
- else:
- src = _view_src(view)
+ wd = wd or gs.getwd() or gs.basedir_or_cwd(fn)
+ src = _view_src(view, lang)
props.update({
'Wd': wd,
'Pos': pos,
- 'Row': row,
- 'Col': col,
'Dirty': view.is_dirty(),
'Src': src,
})
return props
+_sanitize_view_name_pat = re.compile(r'[^-~,.@\w]')
+
def view_name(view, ext='', lang=''):
if view is None:
- return ''
+ return '_._'
+ nm = basename(view.file_name() or view.name() or '_')
+ nm, nm_ext = splitext(nm)
if not ext:
- ext = _view_ext(view, lang=lang)
-
- return 'view#' + _view_id(view) + ext
+ ext = _view_ext(view, lang=lang) or nm_ext or '._'
+ nm = 'view@%s,%s%s' % (_view_id(view), nm, ext)
+ nm = _sanitize_view_name_pat.sub('', nm)
+ return nm
def view_path(view):
if view is None:
@@ -204,7 +336,6 @@ def _view_header(view, pos):
return scope, lang, path, {
'Path': path,
'Name': view_name(view, ext=ext, lang=lang),
- 'Ext': ext,
'Hash': _view_hash(view),
'Lang': lang,
'Scope': scope,
@@ -222,23 +353,54 @@ def _view_hash(view):
return 'id=%s,change=%d' % (_view_id(view), view.change_count())
-_scope_lang_pat = re.compile(r'source[.]([^\s.]+)')
+
+_lang_by_basename = {
+ 'go.mod': 'go.mod',
+ 'go.sum': 'go.sum',
+}
+_scope_lang_pat = re.compile(r'(?:source\.\w+|source|text)[.]([^\s.]+)')
def _view_scope_lang(view, pos):
if view is None:
return ('', '')
+ _pf=_dbg.pf()
scope = view.scope_name(pos).strip().lower()
+
+ if view_is_9o(view):
+ return (scope, 'cmd-prompt')
+
+ nm = basename(view_path(view))
+ lb = _lang_by_basename.get(nm)
+ if lb:
+ return (scope, lb)
+
l = _scope_lang_pat.findall(scope)
- lang = l[-1] if l else scope.split('.')[-1]
+ if not l:
+ return (scope, '')
+
+ blacklist = (
+ 'plain',
+ 'find-in-files',
+ )
+ lang = l[-1]
+ if lang in blacklist:
+ return (scope, '')
+
return (scope, lang)
-def _view_src(view):
+def _view_src(view, lang):
if view is None:
return ''
+ if not lang:
+ return ''
+
if not view.is_dirty():
return ''
+ if view.is_loading():
+ return ''
+
if view.size() > MAX_VIEW_SIZE:
return ''
diff --git a/gosubl/margo_sublime.py b/gosubl/margo_sublime.py
index 99fdcab4..44c762c1 100644
--- a/gosubl/margo_sublime.py
+++ b/gosubl/margo_sublime.py
@@ -2,7 +2,7 @@
from . import gs
from .margo import mg
from .margo_render import render_src
-from .margo_state import actions, view_path, view_name
+from .margo_state import actions, ViewPathName
import os
import sublime
import sublime_plugin
@@ -11,35 +11,100 @@ class MargoEvents(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
return mg.event('query_completions', view, mg.on_query_completions, [view, prefix, locations])
- def on_hover(self, view, point, hover_zone):
- return mg.event('hover', view, mg.on_hover, [view, point, hover_zone])
-
- def on_activated_async(self, view):
+ def on_activated(self, view):
return mg.event('activated', view, mg.on_activated, [view])
- def on_modified_async(self, view):
+ def on_modified(self, view):
return mg.event('modified', view, mg.on_modified, [view])
- def on_selection_modified_async(self, view):
+ def on_selection_modified(self, view):
return mg.event('selection_modified', view, mg.on_selection_modified, [view])
def on_pre_save(self, view):
return mg.event('pre_save', view, mg.on_pre_save, [view])
- def on_post_save_async(self, view):
+ def on_post_save(self, view):
return mg.event('post_save', view, mg.on_post_save, [view])
- def on_load_async(self, view):
+ def on_load(self, view):
return mg.event('load', view, mg.on_load, [view])
- def on_close(self, view):
- return mg.event('close', view, mg.on_close, [view])
+ def on_new(self, view):
+ return mg.event('new', view, mg.on_new, [view])
+
+ def on_pre_close(self, view):
+ return mg.event('pre_close', view, mg.on_pre_close, [view])
+
+ def on_hover(self, view, point, hover_zone):
+ return mg.event('hover', view, mg.on_hover, [view, point, hover_zone])
class MargoRenderSrcCommand(sublime_plugin.TextCommand):
def run(self, edit, src):
render_src(self.view, edit, src)
-class MargoIssuesCommand(sublime_plugin.TextCommand):
+class MargoUserCmdsCommand(sublime_plugin.TextCommand):
+ def enabled(self):
+ return mg.enabled(self.view)
+
+ def run(self, edit, action='QueryUserCmds'):
+ act = getattr(actions, action)
+ mg.send(view=self.view, actions=[act], cb=lambda rs: self._cb(rs=rs, action=action))
+
+ def _on_done(self, *, win, cmd, prompts):
+ if len(prompts) >= len(cmd.prompts):
+ self._on_done_call(win=win, cmd=cmd, prompts=prompts)
+ return
+
+ def on_done(s):
+ prompts.append(s)
+ self._on_done(win=win, cmd=cmd, prompts=prompts)
+
+ win.show_input_panel('%d/%d %s' % (
+ len(prompts) + 1,
+ len(cmd.prompts),
+			cmd.prompts[len(prompts)],
+ ), '', on_done, None, None)
+
+ def _on_done_call(self, *, win, cmd, prompts):
+ win.run_command('gs9o_win_open', {
+ 'run': [cmd.name] + cmd.args,
+ 'action_data': {
+ 'Prompts': prompts,
+ 'Dir': cmd.dir,
+ },
+ 'save_hist': False,
+ 'focus_view': False,
+ 'show_view': True,
+ })
+
+ def _cb(self, *, rs, action):
+ win = self.view.window() or sublime.active_window()
+ selected = 0
+ flags = sublime.MONOSPACE_FONT
+ items = []
+ cmds = rs.state.user_cmds
+
+ for c in cmds:
+ desc = c.desc
+ if not desc:
+ desc = '`%s`' % ' '.join([c.name] + c.args)
+ if c.dir:
+ sim = gs.simple_fn(c.dir)
+ rel = os.path.relpath(c.dir, gs.active_wd(win=win))
+ if rel != '.':
+ desc += ' [ %s ]' % (rel if len(rel) < len(sim) else sim)
+ items.append([c.title, desc])
+
+ def on_done(i):
+ if i >= 0 and i < len(cmds):
+ self._on_done(win=win, cmd=cmds[i], prompts=[])
+
+ def on_highlight(i):
+ pass
+
+ win.show_quick_panel(items or ['%s returned no results' % action], on_done, flags, selected, on_highlight)
+
+class margo_display_issues(sublime_plugin.TextCommand):
def run(self, edit, **action):
if mg.enabled(self.view):
self._run()
@@ -49,20 +114,19 @@ def run(self, edit, **action):
})
def _run(self):
- mg.send(view=self.view, action=actions.QueryIssues, cb=self._cb)
+ mg.send(view=self.view, actions=[actions.QueryIssues], cb=self._cb)
def _cb(self, rs):
show_issues(self.view, rs.state.issues)
+class margo_issues(margo_display_issues):
+ pass
+
def issues_to_items(view, issues):
- path = view_path(view)
- dir = os.path.dirname(path)
- name = view_name(view)
+ vp = ViewPathName(view)
+ dir = os.path.dirname(vp.path)
index = []
- def in_view(isu):
- return isu.path == path or isu.name == name or (not isu.path and not isu.name)
-
for isu in issues:
if isu.message:
index.append(isu)
@@ -71,7 +135,7 @@ def in_view(isu):
return ([], [], -1)
def sort_key(isu):
- if in_view(isu):
+ if vp.match(isu):
return (-1, '', isu.row)
return (1, isu.relpath(dir), isu.row)
@@ -82,15 +146,28 @@ def sort_key(isu):
items = []
selected = []
for idx, isu in enumerate(index):
- if in_view(isu):
- title = 'Line %d' % (isu.row + 1)
+ if vp.match(isu):
+ title = '%s:%d' % (isu.basename(), isu.row + 1)
selected.append((abs(isu.row - row), idx))
else:
title = '%s:%d' % (isu.relpath(dir) or isu.name, isu.row + 1)
selected.append((999999999, -1))
- message = ' %s%s' % (isu.message, ' [' + isu.label + ']' if isu.label else '')
- items.append([title, message])
+ rows = [title]
+ rows.extend(s.strip() for s in isu.message.split('\n'))
+ rows.append(' '.join(
+ '[%s]' % s for s in filter(bool, (isu.tag, isu.label))
+ ))
+
+ # hack: ST sometimes decide to truncate the message because it's longer
+ # than the top row... and we don't want the message up there
+ rows[0] = rows[0].ljust(max(len(s) for s in rows))
+ items.append(rows)
+
+ # hack: if the items don't have the same length, ST throws an exception
+ n = max(len(l) for l in items)
+ for l in items:
+ l += [''] * (n - len(l))
return (items, index, min(selected)[1])
@@ -100,8 +177,12 @@ def show_issues(view, issues):
items, index, selected = issues_to_items(view, issues)
def on_done(i):
- if i < 0 or i >= len(items):
- fn = view_path(view) or view_name(view)
+ if not index or i >= len(index):
+ return
+
+ if i < 0:
+ vp = ViewPathName(view)
+ fn = vp.path or vp.name
gs.focus(fn, row=orig_row, col=orig_col, win=view.window())
return
@@ -128,5 +209,10 @@ class MargoOpenExtensionCommand(sublime_plugin.WindowCommand):
def run(self):
fn = mg.extension_file(True)
if fn:
- gs.focus(fn, focus_pat='func Margo')
+ gs.focus(fn, row=-1, focus_pat='')
+
+class margo_show_hud(sublime_plugin.WindowCommand):
+ def run(self):
+ self.window.run_command('show_panel', {'panel': 'output.%s' % mg.hud_name})
+ self.window.focus_view(self.window.active_view())
diff --git a/gosubl/mg9.py b/gosubl/mg9.py
index a5d6b510..77f0bbab 100644
--- a/gosubl/mg9.py
+++ b/gosubl/mg9.py
@@ -185,6 +185,7 @@ def install(aso_install_vesion, force_install, _reinstall=False):
'CGO_ENABLED': '0',
'GOBIN': '',
'GOPATH': install_gopath(),
+ 'GO111MODULE': 'off',
}
ev.debug('%s.build' % DOMAIN, {
diff --git a/gosubl/sh-bootstrap.go b/gosubl/sh-bootstrap.go
index a887f6dc..98a7f0b5 100644
--- a/gosubl/sh-bootstrap.go
+++ b/gosubl/sh-bootstrap.go
@@ -16,16 +16,15 @@ func main() {
m := reVer.FindStringSubmatch(rawVer)
ver := reClean.ReplaceAllString(m[1], "..")
env := map[string]string{
- "GOROOT": build.Default.GOROOT,
- "GOPATH": build.Default.GOPATH,
- "GOBIN": os.Getenv("GOBIN"),
- "PATH": os.Getenv("PATH"),
- "CGO_ENABLED": os.Getenv("CGO_ENABLED"),
+ "GOROOT": build.Default.GOROOT,
+ "GOPATH": build.Default.GOPATH,
+ "PATH": os.Getenv("PATH"),
}
+ varPat := regexp.MustCompile(`^((?:MARGO|GO|CGO)\w+)=(.+)$`)
for _, s := range os.Environ() {
- l := strings.SplitN(s, "=", 2)
- if len(l) == 2 && strings.HasPrefix(l[0], "MARGO_") {
- env[l[0]] = l[1]
+ m := varPat.FindStringSubmatch(s)
+ if len(m) == 3 {
+ env[m[1]] = m[2]
}
}
diff --git a/gosubl/sh.py b/gosubl/sh.py
index c9700a6c..bcf423d7 100644
--- a/gosubl/sh.py
+++ b/gosubl/sh.py
@@ -48,11 +48,6 @@ def proc(self):
else:
input = None
- try:
- setsid = os.setsid
- except Exception:
- setsid = None
-
out = ''
err = ''
exc = None
@@ -82,7 +77,7 @@ def proc(self):
shell=False,
env=nv,
cwd=wd,
- preexec_fn=setsid,
+ start_new_session=True,
bufsize=0
)
except Exception as e:
@@ -206,12 +201,13 @@ def gs_init(_={}):
bs_fn = gs.file_path('gosubl/sh-bootstrap.go')
bs_exe = gs.file_path('bin/gosubl-sh-bootstrap.exe')
- def run(cmd_str):
+ def run(cmd_str, *, env={}):
cmd = ShellCommand(cmd_str)
cmd.wd = root_dir
+ cmd.env = env
return cmd.run()
- cr = run('go build -o %s %s' % (bs_exe, bs_fn))
+ cr = run('go build -o %s %s' % (bs_exe, bs_fn), env={'GO111MODULE': 'off'})
if cr.exc or cr.err:
_print('error building %s: %s' % (bs_fn, cr.exc or cr.err))
@@ -321,6 +317,8 @@ def env(m={}):
m = m.copy()
del m['PATH']
+ add_path.append(gs.dist_path('bin'))
+ add_path.append(gs.user_path('bin'))
add_path.append(bin_dir())
e = st_environ.copy()
diff --git a/gosubl/vendor/umsgpack.py b/gosubl/vendor/umsgpack.py
index cd7a2037..422ec381 100644
--- a/gosubl/vendor/umsgpack.py
+++ b/gosubl/vendor/umsgpack.py
@@ -1,4 +1,4 @@
-# u-msgpack-python v2.4.1 - v at sergeev.io
+# u-msgpack-python v2.5.0 - v at sergeev.io
# https://github.com/vsergeev/u-msgpack-python
#
# u-msgpack-python is a lightweight MessagePack serializer and deserializer
@@ -31,7 +31,7 @@
# THE SOFTWARE.
#
"""
-u-msgpack-python v2.4.1 - v at sergeev.io
+u-msgpack-python v2.5.0 - v at sergeev.io
https://github.com/vsergeev/u-msgpack-python
u-msgpack-python is a lightweight MessagePack serializer and deserializer
@@ -45,13 +45,14 @@
"""
import struct
import collections
+import datetime
import sys
import io
-__version__ = "2.4.1"
+__version__ = "2.5.0"
"Module version string"
-version = (2, 4, 1)
+version = (2, 5, 0)
"Module version tuple"
@@ -71,13 +72,9 @@ def __init__(self, type, data):
Construct a new Ext object.
Args:
- type: application-defined type integer from 0 to 127
+ type: application-defined type integer
data: application-defined data byte array
- Raises:
- TypeError:
- Specified ext type is outside of 0 to 127 range.
-
Example:
>>> foo = umsgpack.Ext(0x05, b"\x01\x02\x03")
>>> umsgpack.packb({u"special stuff": foo, u"awesome": True})
@@ -87,9 +84,9 @@ def __init__(self, type, data):
Ext Object (Type: 0x05, Data: 01 02 03)
>>>
"""
- # Application ext type should be 0 <= type <= 127
- if not isinstance(type, int) or not (type >= 0 and type <= 127):
- raise TypeError("ext type out of range")
+ # Check type is type int
+ if not isinstance(type, int):
+ raise TypeError("ext type is not type integer")
# Check data is type bytes
elif sys.version_info[0] == 3 and not isinstance(data, bytes):
raise TypeError("ext data is not type \'bytes\'")
@@ -168,6 +165,11 @@ class InvalidStringException(UnpackException):
pass
+class UnsupportedTimestampException(UnpackException):
+ "Unsupported timestamp format encountered during unpacking."
+ pass
+
+
class ReservedCodeException(UnpackException):
"Reserved code encountered during unpacking."
pass
@@ -341,6 +343,29 @@ def _pack_ext(obj, fp, options):
raise UnsupportedTypeException("huge ext data")
+def _pack_ext_timestamp(obj, fp, options):
+ delta = obj - _epoch
+ seconds = delta.seconds + delta.days * 86400
+ microseconds = delta.microseconds
+
+ if microseconds == 0 and 0 <= seconds <= 2**32 - 1:
+ # 32-bit timestamp
+ fp.write(b"\xd6\xff" +
+ struct.pack(">I", seconds))
+ elif 0 <= seconds <= 2**34 - 1:
+ # 64-bit timestamp
+ value = ((microseconds * 1000) << 34) | seconds
+ fp.write(b"\xd7\xff" +
+ struct.pack(">Q", value))
+ elif -2**63 <= abs(seconds) <= 2**63 - 1:
+ # 96-bit timestamp
+ fp.write(b"\xc7\x0c\xff" +
+ struct.pack(">I", microseconds * 1000) +
+ struct.pack(">q", seconds))
+ else:
+ raise UnsupportedTypeException("huge timestamp")
+
+
def _pack_array(obj, fp, options):
if len(obj) <= 15:
fp.write(struct.pack("B", 0x90 | len(obj)))
@@ -428,6 +453,8 @@ def _pack2(obj, fp, **options):
_pack_array(obj, fp, options)
elif isinstance(obj, dict):
_pack_map(obj, fp, options)
+ elif isinstance(obj, datetime.datetime):
+ _pack_ext_timestamp(obj, fp, options)
elif isinstance(obj, Ext):
_pack_ext(obj, fp, options)
elif ext_handlers:
@@ -498,6 +525,8 @@ def _pack3(obj, fp, **options):
_pack_array(obj, fp, options)
elif isinstance(obj, dict):
_pack_map(obj, fp, options)
+ elif isinstance(obj, datetime.datetime):
+ _pack_ext_timestamp(obj, fp, options)
elif isinstance(obj, Ext):
_pack_ext(obj, fp, options)
elif ext_handlers:
@@ -584,10 +613,26 @@ def _packb3(obj, **options):
def _read_except(fp, n):
- data = fp.read(n)
- if len(data) < n:
- raise InsufficientDataException()
- return data
+ # when reading from files, networks, etc. there's no guarantee that a read(n)
+ # will return n bytes, so we must keep reading until we get n bytes
+ data = None
+ for _ in range(1000):
+ s = fp.read(n)
+ n -= len(s)
+
+ if data is None:
+ data = s
+ else:
+ data += s
+
+ if n <= 0:
+ return data
+
+ if len(s) == 0:
+ # AFAIK, Python won't return 0 bytes unless we reached EOF
+ raise InsufficientDataException()
+
+ raise InsufficientDataException()
def _unpack_integer(code, fp, options):
@@ -703,16 +748,46 @@ def _unpack_ext(code, fp, options):
else:
raise Exception("logic error, not ext: 0x%02x" % ord(code))
- ext = Ext(ord(_read_except(fp, 1)), _read_except(fp, length))
+ ext_type = struct.unpack("b", _read_except(fp, 1))[0]
+ ext_data = _read_except(fp, length)
+
+ # Create extension object
+ ext = Ext(ext_type, ext_data)
# Unpack with ext handler, if we have one
ext_handlers = options.get("ext_handlers")
if ext_handlers and ext.type in ext_handlers:
- ext = ext_handlers[ext.type](ext)
+ return ext_handlers[ext.type](ext)
+
+ # Timestamp extension
+ if ext.type == -1:
+ return _unpack_ext_timestamp(ext, options)
return ext
+def _unpack_ext_timestamp(ext, options):
+ if len(ext.data) == 4:
+ # 32-bit timestamp
+ seconds = struct.unpack(">I", ext.data)[0]
+ microseconds = 0
+ elif len(ext.data) == 8:
+ # 64-bit timestamp
+ value = struct.unpack(">Q", ext.data)[0]
+ seconds = value & 0x3ffffffff
+ microseconds = (value >> 34) // 1000
+ elif len(ext.data) == 12:
+ # 96-bit timestamp
+ seconds = struct.unpack(">q", ext.data[4:12])[0]
+ microseconds = struct.unpack(">I", ext.data[0:4])[0] // 1000
+ else:
+ raise UnsupportedTimestampException(
+ "unsupported timestamp with data length %d" % len(ext.data))
+
+ return _epoch + datetime.timedelta(seconds=seconds,
+ microseconds=microseconds)
+
+
def _unpack_array(code, fp, options):
if (ord(code) & 0xf0) == 0x90:
length = (ord(code) & ~0xf0)
@@ -801,6 +876,8 @@ def _unpack2(fp, **options):
Insufficient data to unpack the serialized object.
InvalidStringException(UnpackException):
Invalid UTF-8 string encountered during unpacking.
+ UnsupportedTimestampException(UnpackException):
+ Unsupported timestamp format encountered during unpacking.
ReservedCodeException(UnpackException):
Reserved code encountered during unpacking.
UnhashableKeyException(UnpackException):
@@ -843,6 +920,8 @@ def _unpack3(fp, **options):
Insufficient data to unpack the serialized object.
InvalidStringException(UnpackException):
Invalid UTF-8 string encountered during unpacking.
+ UnsupportedTimestampException(UnpackException):
+ Unsupported timestamp format encountered during unpacking.
ReservedCodeException(UnpackException):
Reserved code encountered during unpacking.
UnhashableKeyException(UnpackException):
@@ -888,6 +967,8 @@ def _unpackb2(s, **options):
Insufficient data to unpack the serialized object.
InvalidStringException(UnpackException):
Invalid UTF-8 string encountered during unpacking.
+ UnsupportedTimestampException(UnpackException):
+ Unsupported timestamp format encountered during unpacking.
ReservedCodeException(UnpackException):
Reserved code encountered during unpacking.
UnhashableKeyException(UnpackException):
@@ -934,6 +1015,8 @@ def _unpackb3(s, **options):
Insufficient data to unpack the serialized object.
InvalidStringException(UnpackException):
Invalid UTF-8 string encountered during unpacking.
+ UnsupportedTimestampException(UnpackException):
+ Unsupported timestamp format encountered during unpacking.
ReservedCodeException(UnpackException):
Reserved code encountered during unpacking.
UnhashableKeyException(UnpackException):
@@ -966,6 +1049,8 @@ def __init():
global load
global loads
global compatibility
+ global _epoch
+ global _utc_tzinfo
global _float_precision
global _unpack_dispatch_table
global xrange
@@ -973,6 +1058,14 @@ def __init():
# Compatibility mode for handling strings/bytes with the old specification
compatibility = False
+ if sys.version_info[0] == 3:
+ _utc_tzinfo = datetime.timezone.utc
+ else:
+ _utc_tzinfo = None
+
+ # Calculate epoch datetime
+ _epoch = datetime.datetime(1970, 1, 1, tzinfo=_utc_tzinfo)
+
# Auto-detect system float precision
if sys.float_info.mant_dig == 53:
_float_precision = "double"
diff --git a/gs9o.py b/gs9o.py
index 39be5bee..ecafffe7 100644
--- a/gs9o.py
+++ b/gs9o.py
@@ -4,6 +4,8 @@
from .gosubl import gsshell
from .gosubl import mg9
from .gosubl import sh
+from .gosubl.margo import mg
+from .gosubl.margo_state import actions
import datetime
import json
import os
@@ -18,9 +20,9 @@
DOMAIN = "9o"
AC_OPTS = sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
SPLIT_FN_POS_PAT = re.compile(r'(.+?)(?:[:](\d+))?(?:[:](\d+))?$')
-URL_SCHEME_PAT = re.compile(r'^[\w.+-]+://')
-URL_PATH_PAT = re.compile(r'^(?:[\w.+-]+://|(?:www|(?:\w+\.)*(?:golang|pkgdoc|gosublime)\.org))')
-HIST_EXPAND_PAT = re.compile(r'^(\^+)\s*(\d+)$')
+URL_SCHEME_PAT = re.compile(r'[\w.+-]+://')
+URL_PATH_PAT = re.compile(r'(?:[\w.+-]+://|(?:www|(?:\w+\.)*(?:golang|pkgdoc|gosublime)\.org))')
+HIST_EXPAND_PAT = re.compile(r'^[\'"\s]*(\^+)\s*(\d+)[\'"\s]*$')
HOURGLASS = u'\u231B'
@@ -30,9 +32,6 @@
'build',
'replay',
'clear',
- 'tskill',
- 'tskill replay',
- 'tskill go',
'go',
'go build',
'go clean',
@@ -58,25 +57,45 @@
]
DEFAULT_CL = [(s, s+' ') for s in DEFAULT_COMMANDS]
-stash = {}
-tid_alias = {}
+try:
+ stash
+except NameError:
+ stash = {}
+
+try:
+ tid_alias
+except NameError:
+ tid_alias = {}
def active_wd(win=None):
_, v = gs.win_view(win=win)
return gs.basedir_or_cwd(v.file_name() if v else '')
+_9o_instance_default = '9o'
+
+def _9o_instance(wd):
+ name = gs.setting('9o_instance') or _9o_instance_default
+ if name == 'auto':
+ name = wd or name
+
+ return name.replace('#', '~')
+
+def _rkey(wd):
+ _, rkey = mg.run_tokens.next()
+ return rkey.replace('#', '~')
+
+def _rcmd_wdid_rkey(*, fd):
+ l = fd.split('#', 1)
+ return (l[0], l[1]) if len(l) == 2 else (_wdid(_9o_instance_default), l[0])
+
+def _rcmd_fd(*, wd, rkey):
+ return '%s#%s' % (_wdid(wd), rkey)
+
def _hkey(wd):
- name = gs.setting("9o_instance")
- if name:
- wd = name
- return '9o.hist.%s' % wd
+ return '9o.hist.%s' % _9o_instance(wd)
def _wdid(wd):
- name = gs.setting("9o_instance")
- if name:
- return name
- return '9o://%s' % wd
-
+ return '9o://%s' % _9o_instance(wd)
class EV(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
@@ -164,6 +183,7 @@ def run(self, edit, up):
class Gs9oInitCommand(sublime_plugin.TextCommand):
def run(self, edit, wd=None):
v = self.view
+ mg.view(v.id(), view=v).is_9o = True
vs = v.settings()
if not wd:
@@ -232,45 +252,62 @@ def run(self, edit, wd=None):
os.chdir(wd)
class Gs9oOpenCommand(sublime_plugin.TextCommand):
- def run(self, edit, wd=None, run=[], save_hist=False, focus_view=True):
- self.view.window().run_command('gs9o_win_open', {
- 'wd': wd,
- 'run': run,
- 'save_hist': save_hist,
- 'focus_view': focus_view,
- })
+ def run(self, edit, **kw):
+ win = self.view.window() or sublime.active_window()
+ win.run_command('gs9o_win_open', kw)
class Gs9oWinOpenCommand(sublime_plugin.WindowCommand):
- def run(self, wd=None, run=[], save_hist=False, focus_view=True):
+ def run(
+ self,
+ wd = None,
+ run = [],
+ save_hist = False,
+ focus_view = True,
+ show_view = True,
+ env = {},
+ push_output = [],
+ wdid = '',
+ action_data={},
+ ):
win = self.window
wid = win.id()
if not wd:
wd = active_wd(win=win)
- id = _wdid(wd)
+ id = wdid or _wdid(wd)
st = stash.setdefault(wid, {})
v = st.get(id)
if v is None:
v = win.get_output_panel(id)
st[id] = v
- win.run_command("show_panel", {"panel": ("output.%s" % id)})
+ if show_view:
+ win.run_command("show_panel", {"panel": ("output.%s" % id)})
if focus_view:
win.focus_view(v)
- v.run_command('gs9o_init', {'wd': wd})
+ if not push_output:
+ v.run_command('gs9o_init', {'wd': wd})
+
+ if push_output:
+ v.run_command('gs9o_push_output', push_output)
if run:
- v.run_command('gs9o_paste_exec', {'cmd': ' '.join(run), 'save_hist': save_hist})
+ v.run_command('gs9o_paste_exec', {
+ 'cmd': ' '.join(shlex.quote(s) for s in run),
+ 'save_hist': save_hist,
+ 'env': env,
+ 'action_data': action_data,
+ })
class Gs9oPasteExecCommand(sublime_plugin.TextCommand):
- def run(self, edit, cmd, save_hist=False):
+ def run(self, edit, cmd, save_hist=False, env={}, action_data={}):
view = self.view
view.insert(edit, view.line(view.size()-1).end(), cmd)
view.sel().clear()
view.sel().add(view.line(view.size()-1).end())
- view.run_command('gs9o_exec', {'save_hist': save_hist})
+ view.run_command('gs9o_exec', {'save_hist': save_hist, 'env': env, 'action_data': action_data})
class Gs9oOpenSelectionCommand(sublime_plugin.TextCommand):
def is_enabled(self):
@@ -309,20 +346,20 @@ def act_on_path(view, path):
row = 0
col = 0
- m = gs.VFN_ID_PAT.match(path)
+ m = gs.VFN_ID_PAT.search(path)
if m:
path = 'gs.view://%s' % m.group(1)
- m2 = gs.ROWCOL_PAT.match(m.group(2))
+ m2 = gs.ROWCOL_PAT.search(m.group(2))
if m2:
row = int(m2.group(1))-1 if m2.group(1) else 0
col = int(m2.group(2))-1 if m2.group(2) else 0
else:
- if URL_PATH_PAT.match(path):
+ if URL_PATH_PAT.search(path):
if path.lower().startswith('gs.packages://'):
path = os.path.join(gs.packages_dir(), path[14:])
else:
try:
- if not URL_SCHEME_PAT.match(path):
+ if not URL_SCHEME_PAT.search(path):
path = 'http://%s' % path
gs.notify(DOMAIN, 'open url: %s' % path)
webbrowser.open_new_tab(path)
@@ -346,18 +383,12 @@ def act_on_path(view, path):
return False
-
-def _exparg(s, m):
- s = string.Template(s).safe_substitute(m)
- s = os.path.expanduser(s)
- return s
-
class Gs9oExecCommand(sublime_plugin.TextCommand):
def is_enabled(self):
pos = gs.sel(self.view).begin()
return self.view.score_selector(pos, 'text.9o') > 0
- def run(self, edit, save_hist=False):
+ def run(self, edit, save_hist=False, env={}, action_data={}):
view = self.view
pos = gs.sel(view).begin()
line = view.line(pos)
@@ -402,12 +433,14 @@ def run(self, edit, save_hist=False):
view.run_command('gs9o_init')
return
+ rkey = _rkey(wd)
+ # view.add_regions(rkey, [sublime.Region(line.begin(), view.size())], '')
+ view.add_regions(rkey, [line], '')
view.replace(edit, line, (u'[ `%s` %s ]' % (cmd, HOURGLASS)))
- rkey = '9o.exec.%s' % uuid.uuid4()
- view.add_regions(rkey, [sublime.Region(line.begin(), view.size())], '')
view.run_command('gs9o_init')
nv = sh.env()
+ nv.update(env)
anv = nv.copy()
seen = {}
am = aliases()
@@ -432,56 +465,80 @@ def run(self, edit, save_hist=False):
cmd = string.Template(alias).safe_substitute(anv)
if nm != 'sh':
+ args = []
+ if ag:
+ args = shlex.split(gs.astr(ag))
+
f = builtins().get(nm)
if f:
- args = []
- if ag:
- args = [_exparg(s, nv) for s in shlex.split(gs.astr(ag))]
-
- f(view, edit, args, wd, rkey)
+ try:
+ f(view, edit, args, wd, rkey, action_data=action_data)
+ except TypeError:
+ f(view, edit, args, wd, rkey)
+ return
+ else:
+ _rcmd(
+ view=view,
+ edit=edit,
+ name=nm,
+ args=args,
+ wd=wd,
+ rkey=rkey,
+ action_data=action_data,
+ )
return
if nm == 'sh':
args = sh.cmd(ag)
+ cmd_sh(view, edit, args, wd, rkey)
else:
args = sh.cmd(cmd)
-
- cmd_sh(view, edit, args, wd, rkey)
else:
view.insert(edit, gs.sel(view).begin(), '\n')
class Gs9oPushOutput(sublime_plugin.TextCommand):
- def run(self, edit, rkey, output, hourglass_repl=''):
+ def run(self, edit, rkey, output, hourglass_repl='', done=True):
view = self.view
output = '\t%s' % gs.ustr(output).strip().replace('\r', '').replace('\n', '\n\t')
+ xpos, vpos = view.viewport_position()
+ ypos = view.layout_extent()[1] - vpos
regions = view.get_regions(rkey)
if regions:
- line = view.line(regions[0].begin())
- lsrc = view.substr(line).replace(HOURGLASS, (hourglass_repl or '| done'))
- view.replace(edit, line, lsrc)
- r = line
+ prompt = view.line(regions[0].begin())
+ if done:
+ lsrc = view.substr(prompt).replace(HOURGLASS, (hourglass_repl or '| done'))
+ view.replace(edit, prompt, lsrc)
+
+ regions = view.get_regions(rkey)
+ r = view.line(regions[-1].end())
if output.strip():
- line = view.line(regions[0].begin())
- view.insert(edit, line.end(), '\n%s' % output)
- r = view.get_regions(rkey)[0]
+ n = view.insert(edit, r.end(), '\n%s' % output)
+ r = sublime.Region(prompt.begin(), r.end() + n)
+ view.erase_regions(rkey)
+ view.add_regions(rkey, [r])
else:
n = view.size()
view.insert(edit, n, '\n%s' % output)
r = sublime.Region(n, view.size())
- if gs.setting('9o_show_end') is True:
- view.show(r.end())
- else:
- view.show(r.begin())
+ if done:
+ if gs.setting('9o_show_end') is True:
+ view.show(r.end(), True)
+ else:
+ view.show(r.begin(), True)
+ elif gs.sel(view).begin() >= r.begin():
+ ypos = view.layout_extent()[1] - ypos
+ view.set_viewport_position((xpos, ypos), False)
class Gs9oRunManyCommand(sublime_plugin.TextCommand):
- def run(self, edit, wd=None, commands=[], save_hist=False, focus_view=False):
+ def run(self, edit, wd=None, commands=[], save_hist=False, focus_view=False, show_view=True):
for run in commands:
self.view.run_command("gs9o_open", {
'run': run,
'wd': wd,
'save_hist': save_hist,
'focus_view': focus_view,
+ 'show_view': show_view,
})
def aliases():
@@ -499,15 +556,13 @@ def builtins():
return m
-def push_output(view, rkey, output, hourglass_repl=''):
- def f():
- view.run_command('gs9o_push_output', {
- 'rkey': rkey,
- 'output': output,
- 'hourglass_repl': hourglass_repl,
- })
-
- sublime.set_timeout(f, 0)
+def push_output(view, rkey, output, hourglass_repl='', done=True):
+ view.run_command('gs9o_push_output', {
+ 'rkey': rkey,
+ 'output': output,
+ 'hourglass_repl': hourglass_repl,
+ 'done': done,
+ })
def _save_all(win, wd):
if gs.setting('autosave') is True and win is not None:
@@ -515,7 +570,7 @@ def _save_all(win, wd):
try:
fn = v.file_name()
if fn and v.is_dirty() and fn.endswith('.go') and os.path.dirname(fn) == wd:
- v.run_command('gs_fmt_save')
+ v.run_command('save')
except Exception:
gs.error_traceback(DOMAIN)
@@ -550,17 +605,37 @@ def f():
return cid, cb
-def cmd_margo_reinstall(view, edit, args, wd, rkey):
- def cb():
- gs.del_attr(mg9._inst_name())
- out = mg9.install('', True, True)
- gs.notify(DOMAIN, 'MarGo re-installed done')
- push_output(view, rkey, out)
-
- gsq.launch(DOMAIN, cb)
-
-def cmd_echo(view, edit, args, wd, rkey):
- push_output(view, rkey, ' '.join(args))
+def _rcmd_output_handler(rs, act):
+ wdid, rkey = _rcmd_wdid_rkey(fd=act.fd)
+ sublime.active_window().run_command('gs9o_win_open', {
+ 'wdid': wdid,
+ 'focus_view': False,
+ 'show_view': False,
+ 'push_output': {
+ 'rkey': rkey,
+ 'output': act.output,
+ 'done': act.close,
+ },
+ })
+
+mg.output_handler = _rcmd_output_handler
+
+def _rcmd(*, view, edit, name, args, wd, rkey, action_data={}):
+ def cb(rs):
+ if rs.error:
+ push_output(view, rkey, rs.error)
+
+ wd = action_data.get('Dir') or wd
+ act = actions.RunCmd.copy()
+ act['Data'] = {
+ 'Fd': _rcmd_fd(wd=wd, rkey=rkey),
+ 'Name': name,
+ 'Args': args,
+ }
+ act['Data'].update(action_data)
+ # `view` is the 9o view, but the command wants the `active/editor view`
+ run_view = None
+ mg.send(view=run_view, cb=cb, actions=[act])
def cmd_which(view, edit, args, wd, rkey):
l = []
@@ -596,7 +671,9 @@ def cmd_cd(view, edit, args, wd, rkey):
wd = args[0]
wd = string.Template(wd).safe_substitute(sh.env())
wd = os.path.expanduser(wd)
+			# (review) removed leftover debug print of wd before abspath
wd = os.path.abspath(wd)
+			# (review) removed leftover debug print of wd after abspath
else:
fn = view.window().active_view().file_name()
if fn:
@@ -614,24 +691,23 @@ def cmd_reset(view, edit, args, wd, rkey):
push_output(view, rkey, '')
view.erase(edit, sublime.Region(0, view.size()))
view.run_command('gs9o_init')
+ if args:
+ view.run_command('gs9o_paste_exec', {'cmd': ' '.join(args), 'save_hist': False})
def cmd_clear(view, edit, args, wd, rkey):
cmd_reset(view, edit, args, wd, rkey)
-def cmd_go(view, edit, args, wd, rkey):
+def cmd_go(view, edit, args, wd, rkey, action_data={}):
_save_all(view.window(), wd)
-
- cid, cb = _9_begin_call('go', view, edit, args, wd, rkey, '9go-%s' % wd)
- a = {
- 'cid': cid,
- 'env': sh.env(),
- 'cwd': wd,
- 'cmd': {
- 'name': 'go',
- 'args': args,
- }
- }
- sublime.set_timeout(lambda: mg9.acall('sh', a, cb), 0)
+ sublime.set_timeout_async(lambda: _rcmd(
+ view=view,
+ edit=edit,
+ name='go',
+ args=args,
+ wd=wd,
+ rkey=rkey,
+ action_data=action_data,
+ ))
def cmd_cancel_replay(view, edit, args, wd, rkey):
cid = ''
@@ -649,12 +725,12 @@ def cmd_cancel_replay(view, edit, args, wd, rkey):
mg9.acall('kill', {'cid': cid}, None)
push_output(view, rkey, '')
-def cmd_sh(view, edit, args, wd, rkey):
+def cmd_sh(view, edit, args, wd, rkey, action_data={}):
cid, cb = _9_begin_call('sh', view, edit, args, wd, rkey, '')
a = {
'cid': cid,
'env': sh.env(),
- 'cwd': wd,
+ 'cwd': action_data.get('Dir') or wd,
'cmd': {
'name': args[0],
'args': args[1:],
@@ -678,61 +754,16 @@ def cmd_help(view, edit, args, wd, rkey):
gs.focus(gs.dist_path('9o.md'))
push_output(view, rkey, '')
-def cmd_run(view, edit, args, wd, rkey):
- cmd_9(view, edit, gs.lst('run', args), wd, rkey)
-
def cmd_replay(view, edit, args, wd, rkey):
- cmd_9(view, edit, gs.lst('replay', args), wd, rkey)
-
-def cmd_build(view, edit, args, wd, rkey):
- cmd_9(view, edit, gs.lst('build', args), wd, rkey)
-
-def cmd_9(view, edit, args, wd, rkey):
- if len(args) == 0 or args[0] not in ('run', 'replay', 'build'):
- push_output(view, rkey, ('9: invalid args %s' % args))
- return
-
- subcmd = args[0]
- cid = ''
- if subcmd == 'replay':
- cid = '9replay-%s' % wd
- cid, cb = _9_begin_call(subcmd, view, edit, args, wd, rkey, cid)
-
- a = {
- 'cid': cid,
- 'env': sh.env(),
- 'dir': wd,
- 'args': args[1:],
- 'build_only': (subcmd == 'build'),
- }
-
- win = view.window()
- if win is not None:
- av = win.active_view()
- if av is not None:
- fn = av.file_name()
- if fn:
- _save_all(win, wd)
- else:
- if gs.is_go_source_view(av, False):
- a['fn'] = gs.view_fn(av)
- a['src'] = av.substr(sublime.Region(0, av.size()))
-
- sublime.set_timeout(lambda: mg9.acall('play', a, cb), 0)
-
-def cmd_tskill(view, edit, args, wd, rkey):
- if len(args) == 0:
- sublime.set_timeout(lambda: sublime.active_window().run_command("gs_show_tasks"), 0)
- push_output(view, rkey, '')
- return
-
- l = []
- for tid in args:
- tid = tid.lstrip('#')
- tid = tid_alias.get('%s-%s' % (tid, wd), tid)
- l.append('kill %s: %s' % (tid, ('yes' if gs.cancel_task(tid) else 'no')))
-
- push_output(view, rkey, '\n'.join(l))
+ _save_all(view.window(), wd)
+ _rcmd(
+ view=view,
+ edit=edit,
+ name='go.replay',
+ args=args,
+ wd=wd,
+ rkey=rkey,
+ )
def _env_settings(d, view, edit, args, wd, rkey):
if len(args) > 0:
diff --git a/gscommands.py b/gscommands.py
index 90518880..82df256b 100644
--- a/gscommands.py
+++ b/gscommands.py
@@ -82,7 +82,9 @@ def run(self, edit, row, col=0):
r = sublime.Region(pt, pt)
self.view.sel().clear()
self.view.sel().add(r)
- self.view.show(pt)
+ self.view.show(pt, True)
+ xpos, ypos = self.view.viewport_position()
+ self.view.set_viewport_position((0, ypos), False)
dmn = 'gs.focus.%s:%s:%s' % (gs.view_fn(self.view), row, col)
flags = sublime.DRAW_EMPTY_AS_OVERWRITE
show = lambda: self.view.add_regions(dmn, [r], 'comment', 'bookmark', flags)
@@ -117,7 +119,7 @@ def run(self):
gs.error_traceback('GsNewGoFile')
self.window.new_file().run_command('gs_create_new_go_file', {
- 'pkg_name': pkg_name,
+ 'pkg_name': '',
'file_name': 'main.go',
})
@@ -126,9 +128,17 @@ def run(self, edit, pkg_name, file_name):
view = self.view
view.set_name(file_name)
view.set_syntax_file(gs.tm_path('go'))
- view.replace(edit, sublime.Region(0, view.size()), 'package %s\n' % pkg_name)
- view.sel().clear()
- view.sel().add(view.find(pkg_name, 0, sublime.LITERAL))
+ if pkg_name == '':
+ view.sel().add(sublime.Region(0, 0))
+ view.run_command('auto_complete', {
+ 'api_completions_only': True,
+ 'disable_auto_insert': True,
+ 'next_completion_if_showing': False,
+ })
+ else:
+ view.replace(edit, sublime.Region(0, view.size()), 'package %s\n' % pkg_name)
+ view.sel().clear()
+ view.sel().add(view.find(pkg_name, 0, sublime.LITERAL))
class GsShowTasksCommand(sublime_plugin.WindowCommand):
def run(self):
diff --git a/gsev.py b/gsev.py
index a0dfdb66..554605f8 100644
--- a/gsev.py
+++ b/gsev.py
@@ -6,36 +6,6 @@
DOMAIN = 'GsEV'
-class UncleSam(object):
- def __init__(self):
- self.phantoms = None
-
- def on_load(self, view):
- if view.file_name() != gs.dist_path('CHANGELOG.md'):
- return
-
- self.phantoms = sublime.PhantomSet(view, 'gs.uncle-sam')
- self.phantoms.update([sublime.Phantom(
- sublime.Region(-1, -1),
- '''
-
-
-
-
-
- '''.format(
- url='https://margo.sh/gosublime-future',
- src=gs.dist_path('images/fight-the-future.png')
- ),
- sublime.LAYOUT_INLINE,
- self._on_click
- )])
-
- def _on_click(self, url):
- webbrowser.open_new_tab(url)
-
-uncle_sam = UncleSam()
-
class EV(sublime_plugin.EventListener):
def on_pre_save(self, view):
view.run_command('gs_fmt')
@@ -55,14 +25,16 @@ def on_activated(self, view):
def on_load(self, view):
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
- sublime.set_timeout_async(lambda: uncle_sam.on_load(view), 0)
class GsOnLeftClick(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
if gs.is_go_source_view(view):
- if not gstest.handle_action(view, 'left-click'):
- view.run_command('gs_doc', {"mode": "goto"})
+ view.run_command('gs9o_open', {
+ "run": [".actuate", "-button=left"],
+ "focus_view": False,
+ "show_view": False,
+ })
elif view.score_selector(gs.sel(view).begin(), "text.9o") > 0:
view.window().run_command("gs9o_open_selection")
@@ -70,8 +42,11 @@ class GsOnRightClick(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
if gs.is_go_source_view(view):
- if not gstest.handle_action(view, 'right-click'):
- view.run_command('gs_doc', {"mode": "hint"})
+ view.run_command('gs9o_open', {
+ "run": [".actuate", "-button=right"],
+ "focus_view": False,
+ "show_view": False,
+ })
def do_post_save(view):
if not gs.is_pkg_view(view):
diff --git a/something_borrowed/Go/Go.sublime-syntax b/something_borrowed/Go/Go-Copy.sublime-syntax
similarity index 99%
rename from something_borrowed/Go/Go.sublime-syntax
rename to something_borrowed/Go/Go-Copy.sublime-syntax
index 56e5c7bf..1d2815b3 100644
--- a/something_borrowed/Go/Go.sublime-syntax
+++ b/something_borrowed/Go/Go-Copy.sublime-syntax
@@ -1,7 +1,7 @@
%YAML 1.2
---
# http://www.sublimetext.com/docs/3/syntax.html
-name: Go
+name: 'GoSublime: Go (Copy)'
file_extensions:
- go
first_line_match: "-[*]-( Mode:)? Go -[*]-"
diff --git a/something_borrowed/Go/generate.go b/something_borrowed/Go/generate.go
index 47a906b6..fae7048b 100644
--- a/something_borrowed/Go/generate.go
+++ b/something_borrowed/Go/generate.go
@@ -6,6 +6,7 @@
package main
import (
+ "bytes"
"fmt"
"io/ioutil"
"net/http"
@@ -18,6 +19,7 @@ type dlFile struct {
name string
url string
dirs []string
+ filt func(s []byte) []byte
}
func main() {
@@ -36,6 +38,9 @@ func main() {
name: "Go.sublime-syntax",
url: "https://raw.githubusercontent.com/sublimehq/Packages/master/Go/Go.sublime-syntax",
dirs: []string{"."},
+ filt: func(s []byte) []byte {
+ return bytes.Replace(s, []byte("name: Go"), []byte("name: 'GoSublime: Go (Copy)'"), -1)
+ },
},
}
for _, f := range urls {
@@ -64,6 +69,10 @@ func dl(f dlFile) {
return
}
+ if f.filt != nil {
+ content = f.filt(content)
+ }
+
for _, dir := range f.dirs {
ioutil.WriteFile(filepath.Join(dir, f.name), content, 0644)
if err != nil {
diff --git a/src/disposa.blue/margo/.travis.yml b/src/disposa.blue/margo/.travis.yml
deleted file mode 100644
index 71c75fc7..00000000
--- a/src/disposa.blue/margo/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-go_import_path: disposa.blue/margo
-language: go
-go:
- - 1.9
- - 1.x
-
-script:
- - go test -race -v ./...
diff --git a/src/disposa.blue/margo/Gopkg.lock b/src/disposa.blue/margo/Gopkg.lock
deleted file mode 100644
index 3a538bbd..00000000
--- a/src/disposa.blue/margo/Gopkg.lock
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- branch = "master"
- name = "github.com/ugorji/go"
- packages = ["codec"]
- revision = "16f09ef744fd4227190f626f14cfdefb14362b3b"
-
-[[projects]]
- name = "github.com/urfave/cli"
- packages = ["."]
- revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1"
- version = "v1.20.0"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/crypto"
- packages = ["blake2b"]
- revision = "91a49db82a88618983a78a06c1cbd4e00ab749ab"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "abcbbea5d46799cef82db8ef20f2f68d7d9580d20996b07df2f68d66cad13a49"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/src/disposa.blue/margo/Gopkg.toml b/src/disposa.blue/margo/Gopkg.toml
deleted file mode 100644
index 46f81a2c..00000000
--- a/src/disposa.blue/margo/Gopkg.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-[[constraint]]
- name = "github.com/ugorji/go"
- branch = "master"
-
-[[constraint]]
- name = "github.com/urfave/cli"
- version = "1.20.0"
-
-[[constraint]]
- branch = "master"
- name = "golang.org/x/crypto"
-
-[prune]
- go-tests = true
- unused-packages = true
diff --git a/src/disposa.blue/margo/cmd/margo/main.go b/src/disposa.blue/margo/cmd/margo/main.go
deleted file mode 100644
index a93d4f8c..00000000
--- a/src/disposa.blue/margo/cmd/margo/main.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package main
-
-import (
- "disposa.blue/margo/cmdpkg/margo"
-)
-
-func main() {
- margo.Main()
-}
diff --git a/src/disposa.blue/margo/cmdpkg/margo/main.go b/src/disposa.blue/margo/cmdpkg/margo/main.go
deleted file mode 100644
index f9661961..00000000
--- a/src/disposa.blue/margo/cmdpkg/margo/main.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package margo
-
-import (
- "disposa.blue/margo/mgcli"
- "disposa.blue/margo/sublime"
- "github.com/urfave/cli"
-)
-
-func Main() {
- app := mgcli.NewApp()
- app.Commands = []cli.Command{
- sublime.Command,
- }
- app.RunAndExitOnError()
-}
diff --git a/src/disposa.blue/margo/extension-example/extension-example.go b/src/disposa.blue/margo/extension-example/extension-example.go
deleted file mode 100644
index 5c07e51e..00000000
--- a/src/disposa.blue/margo/extension-example/extension-example.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package margo
-
-import (
- "disposa.blue/margo/golang"
- "disposa.blue/margo/mg"
- "time"
-)
-
-// Margo is the entry-point to margo
-func Margo(ma mg.Args) {
- // add our reducers (margo plugins) to the store
- // they are run in the specified order
- // and should ideally not block for more than a couple milliseconds
- ma.Store.Use(
- // by default, events (e.g. ViewSaved) are triggered in all files
- // uncomment the reducer below to restict event to Go(-lang) files
- // please note, however, that this mode is not tested
- // and saving a non-go file will not trigger linters, etc. for that go pkg
- //
- // mg.Reduce(func(mx *mg.Ctx) *mg.State {
- // return mx.SetConfig(mx.Config.EnabledForLangs("go"))
- // }),
-
- // add the day and time to the status bar
- // DayTimeStatus,
-
- // both GoFmt and GoImports will automatically disable the GoSublime version
- // you will need to install the `goimports` tool manually
- // https://godoc.org/golang.org/x/tools/cmd/goimports
- //
- // golang.GoFmt,
- // or
- // golang.GoImports,
-
- // use gocode for autocompletion
- &golang.Gocode{
- // automatically install missing packages
- // Autobuild: true,
-
- // autocompete packages that are not yet imported
- // this goes well with GoImports
- UnimportedPackages: true,
-
- // show the function parameters. this can take up a lot of space
- ShowFuncParams: true,
- },
-
- // add some default context aware-ish snippets
- golang.Snippets,
-
- // add our own snippets
-
- // check the file for syntax errors
- &golang.SyntaxCheck{},
-
- // add our own snippets
- MySnippets,
-
- // run `go install` on save
- // or use GoInstallDiscardBinaries which will additionally set $GOBIN
- // to a temp directory so binaries are not installed into your $PATH
- //
- // golang.GoInstall(),
- // or
- // golang.GoInstallDiscardBinaries(),
-
- // run `go vet` on save. go vet is ran automatically as part of `go test` in go1.10
- // golang.GoVet(),
-
- // run `go test -race` on save
- // in go1.10, go vet is ran automatically
- golang.GoTest("-race"),
-
- // run `golint` on save
- // &golang.Linter{Name: "golint", Label: "Go/Lint"},
-
- // run gometalinter on save
- // &golang.Linter{Name: "gometalinter", Args: []string{
- // "--disable=gas",
- // "--fast",
- // }},
- )
-}
-
-// DayTimeStatus adds the current day and time to the status bar
-var DayTimeStatus = mg.Reduce(func(mx *mg.Ctx) *mg.State {
- if _, ok := mx.Action.(mg.Started); ok {
- dispatch := mx.Store.Dispatch
- // kick off the ticker when we start
- go func() {
- ticker := time.NewTicker(1 * time.Second)
- for range ticker.C {
- dispatch(mg.Render)
- }
- }()
- }
-
- // we always want to render the time
- // otherwise it will sometimes disappear from the status bar
- now := time.Now()
- format := "Mon, 15:04"
- if now.Second()%2 == 0 {
- format = "Mon, 15 04"
- }
- return mx.AddStatus(now.Format(format))
-})
-
-// MySnippets is a slice of functions returning our own snippets
-var MySnippets = golang.SnippetFuncs{
- func(cx *golang.CompletionCtx) []mg.Completion {
- // if we're not in a block (i.e. function), do nothing
- if !cx.Scope.Is(golang.BlockScope) {
- return nil
- }
-
- return []mg.Completion{
- {
- Query: "if err",
- Title: "err != nil { return }",
- Src: "if ${1:err} != nil {\n\treturn $0\n}",
- },
- }
- },
-}
diff --git a/src/disposa.blue/margo/golang/common.go b/src/disposa.blue/margo/golang/common.go
deleted file mode 100644
index fd889529..00000000
--- a/src/disposa.blue/margo/golang/common.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package golang
-
-import (
- "disposa.blue/margo/mg"
- "go/ast"
- "go/build"
- "go/token"
- "os"
- "path/filepath"
- "reflect"
- "regexp"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-var (
- CommonPatterns = append([]*regexp.Regexp{
- regexp.MustCompile(`^\s*(?P.+?\.\w+):(?P\d+:)(?P\d+:?)?(?:(?Pwarning|error)[:])?(?P.+?)(?: [(](?P[-\w]+)[)])?$`),
- regexp.MustCompile(`(?Pcan't load package: package .+: found packages .+ \((?P.+?\.go)\).+)`),
- }, mg.CommonPatterns...)
-)
-
-func BuildContext(mx *mg.Ctx) *build.Context {
- c := build.Default
- c.GOARCH = mx.Env.Get("GOARCH", c.GOARCH)
- c.GOOS = mx.Env.Get("GOOS", c.GOOS)
- // these must be passed by the client
- // if we leave them unset, there's a risk something will end up using os.Getenv(...)
- logUndefined := func(k string) string {
- v := mx.Env[k]
- if v == "" {
- v = k + "-is-not-defined"
- mx.Log.Println(v)
- }
- return v
- }
- c.GOROOT = logUndefined("GOROOT")
- c.GOPATH = logUndefined("GOPATH")
- return &c
-}
-
-func PathList(p string) []string {
- l := []string{}
- for _, s := range strings.Split(p, string(filepath.ListSeparator)) {
- if s != "" {
- l = append(l, s)
- }
- }
- return l
-}
-
-func NodeEnclosesPos(node ast.Node, pos token.Pos) bool {
- if node == nil {
- return false
- }
- // apparently node can be (*T)(nil)
- if reflect.ValueOf(node).IsNil() {
- return false
- }
- if np := node.Pos(); !np.IsValid() || pos <= np {
- return false
- }
- ne := node.End()
- if c, ok := node.(*ast.Comment); ok && strings.HasPrefix(c.Text, "//") {
- // line comments' end don't include the newline
- ne++
- }
- return pos < ne || !ne.IsValid()
-}
-
-type CursorNode struct {
- Pos token.Pos
- AstFile *ast.File
- TokenFile *token.File
-
- GenDecl *ast.GenDecl
- ImportSpec *ast.ImportSpec
- Comment *ast.Comment
- BlockStmt *ast.BlockStmt
- CallExpr *ast.CallExpr
- BasicLit *ast.BasicLit
- Nodes []ast.Node
- Node ast.Node
-}
-
-func (cn *CursorNode) ScanFile(af *ast.File) {
- pos := af.Package
- end := pos + token.Pos(len("package"))
- if af.Name != nil {
- end = pos + token.Pos(af.Name.End())
- }
- if cn.Pos >= pos && cn.Pos <= end {
- return
- }
-
- cn.Append(af)
- ast.Walk(cn, af)
- for _, cg := range af.Comments {
- for _, c := range cg.List {
- if NodeEnclosesPos(c, cn.Pos) {
- cn.Append(c)
- }
- }
- }
- cn.Node = cn.Nodes[len(cn.Nodes)-1]
- cn.Set(&cn.GenDecl)
- cn.Set(&cn.BlockStmt)
- cn.Set(&cn.BasicLit)
- cn.Set(&cn.CallExpr)
- cn.Set(&cn.Comment)
- cn.Set(&cn.ImportSpec)
-}
-
-func (cn *CursorNode) Visit(node ast.Node) ast.Visitor {
- if NodeEnclosesPos(node, cn.Pos) {
- cn.Append(node)
- }
- return cn
-}
-
-func (cn *CursorNode) Append(n ast.Node) {
- for _, x := range cn.Nodes {
- if n == x {
- return
- }
- }
- cn.Nodes = append(cn.Nodes, n)
-}
-
-func (cn *CursorNode) Set(destPtr interface{}) bool {
- v := reflect.ValueOf(destPtr).Elem()
- if !v.CanSet() {
- return false
- }
- for i := len(cn.Nodes) - 1; i >= 0; i-- {
- x := reflect.ValueOf(cn.Nodes[i])
- if x.Type() == v.Type() {
- v.Set(x)
- return true
- }
- }
- return false
-}
-
-func ParseCursorNode(kvs mg.KVStore, src []byte, offset int) *CursorNode {
- pf := ParseFile(kvs, "", src)
- cn := &CursorNode{
- AstFile: pf.AstFile,
- TokenFile: pf.TokenFile,
- Pos: token.Pos(pf.TokenFile.Base() + offset),
- }
- cn.ScanFile(cn.AstFile)
- return cn
-}
-
-func IsLetter(ch rune) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
-}
-
-func IsPkgDir(dir string) bool {
- if dir == "" || dir == "." {
- return false
- }
-
- f, err := os.Open(dir)
- if err != nil {
- return false
- }
-
- l, _ := f.Readdirnames(-1)
- for _, fn := range l {
- if strings.HasSuffix(fn, ".go") {
- return true
- }
- }
- return false
-}
diff --git a/src/disposa.blue/margo/golang/completion.go b/src/disposa.blue/margo/golang/completion.go
deleted file mode 100644
index 982cc91c..00000000
--- a/src/disposa.blue/margo/golang/completion.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package golang
-
-import (
- "disposa.blue/margo/mg"
- "go/ast"
- "go/token"
- "strings"
-)
-
-const (
- PackageScope CompletionScope = 1 << iota
- FileScope
- DeclScope
- BlockScope
- ImportScope
- ConstScope
- VarScope
- TypeScope
- CommentScope
- StringScope
- ImportPathScope
-)
-
-type CompletionScope uint64
-
-func (cs CompletionScope) Is(scope CompletionScope) bool {
- return cs&scope != 0
-}
-
-func (cs CompletionScope) Any(scopes ...CompletionScope) bool {
- for _, s := range scopes {
- if cs&s != 0 {
- return true
- }
- }
- return false
-}
-
-func (cs CompletionScope) All(scopes ...CompletionScope) bool {
- for _, s := range scopes {
- if cs&s == 0 {
- return false
- }
- }
- return true
-}
-
-type CompletionCtx struct {
- *mg.Ctx
- CursorNode *CursorNode
- AstFile *ast.File
- Scope CompletionScope
- PkgName string
- IsTestFile bool
-}
-
-func NewCompletionCtx(mx *mg.Ctx, src []byte, pos int) *CompletionCtx {
- cn := ParseCursorNode(mx.Store, src, pos)
- af := cn.AstFile
- if af == nil {
- af = NilAstFile
- }
- cx := &CompletionCtx{
- Ctx: mx,
- CursorNode: cn,
- AstFile: af,
- PkgName: af.Name.String(),
- }
- cx.IsTestFile = strings.HasSuffix(mx.View.Filename(), "_test.go") ||
- strings.HasSuffix(cx.PkgName, "_test")
-
- if cx.PkgName == "_" || cx.PkgName == "" {
- cx.Scope |= PackageScope
- return cx
- }
-
- switch cx.CursorNode.Node.(type) {
- case nil:
- cx.Scope |= PackageScope
- case *ast.File:
- cx.Scope |= FileScope
- case *ast.BlockStmt:
- cx.Scope |= BlockScope
- }
-
- if gd := cn.GenDecl; gd != nil {
- switch gd.Tok {
- case token.IMPORT:
- cx.Scope |= ImportScope
- case token.CONST:
- cx.Scope |= ConstScope
- case token.VAR:
- cx.Scope |= VarScope
- case token.TYPE:
- cx.Scope |= TypeScope
- }
- }
- if cn.Comment != nil {
- cx.Scope |= CommentScope
- }
- if lit := cn.BasicLit; lit != nil && lit.Kind == token.STRING {
- if cn.ImportSpec != nil {
- cx.Scope |= ImportPathScope
- } else {
- cx.Scope |= StringScope
- }
- }
- return cx
-}
-
-func DedentCompletion(s string) string {
- s = strings.TrimLeft(s, "\n")
- sfx := strings.TrimLeft(s, " \t")
- pfx := s[:len(s)-len(sfx)]
- if pfx == "" {
- return s
- }
- s = strings.TrimSpace(s)
- lines := strings.Split(s, "\n")
- for i, ln := range lines {
- lines[i] = strings.TrimPrefix(ln, pfx)
- }
- return strings.Join(lines, "\n")
-}
diff --git a/src/disposa.blue/margo/golang/completion_test.go b/src/disposa.blue/margo/golang/completion_test.go
deleted file mode 100644
index 577725af..00000000
--- a/src/disposa.blue/margo/golang/completion_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package golang
-
-import (
- "testing"
-)
-
-func TestDedentCompletion(t *testing.T) {
- src := `
- type T struct {
- S string
- }
- `
- want := `type T struct {
- S string
-}`
- got := DedentCompletion(src)
- if want != got {
- t.Errorf("got `\n%s\n`\nwant `\n%s\n`", got, want)
- }
-}
diff --git a/src/disposa.blue/margo/golang/gocode.go b/src/disposa.blue/margo/golang/gocode.go
deleted file mode 100644
index 32f3877a..00000000
--- a/src/disposa.blue/margo/golang/gocode.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package golang
-
-import (
- "bytes"
- "disposa.blue/margo/golang/internal/gocode"
- "disposa.blue/margo/mg"
- "disposa.blue/margo/sublime"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/printer"
- "go/token"
- "io"
- "strings"
- "unicode"
-)
-
-var (
- gocodeClassTags = map[string]mg.CompletionTag{
- "const": mg.ConstantTag,
- "func": mg.FunctionTag,
- "package": mg.PackageTag,
- "import": mg.PackageTag,
- "type": mg.TypeTag,
- "var": mg.VariableTag,
- }
-)
-
-type Gocode struct {
- InstallSuffix string
- ProposeBuiltins bool
- ProposeTests bool
- Autobuild bool
- UnimportedPackages bool
- AllowExplicitCompletions bool
- AllowWordCompletions bool
- ShowFuncParams bool
- ShowFuncResultNames bool
-}
-
-func (g *Gocode) Reduce(mx *mg.Ctx) *mg.State {
- st, gx := initGocodeReducer(mx, g)
- if gx == nil || !gx.query.completions {
- return st
- }
-
- candidates := gx.candidates()
- completions := make([]mg.Completion, 0, len(candidates))
- for _, v := range candidates {
- if c, ok := g.completion(mx, gx, v); ok {
- completions = append(completions, c)
- }
- }
- return st.AddCompletions(completions...)
-}
-
-func (g *Gocode) funcTitle(fx *ast.FuncType, buf *bytes.Buffer, decl string) string {
- // TODO: caching
-
- buf.Reset()
- fset := token.NewFileSet()
-
- buf.WriteString("func(")
- if fx.Params != nil {
- switch {
- case g.ShowFuncParams:
- g.printFields(buf, fset, fx.Params.List, true)
- case fx.Params.NumFields() != 0:
- buf.WriteString("…")
- }
- }
- buf.WriteString(")")
-
- if fl := fx.Results; fl != nil {
- buf.WriteString(" ")
- hasNames := g.ShowFuncResultNames && len(fl.List) != 0 && len(fl.List[0].Names) != 0
- if hasNames {
- buf.WriteString("(")
- }
- g.printFields(buf, fset, fl.List, g.ShowFuncResultNames)
- if hasNames {
- buf.WriteString(")")
- }
- }
-
- return buf.String()
-}
-
-func (g *Gocode) funcSrc(fx *ast.FuncType, buf *bytes.Buffer, v gocode.MargoCandidate, gx *gocodeCtx) string {
- // TODO: caching
- // TODO: only output the name, if we're in a call, assignment, etc. that takes a func
-
- outputArgs := true
- for _, c := range gx.src[gx.pos:] {
- if c == '(' {
- outputArgs = false
- break
- }
- r := rune(c)
- if !IsLetter(r) && !unicode.IsSpace(r) {
- break
- }
- }
-
- buf.Reset()
- buf.WriteString(v.Name)
- if outputArgs {
- buf.WriteString("(")
- pos := 0
- for _, field := range fx.Params.List {
- for _, name := range field.Names {
- pos++
- if pos > 1 {
- buf.WriteString(", ")
- }
- fmt.Fprintf(buf, "${%d:%s}", pos, name)
- }
- }
- buf.WriteString(")")
- }
- buf.WriteString("${0}")
- return buf.String()
-}
-
-func (g *Gocode) printFields(w io.Writer, fset *token.FileSet, list []*ast.Field, printNames bool) {
- for i, field := range list {
- if i > 0 {
- fmt.Fprint(w, ", ")
- }
- if printNames {
- for j, name := range field.Names {
- if j > 0 {
- fmt.Fprint(w, ", ")
- }
- fmt.Fprint(w, name.String())
- }
- if len(field.Names) != 0 {
- fmt.Fprint(w, " ")
- }
- }
- printer.Fprint(w, fset, field.Type)
- }
-}
-
-func (g *Gocode) completion(mx *mg.Ctx, gx *gocodeCtx, v gocode.MargoCandidate) (c mg.Completion, ok bool) {
- buf := bytes.NewBuffer(nil)
- if v.Class.String() == "PANIC" {
- mx.Log.Printf("gocode panicked in '%s' at pos '%d'\n", gx.fn, gx.pos)
- return c, false
- }
- if !g.ProposeTests && g.matchTests(v) {
- return c, false
- }
-
- var fx *ast.FuncType
- if strings.HasPrefix(v.Type, "func(") {
- x, _ := parser.ParseExpr(v.Type)
- fx, _ = x.(*ast.FuncType)
- }
-
- c = mg.Completion{
- Query: g.compQuery(v),
- Tag: g.compTag(v),
- Src: g.compSrc(fx, buf, v, gx),
- Title: g.compTitle(fx, buf, v),
- }
- return c, true
-}
-
-func (g *Gocode) compQuery(v gocode.MargoCandidate) string {
- return v.Name
-}
-
-func (g *Gocode) compSrc(fx *ast.FuncType, buf *bytes.Buffer, v gocode.MargoCandidate, gx *gocodeCtx) string {
- if fx == nil {
- return v.Name
- }
- return g.funcSrc(fx, buf, v, gx)
-}
-
-func (g *Gocode) compTag(v gocode.MargoCandidate) mg.CompletionTag {
- if tag, ok := gocodeClassTags[v.Class.String()]; ok {
- return tag
- }
- return mg.UnknownTag
-}
-
-func (g *Gocode) compTitle(fx *ast.FuncType, buf *bytes.Buffer, v gocode.MargoCandidate) string {
- if fx != nil {
- return g.funcTitle(fx, buf, v.Type)
- }
- if v.Type == "" {
- return v.Class.String()
- }
- return v.Type
-}
-
-func (g *Gocode) matchTests(c gocode.MargoCandidate) bool {
- return strings.HasPrefix(c.Name, "Test") ||
- strings.HasPrefix(c.Name, "Benchmark") ||
- strings.HasPrefix(c.Name, "Example")
-}
-
-type gocodeCtx struct {
- Gocode
- cn *CursorNode
- fn string
- src []byte
- pos int
- bctx *build.Context
- cfg gocode.MargoConfig
- query struct {
- completions bool
- tooltips bool
- }
-}
-
-func initGocodeReducer(mx *mg.Ctx, g *Gocode) (*mg.State, *gocodeCtx) {
- st := mx.State
- if !st.View.LangIs("go") {
- return st, nil
- }
-
- if cfg, ok := st.Config.(sublime.Config); ok {
- cfg = cfg.DisableGsComplete()
- if !g.AllowExplicitCompletions {
- cfg = cfg.InhibitExplicitCompletions()
- }
- if !g.AllowWordCompletions {
- cfg = cfg.InhibitWordCompletions()
- }
- st = st.SetConfig(cfg)
- }
-
- // TODO: use QueryCompletions.Pos when support is added
- _, tooltips := mx.Action.(mg.QueryTooltips)
- _, completions := mx.Action.(mg.QueryCompletions)
- if !completions && !tooltips {
- return st, nil
- }
-
- bctx := BuildContext(mx)
- src, _ := st.View.ReadAll()
- if len(src) == 0 {
- return st, nil
- }
- pos := clampSrcPos(src, st.View.Pos)
- pos = mg.BytePos(src, pos)
-
- cx := NewCompletionCtx(mx, src, pos)
- if cx.Scope.Any(PackageScope, FileScope) {
- return st, nil
- }
- cn := cx.CursorNode
- // don't do completion inside comments
- if cn.Comment != nil {
- return st, nil
- }
- // don't do completion inside strings unless it's an import
- if cn.ImportSpec == nil && cn.BasicLit != nil && cn.BasicLit.Kind == token.STRING {
- return st, nil
- }
-
- gx := &gocodeCtx{
- cn: cn,
- fn: st.View.Filename(),
- pos: pos,
- src: src,
- bctx: bctx,
- cfg: gocode.MargoConfig{
- GOROOT: bctx.GOROOT,
- GOPATHS: PathList(bctx.GOPATH),
- InstallSuffix: g.InstallSuffix,
- ProposeBuiltins: g.ProposeBuiltins,
- Autobuild: g.Autobuild,
- UnimportedPackages: g.UnimportedPackages,
- },
- }
- gx.query.completions = completions
- gx.query.tooltips = tooltips
- return st, gx
-}
-
-func (gx *gocodeCtx) candidates() []gocode.MargoCandidate {
- if len(gx.src) == 0 {
- return nil
- }
- return gocode.Margo.Complete(gx.cfg, gx.src, gx.fn, gx.pos)
-}
-
-func clampSrcPos(src []byte, pos int) int {
- if pos < 0 {
- return 0
- }
- if pos > len(src) {
- return len(src) - 1
- }
- return pos
-}
diff --git a/src/disposa.blue/margo/golang/gofmt.go b/src/disposa.blue/margo/golang/gofmt.go
deleted file mode 100644
index f30b1496..00000000
--- a/src/disposa.blue/margo/golang/gofmt.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package golang
-
-import (
- "bytes"
- "fmt"
- "go/format"
- "os/exec"
-
- "disposa.blue/margo/mg"
- "disposa.blue/margo/sublime"
-)
-
-var (
- GoFmt = FmtFunc(goFmt)
- GoImports = FmtFunc(goImports)
-)
-
-type FmtFunc func(mx *mg.Ctx, src []byte) ([]byte, error)
-
-func (ff FmtFunc) Reduce(mx *mg.Ctx) *mg.State {
- st := mx.State
- if cfg, ok := mx.Config.(sublime.Config); ok {
- st = st.SetConfig(cfg.DisableGsFmt())
- }
-
- if !mx.View.LangIs("go") {
- return st
- }
- if !mx.ActionIs(mg.ViewFmt{}, mg.ViewPreSave{}) {
- return st
- }
-
- fn := st.View.Filename()
- src, err := st.View.ReadAll()
- if err != nil {
- return st.Errorf("failed to read %s: %s\n", fn, err)
- }
-
- src, err = ff(mx, src)
- if err != nil {
- return st.Errorf("failed to fmt %s: %s\n", fn, err)
- }
- return st.SetSrc(src)
-}
-
-func goFmt(_ *mg.Ctx, src []byte) ([]byte, error) {
- return format.Source(src)
-}
-
-func goImports(mx *mg.Ctx, src []byte) ([]byte, error) {
- stdin := bytes.NewReader(src)
- stdout := bytes.NewBuffer(nil)
- stderr := bytes.NewBuffer(nil)
- cmd := exec.Command("goimports", "-srcdir", mx.View.Filename())
- cmd.Env = mx.Env.Environ()
- cmd.Stdin = stdin
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- if err := cmd.Run(); err != nil {
- return nil, err
- }
- if stderr.Len() != 0 {
- return nil, fmt.Errorf("fmt completed successfully, but contains stderr output: %s", stderr.Bytes())
- }
- return stdout.Bytes(), nil
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/.gitignore b/src/disposa.blue/margo/golang/internal/gocode/.gitignore
deleted file mode 100644
index d00f74ff..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-*.8
-*.a
-*.out
-gocode
-goremote
-gocodetest
-*.swp
-listidents
-showcursor
-showsmap
-rename
diff --git a/src/disposa.blue/margo/golang/internal/gocode/README.md b/src/disposa.blue/margo/golang/internal/gocode/README.md
deleted file mode 100644
index 11148c89..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/README.md
+++ /dev/null
@@ -1,195 +0,0 @@
-## An autocompletion daemon for the Go programming language
-
-Gocode is a helper tool which is intended to be integrated with your source code editor, like vim, neovim and emacs. It provides several advanced capabilities, which currently includes:
-
- - Context-sensitive autocompletion
-
-It is called *daemon*, because it uses client/server architecture for caching purposes. In particular, it makes autocompletions very fast. Typical autocompletion time with warm cache is 30ms, which is barely noticeable.
-
-Also watch the [demo screencast](http://nosmileface.ru/images/gocode-demo.swf).
-
-
-
-
-
-### Setup
-
- 1. You should have a correctly installed Go compiler environment and your personal workspace ($GOPATH). If you have no idea what **$GOPATH** is, take a look [here](http://golang.org/doc/code.html). Please make sure that your **$GOPATH/bin** is available in your **$PATH**. This is important, because most editors assume that **gocode** binary is available in one of the directories, specified by your **$PATH** environment variable. Otherwise manually copy the **gocode** binary from **$GOPATH/bin** to a location which is part of your **$PATH** after getting it in step 2.
-
- Do these steps only if you understand why you need to do them:
-
- `export GOPATH=$HOME/goprojects`
-
- `export PATH=$PATH:$GOPATH/bin`
-
- 2. Then you need to get the appropriate version of the gocode, for 6g/8g/5g compiler you can do this:
-
- `go get -u github.com/nsf/gocode` (-u flag for "update")
-
- Windows users should consider doing this instead:
-
- `go get -u -ldflags -H=windowsgui github.com/nsf/gocode`
-
- That way on the Windows OS gocode will be built as a GUI application and doing so solves hanging window issues with some of the editors.
-
- 3. Next steps are editor specific. See below.
-
-### Vim setup
-
-#### Vim manual installation
-
-Note: As of go 1.5 there is no $GOROOT/misc/vim script. Suggested installation is via [vim-go plugin](https://github.com/fatih/vim-go).
-
-In order to install vim scripts, you need to fulfill the following steps:
-
- 1. Install official Go vim scripts from **$GOROOT/misc/vim**. If you did that already, proceed to the step 2.
-
- 2. Install gocode vim scripts. Usually it's enough to do the following:
-
- 2.1. `vim/update.sh`
-
- **update.sh** script does the following:
-
- #!/bin/sh
- mkdir -p "$HOME/.vim/autoload"
- mkdir -p "$HOME/.vim/ftplugin/go"
- cp "${0%/*}/autoload/gocomplete.vim" "$HOME/.vim/autoload"
- cp "${0%/*}/ftplugin/go/gocomplete.vim" "$HOME/.vim/ftplugin/go"
-
- 2.2. Alternatively, you can create symlinks using symlink.sh script in order to avoid running update.sh after every gocode update.
-
- **symlink.sh** script does the following:
-
- #!/bin/sh
- cd "${0%/*}"
- ROOTDIR=`pwd`
- mkdir -p "$HOME/.vim/autoload"
- mkdir -p "$HOME/.vim/ftplugin/go"
- ln -s "$ROOTDIR/autoload/gocomplete.vim" "$HOME/.vim/autoload/"
- ln -s "$ROOTDIR/ftplugin/go/gocomplete.vim" "$HOME/.vim/ftplugin/go/"
-
- 3. Make sure vim has filetype plugin enabled. Simply add that to your **.vimrc**:
-
- `filetype plugin on`
-
- 4. Autocompletion should work now. Use `` for autocompletion (omnifunc autocompletion).
-
-#### Using Vundle in Vim
-
-Add the following line to your **.vimrc**:
-
-`Plugin 'nsf/gocode', {'rtp': 'vim/'}`
-
-And then update your packages by running `:PluginInstall`.
-
-#### Using vim-plug in Vim
-
-Add the following line to your **.vimrc**:
-
-`Plug 'nsf/gocode', { 'rtp': 'vim', 'do': '~/.vim/plugged/gocode/vim/symlink.sh' }`
-
-And then update your packages by running `:PlugInstall`.
-
-#### Other
-
-Alternatively take a look at the vundle/pathogen friendly repo: https://github.com/Blackrush/vim-gocode.
-
-### Neovim setup
-#### Neovim manual installation
-
- Neovim users should also follow `Vim manual installation`, except that you should goto `gocode/nvim` in step 2, and remember that, the Neovim configuration file is `~/.config/nvim/init.vim`.
-
-#### Using Vundle in Neovim
-
-Add the following line to your **init.vim**:
-
-`Plugin 'nsf/gocode', {'rtp': 'nvim/'}`
-
-And then update your packages by running `:PluginInstall`.
-
-#### Using vim-plug in Neovim
-
-Add the following line to your **init.vim**:
-
-`Plug 'nsf/gocode', { 'rtp': 'nvim', 'do': '~/.config/nvim/plugged/gocode/nvim/symlink.sh' }`
-
-And then update your packages by running `:PlugInstall`.
-
-### Emacs setup
-
-In order to install emacs script, you need to fulfill the following steps:
-
- 1. Install [auto-complete-mode](http://www.emacswiki.org/emacs/AutoComplete)
-
- 2. Copy **emacs/go-autocomplete.el** file from the gocode source distribution to a directory which is in your 'load-path' in emacs.
-
- 3. Add these lines to your **.emacs**:
-
- (require 'go-autocomplete)
- (require 'auto-complete-config)
- (ac-config-default)
-
-Also, there is an alternative plugin for emacs using company-mode. See `emacs-company/README` for installation instructions.
-
-If you're a MacOSX user, you may find that script useful: https://github.com/purcell/exec-path-from-shell. It helps you with setting up the right environment variables as Go and gocode require it. By default it pulls the PATH, but don't forget to add the GOPATH as well, e.g.:
-
-```
-(when (memq window-system '(mac ns))
- (exec-path-from-shell-initialize)
- (exec-path-from-shell-copy-env "GOPATH"))
-```
-
-### Options
-
-You can change all available options using `gocode set` command. The config file uses json format and is usually stored somewhere in **~/.config/gocode** directory. On windows it's stored in the appropriate AppData folder. It's suggested to avoid modifying config file manually, do that using the `gocode set` command.
-
-`gocode set` lists all options and their values.
-
-`gocode set ` shows the value of that *option*.
-
-`gocode set ` sets the new *value* for that *option*.
-
- - *propose-builtins*
-
- A boolean option. If **true**, gocode will add built-in types, functions and constants to an autocompletion proposals. Default: **false**.
-
- - *lib-path*
-
- A string option. Allows you to add search paths for packages. By default, gocode only searches **$GOPATH/pkg/$GOOS_$GOARCH** and **$GOROOT/pkg/$GOOS_$GOARCH** in terms of previously existed environment variables. Also you can specify multiple paths using ':' (colon) as a separator (on Windows use semicolon ';'). The paths specified by *lib-path* are prepended to the default ones.
-
- - *autobuild*
-
- A boolean option. If **true**, gocode will try to automatically build out-of-date packages when their source files are modified, in order to obtain the freshest autocomplete results for them. This feature is experimental. Default: **false**.
-
- - *force-debug-output*
-
- A string option. If is not empty, gocode will forcefully redirect the logging into that file. Also forces enabling of the debug mode on the server side. Default: "" (empty).
-
- - *package-lookup-mode*
-
- A string option. If **go**, use standard Go package lookup rules. If **gb**, use gb-specific lookup rules. See https://github.com/constabulary/gb for details. Default: **go**.
-
- - *close-timeout*
-
- An integer option. If there have been no completion requests after this number of seconds, the gocode process will terminate. Defaults to 1800 (30 minutes).
-
-### Debugging
-
-If something went wrong, the first thing you may want to do is manually start the gocode daemon with a debug mode enabled and in a separate terminal window. It will show you all the stack traces, panics if any and additional info about autocompletion requests. Shutdown the daemon if it was already started and run a new one explicitly with a debug mode enabled:
-
-`gocode close`
-
-`gocode -s -debug`
-
-Please, report bugs, feature suggestions and other rants to the [github issue tracker](http://github.com/nsf/gocode/issues) of this project.
-
-### Developing
-
-There is [Guide for IDE/editor plugin developers](docs/IDE_integration.md).
-
-If you have troubles, please, contact me and I will try to do my best answering your questions. You can contact me via email . Or for short question find me on IRC: #go-nuts @ freenode.
-
-### Misc
-
- - It's a good idea to use the latest git version always. I'm trying to keep it in a working state.
- - Use `go install` (not `go build`) for building a local source tree. The objects in `pkg/` are needed for Gocode to work.
diff --git a/src/disposa.blue/margo/golang/internal/gocode/autocompletecontext.go b/src/disposa.blue/margo/golang/internal/gocode/autocompletecontext.go
deleted file mode 100644
index 10611695..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/autocompletecontext.go
+++ /dev/null
@@ -1,808 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "log"
- "os"
- "path/filepath"
- "runtime"
- "sort"
- "strings"
- "time"
-)
-
-//-------------------------------------------------------------------------
-// out_buffers
-//
-// Temporary structure for writing autocomplete response.
-//-------------------------------------------------------------------------
-
-// fields must be exported for RPC
-type candidate struct {
- Name string
- Type string
- Class decl_class
- Package string
-}
-
-type out_buffers struct {
- tmpbuf *bytes.Buffer
- candidates []candidate
- canonical_aliases map[string]string
- ctx *auto_complete_context
- tmpns map[string]bool
- ignorecase bool
-}
-
-func new_out_buffers(ctx *auto_complete_context) *out_buffers {
- b := new(out_buffers)
- b.tmpbuf = bytes.NewBuffer(make([]byte, 0, 1024))
- b.candidates = make([]candidate, 0, 64)
- b.ctx = ctx
- b.canonical_aliases = make(map[string]string)
- for _, imp := range b.ctx.current.packages {
- b.canonical_aliases[imp.abspath] = imp.alias
- }
- return b
-}
-
-func (b *out_buffers) Len() int {
- return len(b.candidates)
-}
-
-func (b *out_buffers) Less(i, j int) bool {
- x := b.candidates[i]
- y := b.candidates[j]
- if x.Class == y.Class {
- return x.Name < y.Name
- }
- return x.Class < y.Class
-}
-
-func (b *out_buffers) Swap(i, j int) {
- b.candidates[i], b.candidates[j] = b.candidates[j], b.candidates[i]
-}
-
-func (b *out_buffers) append_decl(p, name, pkg string, decl *decl, class decl_class) {
- c1 := !g_config.ProposeBuiltins && decl.scope == g_universe_scope && decl.name != "Error"
- c2 := class != decl_invalid && decl.class != class
- c3 := class == decl_invalid && !has_prefix(name, p, b.ignorecase)
- c4 := !decl.matches()
- c5 := !check_type_expr(decl.typ)
-
- if c1 || c2 || c3 || c4 || c5 {
- return
- }
-
- decl.pretty_print_type(b.tmpbuf, b.canonical_aliases)
- b.candidates = append(b.candidates, candidate{
- Name: name,
- Type: b.tmpbuf.String(),
- Class: decl.class,
- Package: pkg,
- })
- b.tmpbuf.Reset()
-}
-
-func (b *out_buffers) append_embedded(p string, decl *decl, pkg string, class decl_class) {
- if decl.embedded == nil {
- return
- }
-
- first_level := false
- if b.tmpns == nil {
- // first level, create tmp namespace
- b.tmpns = make(map[string]bool)
- first_level = true
-
- // add all children of the current decl to the namespace
- for _, c := range decl.children {
- b.tmpns[c.name] = true
- }
- }
-
- for _, emb := range decl.embedded {
- typedecl := type_to_decl(emb, decl.scope)
- if typedecl == nil {
- continue
- }
-
- // could be type alias
- if typedecl.is_alias() {
- typedecl = typedecl.type_dealias()
- }
-
- // prevent infinite recursion here
- if typedecl.is_visited() {
- continue
- }
- typedecl.set_visited()
- defer typedecl.clear_visited()
-
- for _, c := range typedecl.children {
- if _, has := b.tmpns[c.name]; has {
- continue
- }
- b.append_decl(p, c.name, pkg, c, class)
- b.tmpns[c.name] = true
- }
- b.append_embedded(p, typedecl, pkg, class)
- }
-
- if first_level {
- // remove tmp namespace
- b.tmpns = nil
- }
-}
-
-//-------------------------------------------------------------------------
-// auto_complete_context
-//
-// Context that holds cache structures for autocompletion needs. It
-// includes cache for packages and for main package files.
-//-------------------------------------------------------------------------
-
-type auto_complete_context struct {
- current *auto_complete_file // currently edited file
- others []*decl_file_cache // other files of the current package
- pkg *scope
-
- pcache package_cache // packages cache
- declcache *decl_cache // top-level declarations cache
-}
-
-func new_auto_complete_context(pcache package_cache, declcache *decl_cache) *auto_complete_context {
- c := new(auto_complete_context)
- c.current = new_auto_complete_file("", declcache.context)
- c.pcache = pcache
- c.declcache = declcache
- return c
-}
-
-func (c *auto_complete_context) update_caches() {
- // temporary map for packages that we need to check for a cache expiration
- // map is used as a set of unique items to prevent double checks
- ps := make(map[string]*package_file_cache)
-
- // collect import information from all of the files
- c.pcache.append_packages(ps, c.current.packages)
- c.others = get_other_package_files(c.current.name, c.current.package_name, c.declcache)
- for _, other := range c.others {
- c.pcache.append_packages(ps, other.packages)
- }
-
- update_packages(ps)
-
- // fix imports for all files
- fixup_packages(c.current.filescope, c.current.packages, c.pcache)
- for _, f := range c.others {
- fixup_packages(f.filescope, f.packages, c.pcache)
- }
-
- // At this point we have collected all top level declarations, now we need to
- // merge them in the common package block.
- c.merge_decls()
-}
-
-func (c *auto_complete_context) merge_decls() {
- c.pkg = new_scope(g_universe_scope)
- merge_decls(c.current.filescope, c.pkg, c.current.decls)
- merge_decls_from_packages(c.pkg, c.current.packages, c.pcache)
- for _, f := range c.others {
- merge_decls(f.filescope, c.pkg, f.decls)
- merge_decls_from_packages(c.pkg, f.packages, c.pcache)
- }
-
- // special pass for type aliases which also have methods, while this is
- // valid code, it shouldn't happen a lot in practice, so, whatever
- // let's move all type alias methods to their first non-alias type down in
- // the chain
- propagate_type_alias_methods(c.pkg)
-}
-
-func (c *auto_complete_context) make_decl_set(scope *scope) map[string]*decl {
- set := make(map[string]*decl, len(c.pkg.entities)*2)
- make_decl_set_recursive(set, scope)
- return set
-}
-
-func (c *auto_complete_context) get_candidates_from_set(set map[string]*decl, partial string, class decl_class, b *out_buffers) {
- for key, value := range set {
- if value == nil {
- continue
- }
- value.infer_type()
- pkgname := ""
- if pkg, ok := c.pcache[value.name]; ok {
- pkgname = pkg.import_name
- }
- b.append_decl(partial, key, pkgname, value, class)
- }
-}
-
-func (c *auto_complete_context) get_candidates_from_decl_alias(cc cursor_context, class decl_class, b *out_buffers) {
- if cc.decl.is_visited() {
- return
- }
-
- cc.decl = cc.decl.type_dealias()
- if cc.decl == nil {
- return
- }
-
- cc.decl.set_visited()
- defer cc.decl.clear_visited()
-
- c.get_candidates_from_decl(cc, class, b)
- return
-}
-
-func (c *auto_complete_context) decl_package_import_path(decl *decl) string {
- if decl == nil || decl.scope == nil {
- return ""
- }
- if pkg, ok := c.pcache[decl.scope.pkgname]; ok {
- return pkg.import_name
- }
- return ""
-}
-
-func (c *auto_complete_context) get_candidates_from_decl(cc cursor_context, class decl_class, b *out_buffers) {
- if cc.decl.is_alias() {
- c.get_candidates_from_decl_alias(cc, class, b)
- return
- }
-
- // propose all children of a subject declaration and
- for _, decl := range cc.decl.children {
- if cc.decl.class == decl_package && !ast.IsExported(decl.name) {
- continue
- }
- if cc.struct_field {
- // if we're autocompleting struct field init, skip all methods
- if _, ok := decl.typ.(*ast.FuncType); ok {
- continue
- }
- }
- b.append_decl(cc.partial, decl.name, c.decl_package_import_path(decl), decl, class)
- }
- // propose all children of an underlying struct/interface type
- adecl := advance_to_struct_or_interface(cc.decl)
- if adecl != nil && adecl != cc.decl {
- for _, decl := range adecl.children {
- if decl.class == decl_var {
- b.append_decl(cc.partial, decl.name, c.decl_package_import_path(decl), decl, class)
- }
- }
- }
- // propose all children of its embedded types
- b.append_embedded(cc.partial, cc.decl, c.decl_package_import_path(cc.decl), class)
-}
-
-func (c *auto_complete_context) get_import_candidates(partial string, b *out_buffers) {
- currentPackagePath, pkgdirs := g_daemon.context.pkg_dirs()
- resultSet := map[string]struct{}{}
- for _, pkgdir := range pkgdirs {
- // convert srcpath to pkgpath and get candidates
- get_import_candidates_dir(pkgdir, filepath.FromSlash(partial), b.ignorecase, currentPackagePath, resultSet)
- }
- for k := range resultSet {
- b.candidates = append(b.candidates, candidate{Name: k, Class: decl_import})
- }
-}
-
-func get_import_candidates_dir(root, partial string, ignorecase bool, currentPackagePath string, r map[string]struct{}) {
- var fpath string
- var match bool
- if strings.HasSuffix(partial, "/") {
- fpath = filepath.Join(root, partial)
- } else {
- fpath = filepath.Join(root, filepath.Dir(partial))
- match = true
- }
- fi := readdir(fpath)
- for i := range fi {
- name := fi[i].Name()
- rel, err := filepath.Rel(root, filepath.Join(fpath, name))
- if err != nil {
- panic(err)
- }
- if match && !has_prefix(rel, partial, ignorecase) {
- continue
- } else if fi[i].IsDir() {
- get_import_candidates_dir(root, rel+string(filepath.Separator), ignorecase, currentPackagePath, r)
- } else {
- ext := filepath.Ext(name)
- if ext != ".a" {
- continue
- } else {
- rel = rel[0 : len(rel)-2]
- }
- if ipath, ok := vendorlessImportPath(filepath.ToSlash(rel), currentPackagePath); ok {
- r[ipath] = struct{}{}
- }
- }
- }
-}
-
-// returns three slices of the same length containing:
-// 1. apropos names
-// 2. apropos types (pretty-printed)
-// 3. apropos classes
-// and length of the part that should be replaced (if any)
-func (c *auto_complete_context) apropos(file []byte, filename string, cursor int) ([]candidate, int) {
- c.current.cursor = cursor
- c.current.name = filename
-
- // Update caches and parse the current file.
- // This process is quite complicated, because I was trying to design it in a
- // concurrent fashion. Apparently I'm not really good at that. Hopefully
- // will be better in future.
-
- // Ugly hack, but it actually may help in some cases. Insert a
- // semicolon right at the cursor location.
- filesemi := make([]byte, len(file)+1)
- copy(filesemi, file[:cursor])
- filesemi[cursor] = ';'
- copy(filesemi[cursor+1:], file[cursor:])
-
- // Does full processing of the currently edited file (top-level declarations plus
- // active function).
- c.current.process_data(filesemi)
-
- // Updates cache of other files and packages. See the function for details of
- // the process. At the end merges all the top-level declarations into the package
- // block.
- c.update_caches()
-
- // And we're ready to Go. ;)
-
- b := new_out_buffers(c)
- if g_config.IgnoreCase {
- if *g_debug {
- log.Printf("ignoring case sensitivity")
- }
- b.ignorecase = true
- }
-
- cc, ok := c.deduce_cursor_context(file, cursor)
- partial := len(cc.partial)
- if !g_config.Partials {
- if *g_debug {
- log.Printf("not performing partial prefix matching")
- }
- cc.partial = ""
- }
- if !ok {
- var d *decl
- if ident, ok := cc.expr.(*ast.Ident); ok && g_config.UnimportedPackages {
- p := resolveKnownPackageIdent(ident.Name, c.current.name, c.current.context)
- if p != nil {
- c.pcache[p.name] = p
- d = p.main
- }
- }
- if d == nil {
- return nil, 0
- }
- cc.decl = d
- }
-
- class := decl_invalid
- if g_config.ClassFiltering {
- switch cc.partial {
- case "const":
- class = decl_const
- case "var":
- class = decl_var
- case "type":
- class = decl_type
- case "func":
- class = decl_func
- case "package":
- class = decl_package
- }
- }
-
- if cc.decl_import {
- c.get_import_candidates(cc.partial, b)
- if cc.partial != "" && len(b.candidates) == 0 {
- // as a fallback, try case insensitive approach
- b.ignorecase = true
- c.get_import_candidates(cc.partial, b)
- }
- } else if cc.decl == nil {
- // In case if no declaraion is a subject of completion, propose all:
- set := c.make_decl_set(c.current.scope)
- c.get_candidates_from_set(set, cc.partial, class, b)
- if cc.partial != "" && len(b.candidates) == 0 {
- // as a fallback, try case insensitive approach
- b.ignorecase = true
- c.get_candidates_from_set(set, cc.partial, class, b)
- }
- } else {
- c.get_candidates_from_decl(cc, class, b)
- if cc.partial != "" && len(b.candidates) == 0 {
- // as a fallback, try case insensitive approach
- b.ignorecase = true
- c.get_candidates_from_decl(cc, class, b)
- }
- }
-
- if len(b.candidates) == 0 {
- return nil, 0
- }
-
- sort.Sort(b)
- return b.candidates, partial
-}
-
-func update_packages(ps map[string]*package_file_cache) {
- // initiate package cache update
- done := make(chan bool)
- for _, p := range ps {
- go func(p *package_file_cache) {
- defer func() {
- if err := recover(); err != nil {
- print_backtrace(err)
- done <- false
- }
- }()
- p.update_cache()
- done <- true
- }(p)
- }
-
- // wait for its completion
- for _ = range ps {
- if !<-done {
- panic("One of the package cache updaters panicked")
- }
- }
-}
-
-func collect_type_alias_methods(d *decl) map[string]*decl {
- if d == nil || d.is_visited() || !d.is_alias() {
- return nil
- }
- d.set_visited()
- defer d.clear_visited()
-
- // add own methods
- m := map[string]*decl{}
- for k, v := range d.children {
- m[k] = v
- }
-
- // recurse into more aliases
- dd := type_to_decl(d.typ, d.scope)
- for k, v := range collect_type_alias_methods(dd) {
- m[k] = v
- }
-
- return m
-}
-
-func propagate_type_alias_methods(s *scope) {
- for _, e := range s.entities {
- if !e.is_alias() {
- continue
- }
-
- methods := collect_type_alias_methods(e)
- if len(methods) == 0 {
- continue
- }
-
- dd := e.type_dealias()
- if dd == nil {
- continue
- }
-
- decl := dd.deep_copy()
- for _, v := range methods {
- decl.add_child(v)
- }
- s.entities[decl.name] = decl
- }
-}
-
-func merge_decls(filescope *scope, pkg *scope, decls map[string]*decl) {
- for _, d := range decls {
- pkg.merge_decl(d)
- }
- filescope.parent = pkg
-}
-
-func merge_decls_from_packages(pkgscope *scope, pkgs []package_import, pcache package_cache) {
- for _, p := range pkgs {
- path, alias := p.abspath, p.alias
- if alias != "." {
- continue
- }
- p := pcache[path].main
- if p == nil {
- continue
- }
- for _, d := range p.children {
- if ast.IsExported(d.name) {
- pkgscope.merge_decl(d)
- }
- }
- }
-}
-
-func fixup_packages(filescope *scope, pkgs []package_import, pcache package_cache) {
- for _, p := range pkgs {
- path, alias := p.abspath, p.alias
- if alias == "" {
- alias = pcache[path].defalias
- }
- // skip packages that will be merged to the package scope
- if alias == "." {
- continue
- }
- filescope.replace_decl(alias, pcache[path].main)
- }
-}
-
-func get_other_package_files(filename, packageName string, declcache *decl_cache) []*decl_file_cache {
- others := find_other_package_files(filename, packageName)
-
- ret := make([]*decl_file_cache, len(others))
- done := make(chan *decl_file_cache)
-
- for _, nm := range others {
- go func(name string) {
- defer func() {
- if err := recover(); err != nil {
- print_backtrace(err)
- done <- nil
- }
- }()
- done <- declcache.get_and_update(name)
- }(nm)
- }
-
- for i := range others {
- ret[i] = <-done
- if ret[i] == nil {
- panic("One of the decl cache updaters panicked")
- }
- }
-
- return ret
-}
-
-func find_other_package_files(filename, package_name string) []string {
- if filename == "" {
- return nil
- }
-
- dir, file := filepath.Split(filename)
- files_in_dir, err := readdir_lstat(dir)
- if err != nil {
- panic(err)
- }
-
- count := 0
- for _, stat := range files_in_dir {
- ok, _ := filepath.Match("*.go", stat.Name())
- if !ok || stat.Name() == file {
- continue
- }
- count++
- }
-
- out := make([]string, 0, count)
- for _, stat := range files_in_dir {
- const non_regular = os.ModeDir | os.ModeSymlink |
- os.ModeDevice | os.ModeNamedPipe | os.ModeSocket
-
- ok, _ := filepath.Match("*.go", stat.Name())
- if !ok || stat.Name() == file || stat.Mode()&non_regular != 0 {
- continue
- }
-
- abspath := filepath.Join(dir, stat.Name())
- if file_package_name(abspath) == package_name {
- n := len(out)
- out = out[:n+1]
- out[n] = abspath
- }
- }
-
- return out
-}
-
-func file_package_name(filename string) string {
- file, _ := parser.ParseFile(token.NewFileSet(), filename, nil, parser.PackageClauseOnly)
- return file.Name.Name
-}
-
-func make_decl_set_recursive(set map[string]*decl, scope *scope) {
- for name, ent := range scope.entities {
- if _, ok := set[name]; !ok {
- set[name] = ent
- }
- }
- if scope.parent != nil {
- make_decl_set_recursive(set, scope.parent)
- }
-}
-
-func check_func_field_list(f *ast.FieldList) bool {
- if f == nil {
- return true
- }
-
- for _, field := range f.List {
- if !check_type_expr(field.Type) {
- return false
- }
- }
- return true
-}
-
-// checks for a type expression correctness, it the type expression has
-// ast.BadExpr somewhere, returns false, otherwise true
-func check_type_expr(e ast.Expr) bool {
- switch t := e.(type) {
- case *ast.StarExpr:
- return check_type_expr(t.X)
- case *ast.ArrayType:
- return check_type_expr(t.Elt)
- case *ast.SelectorExpr:
- return check_type_expr(t.X)
- case *ast.FuncType:
- a := check_func_field_list(t.Params)
- b := check_func_field_list(t.Results)
- return a && b
- case *ast.MapType:
- a := check_type_expr(t.Key)
- b := check_type_expr(t.Value)
- return a && b
- case *ast.Ellipsis:
- return check_type_expr(t.Elt)
- case *ast.ChanType:
- return check_type_expr(t.Value)
- case *ast.BadExpr:
- return false
- default:
- return true
- }
-}
-
-//-------------------------------------------------------------------------
-// Status output
-//-------------------------------------------------------------------------
-
-type decl_slice []*decl
-
-func (s decl_slice) Less(i, j int) bool {
- if s[i].class != s[j].class {
- return s[i].name < s[j].name
- }
- return s[i].class < s[j].class
-}
-func (s decl_slice) Len() int { return len(s) }
-func (s decl_slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-const (
- color_red = "\033[0;31m"
- color_red_bold = "\033[1;31m"
- color_green = "\033[0;32m"
- color_green_bold = "\033[1;32m"
- color_yellow = "\033[0;33m"
- color_yellow_bold = "\033[1;33m"
- color_blue = "\033[0;34m"
- color_blue_bold = "\033[1;34m"
- color_magenta = "\033[0;35m"
- color_magenta_bold = "\033[1;35m"
- color_cyan = "\033[0;36m"
- color_cyan_bold = "\033[1;36m"
- color_white = "\033[0;37m"
- color_white_bold = "\033[1;37m"
- color_none = "\033[0m"
-)
-
-var g_decl_class_to_color = [...]string{
- decl_const: color_white_bold,
- decl_var: color_magenta,
- decl_type: color_cyan,
- decl_func: color_green,
- decl_package: color_red,
- decl_methods_stub: color_red,
-}
-
-var g_decl_class_to_string_status = [...]string{
- decl_const: " const",
- decl_var: " var",
- decl_type: " type",
- decl_func: " func",
- decl_package: "package",
- decl_methods_stub: " stub",
-}
-
-func (c *auto_complete_context) status() string {
-
- buf := bytes.NewBuffer(make([]byte, 0, 4096))
- fmt.Fprintf(buf, "Server's GOMAXPROCS == %d\n", runtime.GOMAXPROCS(0))
- fmt.Fprintf(buf, "\nPackage cache contains %d entries\n", len(c.pcache))
- fmt.Fprintf(buf, "\nListing these entries:\n")
- for _, mod := range c.pcache {
- fmt.Fprintf(buf, "\tname: %s (default alias: %s)\n", mod.name, mod.defalias)
- fmt.Fprintf(buf, "\timports %d declarations and %d packages\n", len(mod.main.children), len(mod.others))
- if mod.mtime == -1 {
- fmt.Fprintf(buf, "\tthis package stays in cache forever (built-in package)\n")
- } else {
- mtime := time.Unix(0, mod.mtime)
- fmt.Fprintf(buf, "\tlast modification time: %s\n", mtime)
- }
- fmt.Fprintf(buf, "\n")
- }
- if c.current.name != "" {
- fmt.Fprintf(buf, "Last edited file: %s (package: %s)\n", c.current.name, c.current.package_name)
- if len(c.others) > 0 {
- fmt.Fprintf(buf, "\nOther files from the current package:\n")
- }
- for _, f := range c.others {
- fmt.Fprintf(buf, "\t%s\n", f.name)
- }
- fmt.Fprintf(buf, "\nListing declarations from files:\n")
-
- const status_decls = "\t%s%s" + color_none + " " + color_yellow + "%s" + color_none + "\n"
- const status_decls_children = "\t%s%s" + color_none + " " + color_yellow + "%s" + color_none + " (%d)\n"
-
- fmt.Fprintf(buf, "\n%s:\n", c.current.name)
- ds := make(decl_slice, len(c.current.decls))
- i := 0
- for _, d := range c.current.decls {
- ds[i] = d
- i++
- }
- sort.Sort(ds)
- for _, d := range ds {
- if len(d.children) > 0 {
- fmt.Fprintf(buf, status_decls_children,
- g_decl_class_to_color[d.class],
- g_decl_class_to_string_status[d.class],
- d.name, len(d.children))
- } else {
- fmt.Fprintf(buf, status_decls,
- g_decl_class_to_color[d.class],
- g_decl_class_to_string_status[d.class],
- d.name)
- }
- }
-
- for _, f := range c.others {
- fmt.Fprintf(buf, "\n%s:\n", f.name)
- ds = make(decl_slice, len(f.decls))
- i = 0
- for _, d := range f.decls {
- ds[i] = d
- i++
- }
- sort.Sort(ds)
- for _, d := range ds {
- if len(d.children) > 0 {
- fmt.Fprintf(buf, status_decls_children,
- g_decl_class_to_color[d.class],
- g_decl_class_to_string_status[d.class],
- d.name, len(d.children))
- } else {
- fmt.Fprintf(buf, status_decls,
- g_decl_class_to_color[d.class],
- g_decl_class_to_string_status[d.class],
- d.name)
- }
- }
- }
- }
- return buf.String()
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/autocompletefile.go b/src/disposa.blue/margo/golang/internal/gocode/autocompletefile.go
deleted file mode 100644
index 6d47dcdb..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/autocompletefile.go
+++ /dev/null
@@ -1,420 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "log"
-)
-
-func parse_decl_list(fset *token.FileSet, data []byte) ([]ast.Decl, error) {
- var buf bytes.Buffer
- buf.WriteString("package p;")
- buf.Write(data)
- file, err := parser.ParseFile(fset, "", buf.Bytes(), parser.AllErrors)
- if err != nil {
- return file.Decls, err
- }
- return file.Decls, nil
-}
-
-func log_parse_error(intro string, err error) {
- if el, ok := err.(scanner.ErrorList); ok {
- log.Printf("%s:", intro)
- for _, er := range el {
- log.Printf(" %s", er)
- }
- } else {
- log.Printf("%s: %s", intro, err)
- }
-}
-
-//-------------------------------------------------------------------------
-// auto_complete_file
-//-------------------------------------------------------------------------
-
-type auto_complete_file struct {
- name string
- package_name string
-
- decls map[string]*decl
- packages []package_import
- filescope *scope
- scope *scope
-
- cursor int // for current file buffer only
- fset *token.FileSet
- context *package_lookup_context
-}
-
-func new_auto_complete_file(name string, context *package_lookup_context) *auto_complete_file {
- p := new(auto_complete_file)
- p.name = name
- p.cursor = -1
- p.fset = token.NewFileSet()
- p.context = context
- return p
-}
-
-func (f *auto_complete_file) offset(p token.Pos) int {
- const fixlen = len("package p;")
- return f.fset.Position(p).Offset - fixlen
-}
-
-// this one is used for current file buffer exclusively
-func (f *auto_complete_file) process_data(data []byte) {
- cur, filedata, block := rip_off_decl(data, f.cursor)
- file, err := parser.ParseFile(f.fset, "", filedata, parser.AllErrors)
- if err != nil && *g_debug {
- log_parse_error("Error parsing input file (outer block)", err)
- }
- f.package_name = package_name(file)
-
- f.decls = make(map[string]*decl)
- f.packages = collect_package_imports(f.name, file.Decls, f.context)
- f.filescope = new_scope(nil)
- f.scope = f.filescope
-
- for _, d := range file.Decls {
- anonymify_ast(d, 0, f.filescope)
- }
-
- // process all top-level declarations
- for _, decl := range file.Decls {
- append_to_top_decls(f.decls, decl, f.scope)
- }
- if block != nil {
- // process local function as top-level declaration
- decls, err := parse_decl_list(f.fset, block)
- if err != nil && *g_debug {
- log_parse_error("Error parsing input file (inner block)", err)
- }
-
- for _, d := range decls {
- anonymify_ast(d, 0, f.filescope)
- }
-
- for _, decl := range decls {
- append_to_top_decls(f.decls, decl, f.scope)
- }
-
- // process function internals
- f.cursor = cur
- for _, decl := range decls {
- f.process_decl_locals(decl)
- }
- }
-
-}
-
-func (f *auto_complete_file) process_decl_locals(decl ast.Decl) {
- switch t := decl.(type) {
- case *ast.FuncDecl:
- if f.cursor_in(t.Body) {
- s := f.scope
- f.scope = new_scope(f.scope)
-
- f.process_field_list(t.Recv, s)
- f.process_field_list(t.Type.Params, s)
- f.process_field_list(t.Type.Results, s)
- f.process_block_stmt(t.Body)
- }
- default:
- v := new(func_lit_visitor)
- v.ctx = f
- ast.Walk(v, decl)
- }
-}
-
-func (f *auto_complete_file) process_decl(decl ast.Decl) {
- if t, ok := decl.(*ast.GenDecl); ok && f.offset(t.TokPos) > f.cursor {
- return
- }
- prevscope := f.scope
- foreach_decl(decl, func(data *foreach_decl_struct) {
- class := ast_decl_class(data.decl)
- if class != decl_type {
- f.scope, prevscope = advance_scope(f.scope)
- }
- for i, name := range data.names {
- typ, v, vi := data.type_value_index(i)
-
- d := new_decl_full(name.Name, class, ast_decl_flags(data.decl), typ, v, vi, prevscope)
- if d == nil {
- return
- }
-
- f.scope.add_named_decl(d)
- }
- })
-}
-
-func (f *auto_complete_file) process_block_stmt(block *ast.BlockStmt) {
- if block != nil && f.cursor_in(block) {
- f.scope, _ = advance_scope(f.scope)
-
- for _, stmt := range block.List {
- f.process_stmt(stmt)
- }
-
- // hack to process all func literals
- v := new(func_lit_visitor)
- v.ctx = f
- ast.Walk(v, block)
- }
-}
-
-type func_lit_visitor struct {
- ctx *auto_complete_file
-}
-
-func (v *func_lit_visitor) Visit(node ast.Node) ast.Visitor {
- if t, ok := node.(*ast.FuncLit); ok && v.ctx.cursor_in(t.Body) {
- s := v.ctx.scope
- v.ctx.scope = new_scope(v.ctx.scope)
-
- v.ctx.process_field_list(t.Type.Params, s)
- v.ctx.process_field_list(t.Type.Results, s)
- v.ctx.process_block_stmt(t.Body)
-
- return nil
- }
- return v
-}
-
-func (f *auto_complete_file) process_stmt(stmt ast.Stmt) {
- switch t := stmt.(type) {
- case *ast.DeclStmt:
- f.process_decl(t.Decl)
- case *ast.AssignStmt:
- f.process_assign_stmt(t)
- case *ast.IfStmt:
- if f.cursor_in_if_head(t) {
- f.process_stmt(t.Init)
- } else if f.cursor_in_if_stmt(t) {
- f.scope, _ = advance_scope(f.scope)
- f.process_stmt(t.Init)
- f.process_block_stmt(t.Body)
- f.process_stmt(t.Else)
- }
- case *ast.BlockStmt:
- f.process_block_stmt(t)
- case *ast.RangeStmt:
- f.process_range_stmt(t)
- case *ast.ForStmt:
- if f.cursor_in_for_head(t) {
- f.process_stmt(t.Init)
- } else if f.cursor_in(t.Body) {
- f.scope, _ = advance_scope(f.scope)
-
- f.process_stmt(t.Init)
- f.process_block_stmt(t.Body)
- }
- case *ast.SwitchStmt:
- f.process_switch_stmt(t)
- case *ast.TypeSwitchStmt:
- f.process_type_switch_stmt(t)
- case *ast.SelectStmt:
- f.process_select_stmt(t)
- case *ast.LabeledStmt:
- f.process_stmt(t.Stmt)
- }
-}
-
-func (f *auto_complete_file) process_select_stmt(a *ast.SelectStmt) {
- if !f.cursor_in(a.Body) {
- return
- }
- var prevscope *scope
- f.scope, prevscope = advance_scope(f.scope)
-
- var last_cursor_after *ast.CommClause
- for _, s := range a.Body.List {
- if cc := s.(*ast.CommClause); f.cursor > f.offset(cc.Colon) {
- last_cursor_after = cc
- }
- }
-
- if last_cursor_after != nil {
- if last_cursor_after.Comm != nil {
- //if lastCursorAfter.Lhs != nil && lastCursorAfter.Tok == token.DEFINE {
- if astmt, ok := last_cursor_after.Comm.(*ast.AssignStmt); ok && astmt.Tok == token.DEFINE {
- vname := astmt.Lhs[0].(*ast.Ident).Name
- v := new_decl_var(vname, nil, astmt.Rhs[0], -1, prevscope)
- if v != nil {
- f.scope.add_named_decl(v)
- }
- }
- }
- for _, s := range last_cursor_after.Body {
- f.process_stmt(s)
- }
- }
-}
-
-func (f *auto_complete_file) process_type_switch_stmt(a *ast.TypeSwitchStmt) {
- if !f.cursor_in(a.Body) {
- return
- }
- var prevscope *scope
- f.scope, prevscope = advance_scope(f.scope)
-
- f.process_stmt(a.Init)
- // type var
- var tv *decl
- if a, ok := a.Assign.(*ast.AssignStmt); ok {
- lhs := a.Lhs
- rhs := a.Rhs
- if lhs != nil && len(lhs) == 1 {
- tvname := lhs[0].(*ast.Ident).Name
- tv = new_decl_var(tvname, nil, rhs[0], -1, prevscope)
- }
- }
-
- var last_cursor_after *ast.CaseClause
- for _, s := range a.Body.List {
- if cc := s.(*ast.CaseClause); f.cursor > f.offset(cc.Colon) {
- last_cursor_after = cc
- }
- }
-
- if last_cursor_after != nil {
- if tv != nil {
- if last_cursor_after.List != nil && len(last_cursor_after.List) == 1 {
- tv.typ = last_cursor_after.List[0]
- tv.value = nil
- }
- f.scope.add_named_decl(tv)
- }
- for _, s := range last_cursor_after.Body {
- f.process_stmt(s)
- }
- }
-}
-
-func (f *auto_complete_file) process_switch_stmt(a *ast.SwitchStmt) {
- if !f.cursor_in(a.Body) {
- return
- }
- f.scope, _ = advance_scope(f.scope)
-
- f.process_stmt(a.Init)
- var last_cursor_after *ast.CaseClause
- for _, s := range a.Body.List {
- if cc := s.(*ast.CaseClause); f.cursor > f.offset(cc.Colon) {
- last_cursor_after = cc
- }
- }
- if last_cursor_after != nil {
- for _, s := range last_cursor_after.Body {
- f.process_stmt(s)
- }
- }
-}
-
-func (f *auto_complete_file) process_range_stmt(a *ast.RangeStmt) {
- if !f.cursor_in(a.Body) {
- return
- }
- var prevscope *scope
- f.scope, prevscope = advance_scope(f.scope)
-
- if a.Tok == token.DEFINE {
- if t, ok := a.Key.(*ast.Ident); ok {
- d := new_decl_var(t.Name, nil, a.X, 0, prevscope)
- if d != nil {
- d.flags |= decl_rangevar
- f.scope.add_named_decl(d)
- }
- }
-
- if a.Value != nil {
- if t, ok := a.Value.(*ast.Ident); ok {
- d := new_decl_var(t.Name, nil, a.X, 1, prevscope)
- if d != nil {
- d.flags |= decl_rangevar
- f.scope.add_named_decl(d)
- }
- }
- }
- }
-
- f.process_block_stmt(a.Body)
-}
-
-func (f *auto_complete_file) process_assign_stmt(a *ast.AssignStmt) {
- if a.Tok != token.DEFINE || f.offset(a.TokPos) > f.cursor {
- return
- }
-
- names := make([]*ast.Ident, len(a.Lhs))
- for i, name := range a.Lhs {
- id, ok := name.(*ast.Ident)
- if !ok {
- // something is wrong, just ignore the whole stmt
- return
- }
- names[i] = id
- }
-
- var prevscope *scope
- f.scope, prevscope = advance_scope(f.scope)
-
- pack := decl_pack{names, nil, a.Rhs}
- for i, name := range pack.names {
- typ, v, vi := pack.type_value_index(i)
- d := new_decl_var(name.Name, typ, v, vi, prevscope)
- if d == nil {
- continue
- }
-
- f.scope.add_named_decl(d)
- }
-}
-
-func (f *auto_complete_file) process_field_list(field_list *ast.FieldList, s *scope) {
- if field_list != nil {
- decls := ast_field_list_to_decls(field_list, decl_var, 0, s, false)
- for _, d := range decls {
- f.scope.add_named_decl(d)
- }
- }
-}
-
-func (f *auto_complete_file) cursor_in_if_head(s *ast.IfStmt) bool {
- if f.cursor > f.offset(s.If) && f.cursor <= f.offset(s.Body.Lbrace) {
- return true
- }
- return false
-}
-
-func (f *auto_complete_file) cursor_in_if_stmt(s *ast.IfStmt) bool {
- if f.cursor > f.offset(s.If) {
- // magic -10 comes from auto_complete_file.offset method, see
- // len() expr in there
- if f.offset(s.End()) == -10 || f.cursor < f.offset(s.End()) {
- return true
- }
- }
- return false
-}
-
-func (f *auto_complete_file) cursor_in_for_head(s *ast.ForStmt) bool {
- if f.cursor > f.offset(s.For) && f.cursor <= f.offset(s.Body.Lbrace) {
- return true
- }
- return false
-}
-
-func (f *auto_complete_file) cursor_in(block *ast.BlockStmt) bool {
- if f.cursor == -1 || block == nil {
- return false
- }
-
- if f.cursor > f.offset(block.Lbrace) && f.cursor <= f.offset(block.Rbrace) {
- return true
- }
- return false
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/bridge._margo_.go b/src/disposa.blue/margo/golang/internal/gocode/bridge._margo_.go
deleted file mode 100644
index f000801e..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/bridge._margo_.go
+++ /dev/null
@@ -1,144 +0,0 @@
-//go:generate go run generate._margo_.go
-
-package gocode
-
-import (
- "fmt"
- "go/build"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "sync"
-)
-
-var Margo = newMargoState()
-
-func init() {
- g_daemon = &daemon{}
-}
-
-type MargoConfig struct {
- ProposeBuiltins bool
- InstallSuffix string
- GOROOT string
- GOPATHS []string
- Autobuild bool
- UnimportedPackages bool
-}
-
-type margoEnv struct {
- LibPath string
- GOOS string
- GOARCH string
- Compiler string
- GOROOT string
- GOPATH string
- InstallSuffix string
-}
-
-func (m *margoEnv) assignConfig(gc *config, p *package_lookup_context, mc MargoConfig) {
- gc.LibPath = m.LibPath
- gc.ProposeBuiltins = mc.ProposeBuiltins
- gc.Autobuild = mc.Autobuild
- gc.UnimportedPackages = mc.UnimportedPackages
- gc.Partials = false
- gc.IgnoreCase = true
-
- p.GOOS = m.GOOS
- p.GOARCH = m.GOARCH
- p.Compiler = m.Compiler
- p.GOROOT = m.GOROOT
- p.GOPATH = m.GOPATH
- p.InstallSuffix = m.InstallSuffix
-}
-
-type margoState struct {
- sync.Mutex
-
- ctx *auto_complete_context
- env *package_lookup_context
- pkgCache package_cache
- declCache *decl_cache
- prevEnv margoEnv
- prevPkg string
-}
-
-type MargoCandidate struct {
- candidate
-}
-
-func newMargoState() *margoState {
- env := &package_lookup_context{}
- pkgCache := new_package_cache()
- declCache := new_decl_cache(env)
- return &margoState{
- ctx: new_auto_complete_context(pkgCache, declCache),
- env: env,
- pkgCache: pkgCache,
- declCache: declCache,
- }
-}
-
-func (m *margoState) Complete(c MargoConfig, file []byte, filename string, cursor int) []MargoCandidate {
- m.Lock()
- defer m.Unlock()
-
- m.updateConfig(c, filename)
-
- list, _ := m.ctx.apropos(file, filename, cursor)
- candidates := make([]MargoCandidate, len(list))
- for i, c := range list {
- candidates[i] = MargoCandidate{candidate: c}
- }
- return candidates
-}
-
-func (m *margoState) updateConfig(c MargoConfig, filename string) {
- pl := []string{}
- osArch := runtime.GOOS + "_" + runtime.GOARCH
- if c.InstallSuffix != "" {
- osArch += "_" + c.InstallSuffix
- }
- add := func(p string) {
- if p != "" {
- pl = append(pl, filepath.Join(p, "pkg", osArch))
- }
- }
-
- add(c.GOROOT)
- for _, p := range c.GOPATHS {
- add(p)
- }
-
- sep := string(filepath.ListSeparator)
-
- nv := margoEnv{
- LibPath: strings.Join(pl, sep),
- GOOS: runtime.GOOS,
- GOARCH: runtime.GOARCH,
- Compiler: runtime.Compiler,
- GOROOT: c.GOROOT,
- GOPATH: strings.Join(c.GOPATHS, sep),
- InstallSuffix: c.InstallSuffix,
- }
- nv.assignConfig(&g_config, m.env, c)
- m.env.CurrentPackagePath = ""
- p, _ := m.env.ImportDir(filepath.Dir(filename), build.FindOnly)
- if p != nil {
- m.env.CurrentPackagePath = p.ImportPath
- }
- if m.prevPkg != m.env.CurrentPackagePath {
- m.prevPkg = m.env.CurrentPackagePath
- fmt.Fprintf(os.Stderr, "Gocode pkg: %#v\n", m.env.CurrentPackagePath)
- }
- if m.prevEnv != nv {
- m.prevEnv = nv
- fmt.Fprintf(os.Stderr, "Gocode env: %#v\n", nv)
- }
-
- g_daemon.autocomplete = m.ctx
- g_daemon.pkgcache = m.pkgCache
- g_daemon.declcache = m.declCache
- g_daemon.context = *m.env
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/client.go b/src/disposa.blue/margo/golang/internal/gocode/client.go
deleted file mode 100644
index 59d861a5..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/client.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package gocode
-
-import (
- "flag"
- "fmt"
- "go/build"
- "io/ioutil"
- "net/rpc"
- "os"
- "path/filepath"
- "strconv"
- "time"
-)
-
-func do_client() int {
- addr := *g_addr
- if *g_sock == "unix" {
- addr = get_socket_filename()
- }
-
- // client
- client, err := rpc.Dial(*g_sock, addr)
- if err != nil {
- if *g_sock == "unix" && file_exists(addr) {
- os.Remove(addr)
- }
-
- err = try_run_server()
- if err != nil {
- fmt.Printf("%s\n", err.Error())
- return 1
- }
- client, err = try_to_connect(*g_sock, addr)
- if err != nil {
- fmt.Printf("%s\n", err.Error())
- return 1
- }
- }
- defer client.Close()
-
- if flag.NArg() > 0 {
- switch flag.Arg(0) {
- case "autocomplete":
- cmd_auto_complete(client)
- case "close":
- cmd_close(client)
- case "status":
- cmd_status(client)
- case "drop-cache":
- cmd_drop_cache(client)
- case "set":
- cmd_set(client)
- case "options":
- cmd_options(client)
- default:
- fmt.Printf("unknown argument: %q, try running \"gocode -h\"\n", flag.Arg(0))
- return 1
- }
- }
- return 0
-}
-
-func try_run_server() error {
- path := get_executable_filename()
- args := []string{os.Args[0], "-s", "-sock", *g_sock, "-addr", *g_addr}
- cwd, _ := os.Getwd()
-
- var err error
- stdin, err := os.Open(os.DevNull)
- if err != nil {
- return err
- }
- stdout, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
- if err != nil {
- return err
- }
- stderr, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
- if err != nil {
- return err
- }
-
- procattr := os.ProcAttr{Dir: cwd, Env: os.Environ(), Files: []*os.File{stdin, stdout, stderr}}
- p, err := os.StartProcess(path, args, &procattr)
- if err != nil {
- return err
- }
-
- return p.Release()
-}
-
-func try_to_connect(network, address string) (client *rpc.Client, err error) {
- t := 0
- for {
- client, err = rpc.Dial(network, address)
- if err != nil && t < 1000 {
- time.Sleep(10 * time.Millisecond)
- t += 10
- continue
- }
- break
- }
-
- return
-}
-
-func prepare_file_filename_cursor() ([]byte, string, int) {
- var file []byte
- var err error
-
- if *g_input != "" {
- file, err = ioutil.ReadFile(*g_input)
- } else {
- file, err = ioutil.ReadAll(os.Stdin)
- }
-
- if err != nil {
- panic(err.Error())
- }
-
- var skipped int
- file, skipped = filter_out_shebang(file)
-
- filename := *g_input
- cursor := -1
-
- offset := ""
- switch flag.NArg() {
- case 2:
- offset = flag.Arg(1)
- case 3:
- filename = flag.Arg(1) // Override default filename
- offset = flag.Arg(2)
- }
-
- if offset != "" {
- if offset[0] == 'c' || offset[0] == 'C' {
- cursor, _ = strconv.Atoi(offset[1:])
- cursor = char_to_byte_offset(file, cursor)
- } else {
- cursor, _ = strconv.Atoi(offset)
- }
- }
-
- cursor -= skipped
- if filename != "" && !filepath.IsAbs(filename) {
- cwd, _ := os.Getwd()
- filename = filepath.Join(cwd, filename)
- }
- return file, filename, cursor
-}
-
-//-------------------------------------------------------------------------
-// commands
-//-------------------------------------------------------------------------
-
-func cmd_status(c *rpc.Client) {
- fmt.Printf("%s\n", client_status(c, 0))
-}
-
-func cmd_auto_complete(c *rpc.Client) {
- context := pack_build_context(&build.Default)
- file, filename, cursor := prepare_file_filename_cursor()
- f := get_formatter(*g_format)
- f.write_candidates(client_auto_complete(c, file, filename, cursor, context))
-}
-
-func cmd_close(c *rpc.Client) {
- client_close(c, 0)
-}
-
-func cmd_drop_cache(c *rpc.Client) {
- client_drop_cache(c, 0)
-}
-
-func cmd_set(c *rpc.Client) {
- switch flag.NArg() {
- case 1:
- fmt.Print(client_set(c, "\x00", "\x00"))
- case 2:
- fmt.Print(client_set(c, flag.Arg(1), "\x00"))
- case 3:
- fmt.Print(client_set(c, flag.Arg(1), flag.Arg(2)))
- }
-}
-
-func cmd_options(c *rpc.Client) {
- fmt.Print(client_options(c, 0))
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/config.go b/src/disposa.blue/margo/golang/internal/gocode/config.go
deleted file mode 100644
index 63ed558c..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/config.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "reflect"
- "regexp"
- "strconv"
-)
-
-//-------------------------------------------------------------------------
-// config
-//
-// Structure represents persistent config storage of the gocode daemon. Usually
-// the config is located somewhere in ~/.config/gocode directory.
-//-------------------------------------------------------------------------
-
-type config struct {
- ProposeBuiltins bool `json:"propose-builtins"`
- LibPath string `json:"lib-path"`
- CustomPkgPrefix string `json:"custom-pkg-prefix"`
- CustomVendorDir string `json:"custom-vendor-dir"`
- Autobuild bool `json:"autobuild"`
- ForceDebugOutput string `json:"force-debug-output"`
- PackageLookupMode string `json:"package-lookup-mode"`
- CloseTimeout int `json:"close-timeout"`
- UnimportedPackages bool `json:"unimported-packages"`
- Partials bool `json:"partials"`
- IgnoreCase bool `json:"ignore-case"`
- ClassFiltering bool `json:"class-filtering"`
-}
-
-var g_config_desc = map[string]string{
- "propose-builtins": "If set to {true}, gocode will add built-in types, functions and constants to autocompletion proposals.",
- "lib-path": "A string option. Allows you to add search paths for packages. By default, gocode only searches {$GOPATH/pkg/$GOOS_$GOARCH} and {$GOROOT/pkg/$GOOS_$GOARCH} in terms of previously existed environment variables. Also you can specify multiple paths using ':' (colon) as a separator (on Windows use semicolon ';'). The paths specified by {lib-path} are prepended to the default ones.",
- "custom-pkg-prefix": "",
- "custom-vendor-dir": "",
- "autobuild": "If set to {true}, gocode will try to automatically build out-of-date packages when their source files are modified, in order to obtain the freshest autocomplete results for them. This feature is experimental.",
- "force-debug-output": "If is not empty, gocode will forcefully redirect the logging into that file. Also forces enabling of the debug mode on the server side.",
- "package-lookup-mode": "If set to {go}, use standard Go package lookup rules. If set to {gb}, use gb-specific lookup rules. See {https://github.com/constabulary/gb} for details.",
- "close-timeout": "If there have been no completion requests after this number of seconds, the gocode process will terminate. Default is 30 minutes.",
- "unimported-packages": "If set to {true}, gocode will try to import certain known packages automatically for identifiers which cannot be resolved otherwise. Currently only a limited set of standard library packages is supported.",
- "partials": "If set to {false}, gocode will not filter autocompletion results based on entered prefix before the cursor. Instead it will return all available autocompletion results viable for a given context. Whether this option is set to {true} or {false}, gocode will return a valid prefix length for output formats which support it. Setting this option to a non-default value may result in editor misbehaviour.",
- "ignore-case": "If set to {true}, gocode will perform case-insensitive matching when doing prefix-based filtering.",
- "class-filtering": "Enables or disables gocode's feature where it performs class-based filtering if partial input matches corresponding class keyword: const, var, type, func, package.",
-}
-
-var g_default_config = config{
- ProposeBuiltins: false,
- LibPath: "",
- CustomPkgPrefix: "",
- Autobuild: false,
- ForceDebugOutput: "",
- PackageLookupMode: "go",
- CloseTimeout: 1800,
- UnimportedPackages: false,
- Partials: true,
- IgnoreCase: false,
- ClassFiltering: true,
-}
-var g_config = g_default_config
-
-var g_string_to_bool = map[string]bool{
- "t": true,
- "true": true,
- "y": true,
- "yes": true,
- "on": true,
- "1": true,
- "f": false,
- "false": false,
- "n": false,
- "no": false,
- "off": false,
- "0": false,
-}
-
-func set_value(v reflect.Value, value string) {
- switch t := v; t.Kind() {
- case reflect.Bool:
- v, ok := g_string_to_bool[value]
- if ok {
- t.SetBool(v)
- }
- case reflect.String:
- t.SetString(value)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- v, err := strconv.ParseInt(value, 10, 64)
- if err == nil {
- t.SetInt(v)
- }
- case reflect.Float32, reflect.Float64:
- v, err := strconv.ParseFloat(value, 64)
- if err == nil {
- t.SetFloat(v)
- }
- }
-}
-
-func list_value(v reflect.Value, name string, w io.Writer) {
- switch t := v; t.Kind() {
- case reflect.Bool:
- fmt.Fprintf(w, "%s %v\n", name, t.Bool())
- case reflect.String:
- fmt.Fprintf(w, "%s \"%v\"\n", name, t.String())
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- fmt.Fprintf(w, "%s %v\n", name, t.Int())
- case reflect.Float32, reflect.Float64:
- fmt.Fprintf(w, "%s %v\n", name, t.Float())
- }
-}
-
-func (this *config) list() string {
- str, typ := this.value_and_type()
- buf := bytes.NewBuffer(make([]byte, 0, 256))
- for i := 0; i < str.NumField(); i++ {
- v := str.Field(i)
- name := typ.Field(i).Tag.Get("json")
- list_value(v, name, buf)
- }
- return buf.String()
-}
-
-func (this *config) list_option(name string) string {
- str, typ := this.value_and_type()
- buf := bytes.NewBuffer(make([]byte, 0, 256))
- for i := 0; i < str.NumField(); i++ {
- v := str.Field(i)
- nm := typ.Field(i).Tag.Get("json")
- if nm == name {
- list_value(v, name, buf)
- }
- }
- return buf.String()
-}
-
-func (this *config) set_option(name, value string) string {
- str, typ := this.value_and_type()
- buf := bytes.NewBuffer(make([]byte, 0, 256))
- for i := 0; i < str.NumField(); i++ {
- v := str.Field(i)
- nm := typ.Field(i).Tag.Get("json")
- if nm == name {
- set_value(v, value)
- list_value(v, name, buf)
- }
- }
- this.write()
- return buf.String()
-
-}
-
-func (this *config) value_and_type() (reflect.Value, reflect.Type) {
- v := reflect.ValueOf(this).Elem()
- return v, v.Type()
-}
-
-func (this *config) write() error {
- data, err := json.Marshal(this)
- if err != nil {
- return err
- }
-
- // make sure config dir exists
- dir := config_dir()
- if !file_exists(dir) {
- os.MkdirAll(dir, 0755)
- }
-
- f, err := os.Create(config_file())
- if err != nil {
- return err
- }
- defer f.Close()
-
- _, err = f.Write(data)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (this *config) read() error {
- data, err := ioutil.ReadFile(config_file())
- if err != nil {
- return err
- }
-
- err = json.Unmarshal(data, this)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func quoted(v interface{}) string {
- switch v.(type) {
- case string:
- return fmt.Sprintf("%q", v)
- case int:
- return fmt.Sprint(v)
- case bool:
- return fmt.Sprint(v)
- default:
- panic("unreachable")
- }
-}
-
-var descRE = regexp.MustCompile(`{[^}]+}`)
-
-func preprocess_desc(v string) string {
- return descRE.ReplaceAllStringFunc(v, func(v string) string {
- return color_cyan + v[1:len(v)-1] + color_none
- })
-}
-
-func (this *config) options() string {
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "%sConfig file location%s: %s\n", color_white_bold, color_none, config_file())
- dv := reflect.ValueOf(g_default_config)
- v, t := this.value_and_type()
- for i, n := 0, t.NumField(); i < n; i++ {
- f := t.Field(i)
- index := f.Index
- tag := f.Tag.Get("json")
- fmt.Fprintf(&buf, "\n%s%s%s\n", color_yellow_bold, tag, color_none)
- fmt.Fprintf(&buf, "%stype%s: %s\n", color_yellow, color_none, f.Type)
- fmt.Fprintf(&buf, "%svalue%s: %s\n", color_yellow, color_none, quoted(v.FieldByIndex(index).Interface()))
- fmt.Fprintf(&buf, "%sdefault%s: %s\n", color_yellow, color_none, quoted(dv.FieldByIndex(index).Interface()))
- fmt.Fprintf(&buf, "%sdescription%s: %s\n", color_yellow, color_none, preprocess_desc(g_config_desc[tag]))
- }
-
- return buf.String()
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/cursorcontext.go b/src/disposa.blue/margo/golang/internal/gocode/cursorcontext.go
deleted file mode 100644
index 6a90d0dc..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/cursorcontext.go
+++ /dev/null
@@ -1,582 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "log"
-)
-
-type cursor_context struct {
- decl *decl
- partial string
- struct_field bool
- decl_import bool
-
- // store expression that was supposed to be deduced to "decl", however
- // if decl is nil, then deduction failed, we could try to resolve it to
- // unimported package instead
- expr ast.Expr
-}
-
-type token_iterator struct {
- tokens []token_item
- token_index int
-}
-
-type token_item struct {
- off int
- tok token.Token
- lit string
-}
-
-func (i token_item) literal() string {
- if i.tok.IsLiteral() {
- return i.lit
- }
- return i.tok.String()
-}
-
-func new_token_iterator(src []byte, cursor int) token_iterator {
- tokens := make([]token_item, 0, 1000)
- var s scanner.Scanner
- fset := token.NewFileSet()
- file := fset.AddFile("", fset.Base(), len(src))
- s.Init(file, src, nil, 0)
- for {
- pos, tok, lit := s.Scan()
- off := fset.Position(pos).Offset
- if tok == token.EOF || cursor <= off {
- break
- }
- tokens = append(tokens, token_item{
- off: off,
- tok: tok,
- lit: lit,
- })
- }
- return token_iterator{
- tokens: tokens,
- token_index: len(tokens) - 1,
- }
-}
-
-func (this *token_iterator) token() token_item {
- return this.tokens[this.token_index]
-}
-
-func (this *token_iterator) go_back() bool {
- if this.token_index <= 0 {
- return false
- }
- this.token_index--
- return true
-}
-
-var bracket_pairs_map = map[token.Token]token.Token{
- token.RPAREN: token.LPAREN,
- token.RBRACK: token.LBRACK,
- token.RBRACE: token.LBRACE,
-}
-
-func (ti *token_iterator) skip_to_left(left, right token.Token) bool {
- if ti.token().tok == left {
- return true
- }
- balance := 1
- for balance != 0 {
- if !ti.go_back() {
- return false
- }
- switch ti.token().tok {
- case right:
- balance++
- case left:
- balance--
- }
- }
- return true
-}
-
-// when the cursor is at the ')' or ']' or '}', move the cursor to an opposite
-// bracket pair, this functions takes nested bracket pairs into account
-func (this *token_iterator) skip_to_balanced_pair() bool {
- right := this.token().tok
- left := bracket_pairs_map[right]
- return this.skip_to_left(left, right)
-}
-
-// Move the cursor to the open brace of the current block, taking nested blocks
-// into account.
-func (this *token_iterator) skip_to_left_curly() bool {
- return this.skip_to_left(token.LBRACE, token.RBRACE)
-}
-
-func (ti *token_iterator) extract_type_alike() string {
- if ti.token().tok != token.IDENT { // not Foo, return nothing
- return ""
- }
- b := ti.token().literal()
- if !ti.go_back() { // just Foo
- return b
- }
- if ti.token().tok != token.PERIOD { // not .Foo, return Foo
- return b
- }
- if !ti.go_back() { // just .Foo, return Foo (best choice recovery)
- return b
- }
- if ti.token().tok != token.IDENT { // not lib.Foo, return Foo
- return b
- }
- out := ti.token().literal() + "." + b // lib.Foo
- ti.go_back()
- return out
-}
-
-// Extract the type expression right before the enclosing curly bracket block.
-// Examples (# - the cursor):
-// &lib.Struct{Whatever: 1, Hel#} // returns "lib.Struct"
-// X{#} // returns X
-// The idea is that we check if this type expression is a type and it is, we
-// can apply special filtering for autocompletion results.
-// Sadly, this doesn't cover anonymous structs.
-func (ti *token_iterator) extract_struct_type() string {
- if !ti.skip_to_left_curly() {
- return ""
- }
- if !ti.go_back() {
- return ""
- }
- if ti.token().tok == token.LBRACE { // Foo{#{}}
- if !ti.go_back() {
- return ""
- }
- } else if ti.token().tok == token.COMMA { // Foo{abc,#{}}
- return ti.extract_struct_type()
- }
- typ := ti.extract_type_alike()
- if typ == "" {
- return ""
- }
- if ti.token().tok == token.RPAREN || ti.token().tok == token.MUL {
- return ""
- }
- return typ
-}
-
-// Starting from the token under the cursor move back and extract something
-// that resembles a valid Go primary expression. Examples of primary expressions
-// from Go spec:
-// x
-// 2
-// (s + ".txt")
-// f(3.1415, true)
-// Point{1, 2}
-// m["foo"]
-// s[i : j + 1]
-// obj.color
-// f.p[i].x()
-//
-// As you can see we can move through all of them using balanced bracket
-// matching and applying simple rules
-// E.g.
-// Point{1, 2}.m["foo"].s[i : j + 1].MethodCall(a, func(a, b int) int { return a + b }).
-// Can be seen as:
-// Point{ }.m[ ].s[ ].MethodCall( ).
-// Which boils the rules down to these connected via dots:
-// ident
-// ident[]
-// ident{}
-// ident()
-// Of course there are also slightly more complicated rules for brackets:
-// ident{}.ident()[5][4](), etc.
-func (this *token_iterator) extract_go_expr() string {
- orig := this.token_index
-
- // Contains the type of the previously scanned token (initialized with
- // the token right under the cursor). This is the token to the *right* of
- // the current one.
- prev := this.token().tok
-loop:
- for {
- if !this.go_back() {
- return token_items_to_string(this.tokens[:orig])
- }
- switch this.token().tok {
- case token.PERIOD:
- // If the '.' is not followed by IDENT, it's invalid.
- if prev != token.IDENT {
- break loop
- }
- case token.IDENT:
- // Valid tokens after IDENT are '.', '[', '{' and '('.
- switch prev {
- case token.PERIOD, token.LBRACK, token.LBRACE, token.LPAREN:
- // all ok
- default:
- break loop
- }
- case token.RBRACE:
- // This one can only be a part of type initialization, like:
- // Dummy{}.Hello()
- // It is valid Go if Hello method is defined on a non-pointer receiver.
- if prev != token.PERIOD {
- break loop
- }
- this.skip_to_balanced_pair()
- case token.RPAREN, token.RBRACK:
- // After ']' and ')' their opening counterparts are valid '[', '(',
- // as well as the dot.
- switch prev {
- case token.PERIOD, token.LBRACK, token.LPAREN:
- // all ok
- default:
- break loop
- }
- this.skip_to_balanced_pair()
- default:
- break loop
- }
- prev = this.token().tok
- }
- expr := token_items_to_string(this.tokens[this.token_index+1 : orig])
- if *g_debug {
- log.Printf("extracted expression tokens: %s", expr)
- }
- return expr
-}
-
-// Given a slice of token_item, reassembles them into the original literal
-// expression.
-func token_items_to_string(tokens []token_item) string {
- var buf bytes.Buffer
- for _, t := range tokens {
- buf.WriteString(t.literal())
- }
- return buf.String()
-}
-
-// this function is called when the cursor is at the '.' and you need to get the
-// declaration before that dot
-func (c *auto_complete_context) deduce_cursor_decl(iter *token_iterator) (*decl, ast.Expr) {
- expr, err := parser.ParseExpr(iter.extract_go_expr())
- if err != nil {
- return nil, nil
- }
- return expr_to_decl(expr, c.current.scope), expr
-}
-
-// try to find and extract the surrounding struct literal type
-func (c *auto_complete_context) deduce_struct_type_decl(iter *token_iterator) *decl {
- typ := iter.extract_struct_type()
- if typ == "" {
- return nil
- }
-
- expr, err := parser.ParseExpr(typ)
- if err != nil {
- return nil
- }
- decl := type_to_decl(expr, c.current.scope)
- if decl == nil {
- return nil
- }
-
- // we allow only struct types here, but also support type aliases
- if decl.is_alias() {
- dd := decl.type_dealias()
- if _, ok := dd.typ.(*ast.StructType); !ok {
- return nil
- }
- } else if _, ok := decl.typ.(*ast.StructType); !ok {
- return nil
- }
- return decl
-}
-
-// Entry point from autocompletion, the function looks at text before the cursor
-// and figures out the declaration the cursor is on. This declaration is
-// used in filtering the resulting set of autocompletion suggestions.
-func (c *auto_complete_context) deduce_cursor_context(file []byte, cursor int) (cursor_context, bool) {
- if cursor <= 0 {
- return cursor_context{}, true
- }
-
- iter := new_token_iterator(file, cursor)
- if len(iter.tokens) == 0 {
- return cursor_context{}, false
- }
-
- // figure out what is just before the cursor
- switch tok := iter.token(); tok.tok {
- case token.STRING:
- // make sure cursor is inside the string
- s := tok.literal()
- if len(s) > 1 && s[len(s)-1] == '"' && tok.off+len(s) <= cursor {
- return cursor_context{}, true
- }
- // now figure out if inside an import declaration
- var ptok = token.STRING
- for iter.go_back() {
- itok := iter.token().tok
- switch itok {
- case token.STRING:
- switch ptok {
- case token.SEMICOLON, token.IDENT, token.PERIOD:
- default:
- return cursor_context{}, true
- }
- case token.LPAREN, token.SEMICOLON:
- switch ptok {
- case token.STRING, token.IDENT, token.PERIOD:
- default:
- return cursor_context{}, true
- }
- case token.IDENT, token.PERIOD:
- switch ptok {
- case token.STRING:
- default:
- return cursor_context{}, true
- }
- case token.IMPORT:
- switch ptok {
- case token.STRING, token.IDENT, token.PERIOD, token.LPAREN:
- path_len := cursor - tok.off
- path := s[1:path_len]
- return cursor_context{decl_import: true, partial: path}, true
- default:
- return cursor_context{}, true
- }
- default:
- return cursor_context{}, true
- }
- ptok = itok
- }
- case token.PERIOD:
- // we're '.'
- // figure out decl, Partial is ""
- decl, expr := c.deduce_cursor_decl(&iter)
- return cursor_context{decl: decl, expr: expr}, decl != nil
- case token.IDENT, token.TYPE, token.CONST, token.VAR, token.FUNC, token.PACKAGE:
- // we're '.'
- // parse as Partial and figure out decl
- var partial string
- if tok.tok == token.IDENT {
- // Calculate the offset of the cursor position within the identifier.
- // For instance, if we are 'ab#c', we want partial_len = 2 and partial = ab.
- partial_len := cursor - tok.off
-
- // If it happens that the cursor is past the end of the literal,
- // means there is a space between the literal and the cursor, think
- // of it as no context, because that's what it really is.
- if partial_len > len(tok.literal()) {
- return cursor_context{}, true
- }
- partial = tok.literal()[0:partial_len]
- } else {
- // Do not try to truncate if it is not an identifier.
- partial = tok.literal()
- }
-
- iter.go_back()
- switch iter.token().tok {
- case token.PERIOD:
- decl, expr := c.deduce_cursor_decl(&iter)
- return cursor_context{decl: decl, partial: partial, expr: expr}, decl != nil
- case token.COMMA, token.LBRACE:
- // This can happen for struct fields:
- // &Struct{Hello: 1, Wor#} // (# - the cursor)
- // Let's try to find the struct type
- decl := c.deduce_struct_type_decl(&iter)
- return cursor_context{
- decl: decl,
- partial: partial,
- struct_field: decl != nil,
- }, true
- default:
- return cursor_context{partial: partial}, true
- }
- case token.COMMA, token.LBRACE:
- // Try to parse the current expression as a structure initialization.
- decl := c.deduce_struct_type_decl(&iter)
- return cursor_context{
- decl: decl,
- partial: "",
- struct_field: decl != nil,
- }, true
- }
-
- return cursor_context{}, true
-}
-
-// Decl deduction failed, but we're on ".", this ident can be an
-// unexported package, let's try to match the ident against a set of known
-// packages and if it matches try to import it.
-// TODO: Right now I've made a static list of built-in packages, but in theory
-// we could scan all GOPATH packages as well. Now, don't forget that default
-// package name has nothing to do with package file name, that's why we need to
-// scan the packages. And many of them will have conflicts. Can we make a smart
-// prediction algorithm which will prefer certain packages over another ones?
-func resolveKnownPackageIdent(ident string, filename string, context *package_lookup_context) *package_file_cache {
- importPath, ok := knownPackageIdents[ident]
- if !ok {
- return nil
- }
-
- path, ok := abs_path_for_package(filename, importPath, context)
- if !ok {
- return nil
- }
-
- p := new_package_file_cache(path, importPath)
- p.update_cache()
- return p
-}
-
-var knownPackageIdents = map[string]string{
- "adler32": "hash/adler32",
- "aes": "crypto/aes",
- "ascii85": "encoding/ascii85",
- "asn1": "encoding/asn1",
- "ast": "go/ast",
- "atomic": "sync/atomic",
- "base32": "encoding/base32",
- "base64": "encoding/base64",
- "big": "math/big",
- "binary": "encoding/binary",
- "bufio": "bufio",
- "build": "go/build",
- "bytes": "bytes",
- "bzip2": "compress/bzip2",
- "cgi": "net/http/cgi",
- "cgo": "runtime/cgo",
- "cipher": "crypto/cipher",
- "cmplx": "math/cmplx",
- "color": "image/color",
- "constant": "go/constant",
- "context": "context",
- "cookiejar": "net/http/cookiejar",
- "crc32": "hash/crc32",
- "crc64": "hash/crc64",
- "crypto": "crypto",
- "csv": "encoding/csv",
- "debug": "runtime/debug",
- "des": "crypto/des",
- "doc": "go/doc",
- "draw": "image/draw",
- "driver": "database/sql/driver",
- "dsa": "crypto/dsa",
- "dwarf": "debug/dwarf",
- "ecdsa": "crypto/ecdsa",
- "elf": "debug/elf",
- "elliptic": "crypto/elliptic",
- "encoding": "encoding",
- "errors": "errors",
- "exec": "os/exec",
- "expvar": "expvar",
- "fcgi": "net/http/fcgi",
- "filepath": "path/filepath",
- "flag": "flag",
- "flate": "compress/flate",
- "fmt": "fmt",
- "fnv": "hash/fnv",
- "format": "go/format",
- "gif": "image/gif",
- "gob": "encoding/gob",
- "gosym": "debug/gosym",
- "gzip": "compress/gzip",
- "hash": "hash",
- "heap": "container/heap",
- "hex": "encoding/hex",
- "hmac": "crypto/hmac",
- "hpack": "vendor/golang_org/x/net/http2/hpack",
- "html": "html",
- "http": "net/http",
- "httplex": "vendor/golang_org/x/net/lex/httplex",
- "httptest": "net/http/httptest",
- "httptrace": "net/http/httptrace",
- "httputil": "net/http/httputil",
- "image": "image",
- "importer": "go/importer",
- "io": "io",
- "iotest": "testing/iotest",
- "ioutil": "io/ioutil",
- "jpeg": "image/jpeg",
- "json": "encoding/json",
- "jsonrpc": "net/rpc/jsonrpc",
- "list": "container/list",
- "log": "log",
- "lzw": "compress/lzw",
- "macho": "debug/macho",
- "mail": "net/mail",
- "math": "math",
- "md5": "crypto/md5",
- "mime": "mime",
- "multipart": "mime/multipart",
- "net": "net",
- "os": "os",
- "palette": "image/color/palette",
- "parse": "text/template/parse",
- "parser": "go/parser",
- "path": "path",
- "pe": "debug/pe",
- "pem": "encoding/pem",
- "pkix": "crypto/x509/pkix",
- "plan9obj": "debug/plan9obj",
- "png": "image/png",
- "pprof": "net/http/pprof",
- "printer": "go/printer",
- "quick": "testing/quick",
- "quotedprintable": "mime/quotedprintable",
- "race": "runtime/race",
- "rand": "math/rand",
- "rc4": "crypto/rc4",
- "reflect": "reflect",
- "regexp": "regexp",
- "ring": "container/ring",
- "rpc": "net/rpc",
- "rsa": "crypto/rsa",
- "runtime": "runtime",
- "scanner": "text/scanner",
- "sha1": "crypto/sha1",
- "sha256": "crypto/sha256",
- "sha512": "crypto/sha512",
- "signal": "os/signal",
- "smtp": "net/smtp",
- "sort": "sort",
- "sql": "database/sql",
- "strconv": "strconv",
- "strings": "strings",
- "subtle": "crypto/subtle",
- "suffixarray": "index/suffixarray",
- "sync": "sync",
- "syntax": "regexp/syntax",
- "syscall": "syscall",
- "syslog": "log/syslog",
- "tabwriter": "text/tabwriter",
- "tar": "archive/tar",
- "template": "html/template",
- "testing": "testing",
- "textproto": "net/textproto",
- "time": "time",
- "tls": "crypto/tls",
- "token": "go/token",
- "trace": "runtime/trace",
- "types": "go/types",
- "unicode": "unicode",
- "url": "net/url",
- "user": "os/user",
- "utf16": "unicode/utf16",
- "utf8": "unicode/utf8",
- "x509": "crypto/x509",
- "xml": "encoding/xml",
- "zip": "archive/zip",
- "zlib": "compress/zlib",
- //"scanner": "go/scanner", // DUP: prefer text/scanner
- //"template": "text/template", // DUP: prefer html/template
- //"pprof": "runtime/pprof", // DUP: prefer net/http/pprof
- //"rand": "crypto/rand", // DUP: prefer math/rand
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/decl.go b/src/disposa.blue/margo/golang/internal/gocode/decl.go
deleted file mode 100644
index 228f63cd..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/decl.go
+++ /dev/null
@@ -1,1485 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/token"
- "io"
- "reflect"
- "strings"
- "sync"
-)
-
-// decl.class
-type decl_class int16
-
-const (
- decl_invalid = decl_class(-1 + iota)
-
- // these are in a sorted order
- decl_const
- decl_func
- decl_import
- decl_package
- decl_type
- decl_var
-
- // this one serves as a temporary type for those methods that were
- // declared before their actual owner
- decl_methods_stub
-)
-
-func (this decl_class) String() string {
- switch this {
- case decl_invalid:
- return "PANIC"
- case decl_const:
- return "const"
- case decl_func:
- return "func"
- case decl_import:
- return "import"
- case decl_package:
- return "package"
- case decl_type:
- return "type"
- case decl_var:
- return "var"
- case decl_methods_stub:
- return "IF YOU SEE THIS, REPORT A BUG" // :D
- }
- panic("unreachable")
-}
-
-// decl.flags
-type decl_flags int16
-
-const (
- decl_foreign decl_flags = 1 << iota // imported from another package
-
- // means that the decl is a part of the range statement
- // its type is inferred in a special way
- decl_rangevar
-
- // decl of decl_type class is a type alias
- decl_alias
-
- // for preventing infinite recursions and loops in type inference code
- decl_visited
-)
-
-//-------------------------------------------------------------------------
-// decl
-//
-// The most important data structure of the whole gocode project. It
-// describes a single declaration and its children.
-//-------------------------------------------------------------------------
-
-type decl struct {
- // Name starts with '$' if the declaration describes an anonymous type.
- // '$s_%d' for anonymous struct types
- // '$i_%d' for anonymous interface types
- name string
- typ ast.Expr
- class decl_class
- flags decl_flags
-
- // functions for interface type, fields+methods for struct type
- children map[string]*decl
-
- // embedded types
- embedded []ast.Expr
-
- // if the type is unknown at AST building time, I'm using these
- value ast.Expr
-
- // if it's a multiassignment and the Value is a CallExpr, it is being set
- // to an index into the return value tuple, otherwise it's a -1
- value_index int
-
- // scope where this Decl was declared in (not its visibilty scope!)
- // Decl uses it for type inference
- scope *scope
-}
-
-func ast_decl_type(d ast.Decl) ast.Expr {
- switch t := d.(type) {
- case *ast.GenDecl:
- switch t.Tok {
- case token.CONST, token.VAR:
- c := t.Specs[0].(*ast.ValueSpec)
- return c.Type
- case token.TYPE:
- t := t.Specs[0].(*ast.TypeSpec)
- return t.Type
- }
- case *ast.FuncDecl:
- return t.Type
- }
- panic("unreachable")
-}
-
-func ast_decl_flags(d ast.Decl) decl_flags {
- switch t := d.(type) {
- case *ast.GenDecl:
- switch t.Tok {
- case token.TYPE:
- if isAliasTypeSpec(t.Specs[0].(*ast.TypeSpec)) {
- return decl_alias
- }
- }
- }
- return 0
-}
-
-func ast_decl_class(d ast.Decl) decl_class {
- switch t := d.(type) {
- case *ast.GenDecl:
- switch t.Tok {
- case token.VAR:
- return decl_var
- case token.CONST:
- return decl_const
- case token.TYPE:
- return decl_type
- }
- case *ast.FuncDecl:
- return decl_func
- }
- panic("unreachable")
-}
-
-func ast_decl_convertable(d ast.Decl) bool {
- switch t := d.(type) {
- case *ast.GenDecl:
- switch t.Tok {
- case token.VAR, token.CONST, token.TYPE:
- return true
- }
- case *ast.FuncDecl:
- return true
- }
- return false
-}
-
-func ast_field_list_to_decls(f *ast.FieldList, class decl_class, flags decl_flags, scope *scope, add_anonymous bool) map[string]*decl {
- count := 0
- for _, field := range f.List {
- count += len(field.Names)
- }
-
- decls := make(map[string]*decl, count)
- for _, field := range f.List {
- for _, name := range field.Names {
- if flags&decl_foreign != 0 && !ast.IsExported(name.Name) {
- continue
- }
- d := &decl{
- name: name.Name,
- typ: field.Type,
- class: class,
- flags: flags,
- scope: scope,
- value_index: -1,
- }
- decls[d.name] = d
- }
-
- // add anonymous field as a child (type embedding)
- if class == decl_var && field.Names == nil && add_anonymous {
- tp := get_type_path(field.Type)
- if flags&decl_foreign != 0 && !ast.IsExported(tp.name) {
- continue
- }
- d := &decl{
- name: tp.name,
- typ: field.Type,
- class: class,
- flags: flags,
- scope: scope,
- value_index: -1,
- }
- decls[d.name] = d
- }
- }
- return decls
-}
-
-func ast_field_list_to_embedded(f *ast.FieldList) []ast.Expr {
- count := 0
- for _, field := range f.List {
- if field.Names == nil || field.Names[0].Name == "?" {
- count++
- }
- }
-
- if count == 0 {
- return nil
- }
-
- embedded := make([]ast.Expr, count)
- i := 0
- for _, field := range f.List {
- if field.Names == nil || field.Names[0].Name == "?" {
- embedded[i] = field.Type
- i++
- }
- }
-
- return embedded
-}
-
-func ast_type_to_embedded(ty ast.Expr) []ast.Expr {
- switch t := ty.(type) {
- case *ast.StructType:
- return ast_field_list_to_embedded(t.Fields)
- case *ast.InterfaceType:
- return ast_field_list_to_embedded(t.Methods)
- }
- return nil
-}
-
-func ast_type_to_children(ty ast.Expr, flags decl_flags, scope *scope) map[string]*decl {
- switch t := ty.(type) {
- case *ast.StructType:
- return ast_field_list_to_decls(t.Fields, decl_var, flags, scope, true)
- case *ast.InterfaceType:
- return ast_field_list_to_decls(t.Methods, decl_func, flags, scope, false)
- }
- return nil
-}
-
-//-------------------------------------------------------------------------
-// anonymous_id_gen
-//
-// ID generator for anonymous types (thread-safe)
-//-------------------------------------------------------------------------
-
-type anonymous_id_gen struct {
- sync.Mutex
- i int
-}
-
-func (a *anonymous_id_gen) gen() (id int) {
- a.Lock()
- defer a.Unlock()
- id = a.i
- a.i++
- return
-}
-
-var g_anon_gen anonymous_id_gen
-
-//-------------------------------------------------------------------------
-
-func check_for_anon_type(t ast.Expr, flags decl_flags, s *scope) ast.Expr {
- if t == nil {
- return nil
- }
- var name string
-
- switch t.(type) {
- case *ast.StructType:
- name = fmt.Sprintf("$s_%d", g_anon_gen.gen())
- case *ast.InterfaceType:
- name = fmt.Sprintf("$i_%d", g_anon_gen.gen())
- }
-
- if name != "" {
- anonymify_ast(t, flags, s)
- d := new_decl_full(name, decl_type, flags, t, nil, -1, s)
- s.add_named_decl(d)
- return ast.NewIdent(name)
- }
- return t
-}
-
-//-------------------------------------------------------------------------
-
-func new_decl_full(name string, class decl_class, flags decl_flags, typ, v ast.Expr, vi int, s *scope) *decl {
- if name == "_" {
- return nil
- }
- d := new(decl)
- d.name = name
- d.class = class
- d.flags = flags
- d.typ = typ
- d.value = v
- d.value_index = vi
- d.scope = s
- d.children = ast_type_to_children(d.typ, flags, s)
- d.embedded = ast_type_to_embedded(d.typ)
- return d
-}
-
-func new_decl(name string, class decl_class, scope *scope) *decl {
- decl := new(decl)
- decl.name = name
- decl.class = class
- decl.value_index = -1
- decl.scope = scope
- return decl
-}
-
-func new_decl_var(name string, typ ast.Expr, value ast.Expr, vindex int, scope *scope) *decl {
- if name == "_" {
- return nil
- }
- decl := new(decl)
- decl.name = name
- decl.class = decl_var
- decl.typ = typ
- decl.value = value
- decl.value_index = vindex
- decl.scope = scope
- return decl
-}
-
-func method_of(d ast.Decl) string {
- if t, ok := d.(*ast.FuncDecl); ok {
- if t.Recv != nil && len(t.Recv.List) != 0 {
- switch t := t.Recv.List[0].Type.(type) {
- case *ast.StarExpr:
- if se, ok := t.X.(*ast.SelectorExpr); ok {
- return se.Sel.Name
- }
- if ident, ok := t.X.(*ast.Ident); ok {
- return ident.Name
- }
- return ""
- case *ast.Ident:
- return t.Name
- default:
- return ""
- }
- }
- }
- return ""
-}
-
-func (other *decl) deep_copy() *decl {
- d := new(decl)
- d.name = other.name
- d.class = other.class
- d.flags = other.flags
- d.typ = other.typ
- d.value = other.value
- d.value_index = other.value_index
- d.children = make(map[string]*decl, len(other.children))
- for key, value := range other.children {
- d.children[key] = value
- }
- if other.embedded != nil {
- d.embedded = make([]ast.Expr, len(other.embedded))
- copy(d.embedded, other.embedded)
- }
- d.scope = other.scope
- return d
-}
-
-func (d *decl) is_rangevar() bool {
- return d.flags&decl_rangevar != 0
-}
-
-func (d *decl) is_alias() bool {
- return d.flags&decl_alias != 0
-}
-
-func (d *decl) is_visited() bool {
- return d.flags&decl_visited != 0
-}
-
-func (d *decl) set_visited() {
- d.flags |= decl_visited
-}
-
-func (d *decl) clear_visited() {
- d.flags &^= decl_visited
-}
-
-func (d *decl) expand_or_replace(other *decl) {
- // expand only if it's a methods stub, otherwise simply keep it as is
- if d.class != decl_methods_stub && other.class != decl_methods_stub {
- return
- }
-
- if d.class == decl_methods_stub {
- d.typ = other.typ
- d.class = other.class
- d.flags = other.flags
- }
-
- if other.children != nil {
- for _, c := range other.children {
- d.add_child(c)
- }
- }
-
- if other.embedded != nil {
- d.embedded = other.embedded
- d.scope = other.scope
- }
-}
-
-func (d *decl) matches() bool {
- if strings.HasPrefix(d.name, "$") || d.class == decl_methods_stub {
- return false
- }
- return true
-}
-
-func (d *decl) pretty_print_type(out io.Writer, canonical_aliases map[string]string) {
- switch d.class {
- case decl_type:
- switch d.typ.(type) {
- case *ast.StructType:
- // TODO: not used due to anonymify?
- fmt.Fprintf(out, "struct")
- case *ast.InterfaceType:
- // TODO: not used due to anonymify?
- fmt.Fprintf(out, "interface")
- default:
- if d.typ != nil {
- pretty_print_type_expr(out, d.typ, canonical_aliases)
- }
- }
- case decl_var:
- if d.typ != nil {
- pretty_print_type_expr(out, d.typ, canonical_aliases)
- }
- case decl_func:
- pretty_print_type_expr(out, d.typ, canonical_aliases)
- }
-}
-
-func (d *decl) add_child(cd *decl) {
- if d.children == nil {
- d.children = make(map[string]*decl)
- }
- d.children[cd.name] = cd
-}
-
-func check_for_builtin_funcs(typ *ast.Ident, c *ast.CallExpr, scope *scope) (ast.Expr, *scope) {
- if strings.HasPrefix(typ.Name, "func(") {
- if t, ok := c.Fun.(*ast.Ident); ok {
- switch t.Name {
- case "new":
- if len(c.Args) > 0 {
- e := new(ast.StarExpr)
- e.X = c.Args[0]
- return e, scope
- }
- case "make":
- if len(c.Args) > 0 {
- return c.Args[0], scope
- }
- case "append":
- if len(c.Args) > 0 {
- t, scope, _ := infer_type(c.Args[0], scope, -1)
- return t, scope
- }
- case "complex":
- // TODO: fix it
- return ast.NewIdent("complex"), g_universe_scope
- case "closed":
- return ast.NewIdent("bool"), g_universe_scope
- case "cap":
- return ast.NewIdent("int"), g_universe_scope
- case "copy":
- return ast.NewIdent("int"), g_universe_scope
- case "len":
- return ast.NewIdent("int"), g_universe_scope
- }
- // TODO:
- // func recover() interface{}
- // func imag(c ComplexType) FloatType
- // func real(c ComplexType) FloatType
- }
- }
- return nil, nil
-}
-
-func func_return_type(f *ast.FuncType, index int) ast.Expr {
- if f.Results == nil {
- return nil
- }
-
- if index == -1 {
- return f.Results.List[0].Type
- }
-
- i := 0
- var field *ast.Field
- for _, field = range f.Results.List {
- n := 1
- if field.Names != nil {
- n = len(field.Names)
- }
- if i <= index && index < i+n {
- return field.Type
- }
- i += n
- }
- return nil
-}
-
-type type_path struct {
- pkg string
- name string
-}
-
-func (tp *type_path) is_nil() bool {
- return tp.pkg == "" && tp.name == ""
-}
-
-// converts type expressions like:
-// ast.Expr
-// *ast.Expr
-// $ast$go/ast.Expr
-// to a path that can be used to lookup a type related Decl
-func get_type_path(e ast.Expr) (r type_path) {
- if e == nil {
- return type_path{"", ""}
- }
-
- switch t := e.(type) {
- case *ast.Ident:
- r.name = t.Name
- case *ast.StarExpr:
- r = get_type_path(t.X)
- case *ast.SelectorExpr:
- if ident, ok := t.X.(*ast.Ident); ok {
- r.pkg = ident.Name
- }
- r.name = t.Sel.Name
- }
- return
-}
-
-func lookup_path(tp type_path, scope *scope) *decl {
- if tp.is_nil() {
- return nil
- }
- var decl *decl
- if tp.pkg != "" {
- decl = scope.lookup(tp.pkg)
- // return nil early if the package wasn't found but it's part
- // of the type specification
- if decl == nil {
- return nil
- }
- }
-
- if decl != nil {
- if tp.name != "" {
- return decl.find_child(tp.name)
- } else {
- return decl
- }
- }
-
- return scope.lookup(tp.name)
-}
-
-func lookup_pkg(tp type_path, scope *scope) string {
- if tp.is_nil() {
- return ""
- }
- if tp.pkg == "" {
- return ""
- }
- decl := scope.lookup(tp.pkg)
- if decl == nil {
- return ""
- }
- return decl.name
-}
-
-func type_to_decl(t ast.Expr, scope *scope) *decl {
- tp := get_type_path(t)
- d := lookup_path(tp, scope)
- if d != nil && d.class == decl_var {
- // weird variable declaration pointing to itself
- return nil
- }
- return d
-}
-
-func expr_to_decl(e ast.Expr, scope *scope) *decl {
- t, scope, _ := infer_type(e, scope, -1)
- return type_to_decl(t, scope)
-}
-
-//-------------------------------------------------------------------------
-// Type inference
-//-------------------------------------------------------------------------
-
-type type_predicate func(ast.Expr) bool
-
-func advance_to_type(pred type_predicate, v ast.Expr, scope *scope) (ast.Expr, *scope) {
- if pred(v) {
- return v, scope
- }
-
- decl := type_to_decl(v, scope)
- if decl == nil {
- return nil, nil
- }
-
- if decl.is_visited() {
- return nil, nil
- }
- decl.set_visited()
- defer decl.clear_visited()
-
- return advance_to_type(pred, decl.typ, decl.scope)
-}
-
-func advance_to_struct_or_interface(decl *decl) *decl {
- if decl.is_visited() {
- return nil
- }
- decl.set_visited()
- defer decl.clear_visited()
-
- if struct_interface_predicate(decl.typ) {
- return decl
- }
-
- decl = type_to_decl(decl.typ, decl.scope)
- if decl == nil {
- return nil
- }
- return advance_to_struct_or_interface(decl)
-}
-
-func struct_interface_predicate(v ast.Expr) bool {
- switch v.(type) {
- case *ast.StructType, *ast.InterfaceType:
- return true
- }
- return false
-}
-
-func chan_predicate(v ast.Expr) bool {
- _, ok := v.(*ast.ChanType)
- return ok
-}
-
-func index_predicate(v ast.Expr) bool {
- switch v.(type) {
- case *ast.ArrayType, *ast.MapType, *ast.Ellipsis:
- return true
- }
- return false
-}
-
-func star_predicate(v ast.Expr) bool {
- _, ok := v.(*ast.StarExpr)
- return ok
-}
-
-func func_predicate(v ast.Expr) bool {
- _, ok := v.(*ast.FuncType)
- return ok
-}
-
-func range_predicate(v ast.Expr) bool {
- switch t := v.(type) {
- case *ast.Ident:
- if t.Name == "string" {
- return true
- }
- case *ast.ArrayType, *ast.MapType, *ast.ChanType, *ast.Ellipsis:
- return true
- }
- return false
-}
-
-type anonymous_typer struct {
- flags decl_flags
- scope *scope
-}
-
-func (a *anonymous_typer) Visit(node ast.Node) ast.Visitor {
- switch t := node.(type) {
- case *ast.CompositeLit:
- t.Type = check_for_anon_type(t.Type, a.flags, a.scope)
- case *ast.MapType:
- t.Key = check_for_anon_type(t.Key, a.flags, a.scope)
- t.Value = check_for_anon_type(t.Value, a.flags, a.scope)
- case *ast.ArrayType:
- t.Elt = check_for_anon_type(t.Elt, a.flags, a.scope)
- case *ast.Ellipsis:
- t.Elt = check_for_anon_type(t.Elt, a.flags, a.scope)
- case *ast.ChanType:
- t.Value = check_for_anon_type(t.Value, a.flags, a.scope)
- case *ast.Field:
- t.Type = check_for_anon_type(t.Type, a.flags, a.scope)
- case *ast.CallExpr:
- t.Fun = check_for_anon_type(t.Fun, a.flags, a.scope)
- case *ast.ParenExpr:
- t.X = check_for_anon_type(t.X, a.flags, a.scope)
- case *ast.StarExpr:
- t.X = check_for_anon_type(t.X, a.flags, a.scope)
- case *ast.GenDecl:
- switch t.Tok {
- case token.VAR:
- for _, s := range t.Specs {
- vs := s.(*ast.ValueSpec)
- vs.Type = check_for_anon_type(vs.Type, a.flags, a.scope)
- }
- case token.TYPE:
- for _, s := range t.Specs {
- ts := s.(*ast.TypeSpec)
- if isAliasTypeSpec(ts) {
- ts.Type = check_for_anon_type(ts.Type, a.flags, a.scope)
- }
- }
- }
- }
- return a
-}
-
-func anonymify_ast(node ast.Node, flags decl_flags, scope *scope) {
- v := anonymous_typer{flags, scope}
- ast.Walk(&v, node)
-}
-
-// RETURNS:
-// - type expression which represents a full name of a type
-// - bool whether a type expression is actually a type (used internally)
-// - scope in which type makes sense
-func infer_type(v ast.Expr, scope *scope, index int) (ast.Expr, *scope, bool) {
- switch t := v.(type) {
- case *ast.CompositeLit:
- return t.Type, scope, true
- case *ast.Ident:
- if d := scope.lookup(t.Name); d != nil {
- if d.class == decl_package {
- return ast.NewIdent(t.Name), scope, false
- }
- typ, scope := d.infer_type()
- return typ, scope, d.class == decl_type
- }
- case *ast.UnaryExpr:
- switch t.Op {
- case token.AND:
- // &a makes sense only with values, don't even check for type
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
-
- e := new(ast.StarExpr)
- e.X = it
- return e, s, false
- case token.ARROW:
- // <-a makes sense only with values
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- switch index {
- case -1, 0:
- it, s = advance_to_type(chan_predicate, it, s)
- return it.(*ast.ChanType).Value, s, false
- case 1:
- // technically it's a value, but in case of index == 1
- // it is always the last infer operation
- return ast.NewIdent("bool"), g_universe_scope, false
- }
- case token.ADD, token.NOT, token.SUB, token.XOR:
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- return it, s, false
- }
- case *ast.BinaryExpr:
- switch t.Op {
- case token.EQL, token.NEQ, token.LSS, token.LEQ,
- token.GTR, token.GEQ, token.LOR, token.LAND:
- // logic operations, the result is a bool, always
- return ast.NewIdent("bool"), g_universe_scope, false
- case token.ADD, token.SUB, token.MUL, token.QUO, token.OR,
- token.XOR, token.REM, token.AND, token.AND_NOT:
- // try X, then Y, they should be the same anyway
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- it, s, _ = infer_type(t.Y, scope, -1)
- if it == nil {
- break
- }
- }
- return it, s, false
- case token.SHL, token.SHR:
- // try only X for shifts, Y is always uint
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- return it, s, false
- }
- case *ast.IndexExpr:
- // something[another] always returns a value and it works on a value too
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- it, s = advance_to_type(index_predicate, it, s)
- switch t := it.(type) {
- case *ast.ArrayType:
- return t.Elt, s, false
- case *ast.Ellipsis:
- return t.Elt, s, false
- case *ast.MapType:
- switch index {
- case -1, 0:
- return t.Value, s, false
- case 1:
- return ast.NewIdent("bool"), g_universe_scope, false
- }
- }
- case *ast.SliceExpr:
- // something[start : end] always returns a value
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- it, s = advance_to_type(index_predicate, it, s)
- switch t := it.(type) {
- case *ast.ArrayType:
- e := new(ast.ArrayType)
- e.Elt = t.Elt
- return e, s, false
- }
- case *ast.StarExpr:
- it, s, is_type := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- if is_type {
- // if it's a type, add * modifier, make it a 'pointer of' type
- e := new(ast.StarExpr)
- e.X = it
- return e, s, true
- } else {
- it, s := advance_to_type(star_predicate, it, s)
- if se, ok := it.(*ast.StarExpr); ok {
- return se.X, s, false
- }
- }
- case *ast.CallExpr:
- // this is a function call or a type cast:
- // myFunc(1,2,3) or int16(myvar)
- it, s, is_type := infer_type(t.Fun, scope, -1)
- if it == nil {
- break
- }
-
- if is_type {
- // a type cast
- return it, scope, false
- } else {
- // it must be a function call or a built-in function
- // first check for built-in
- if ct, ok := it.(*ast.Ident); ok {
- ty, s := check_for_builtin_funcs(ct, t, scope)
- if ty != nil {
- return ty, s, false
- }
- }
-
- // then check for an ordinary function call
- it, scope = advance_to_type(func_predicate, it, s)
- if ct, ok := it.(*ast.FuncType); ok {
- return func_return_type(ct, index), s, false
- }
- }
- case *ast.ParenExpr:
- it, s, is_type := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
- return it, s, is_type
- case *ast.SelectorExpr:
- it, s, _ := infer_type(t.X, scope, -1)
- if it == nil {
- break
- }
-
- if d := type_to_decl(it, s); d != nil {
- c := d.find_child_and_in_embedded(t.Sel.Name)
- if c != nil {
- if c.class == decl_type {
- return t, scope, true
- } else {
- typ, s := c.infer_type()
- return typ, s, false
- }
- }
- }
- case *ast.FuncLit:
- // it's a value, but I think most likely we don't even care, cause we can only
- // call it, and CallExpr uses the type itself to figure out
- return t.Type, scope, false
- case *ast.TypeAssertExpr:
- if t.Type == nil {
- return infer_type(t.X, scope, -1)
- }
- switch index {
- case -1, 0:
- // converting a value to a different type, but return thing is a value
- it, _, _ := infer_type(t.Type, scope, -1)
- return it, scope, false
- case 1:
- return ast.NewIdent("bool"), g_universe_scope, false
- }
- case *ast.ArrayType, *ast.MapType, *ast.ChanType, *ast.Ellipsis,
- *ast.FuncType, *ast.StructType, *ast.InterfaceType:
- return t, scope, true
- default:
- _ = reflect.TypeOf(v)
- //fmt.Println(ty)
- }
- return nil, nil, false
-}
-
-// Uses Value, ValueIndex and Scope to infer the type of this
-// declaration. Returns the type itself and the scope where this type
-// makes sense.
-func (d *decl) infer_type() (ast.Expr, *scope) {
- // special case for range vars
- if d.is_rangevar() {
- var scope *scope
- d.typ, scope = infer_range_type(d.value, d.scope, d.value_index)
- return d.typ, scope
- }
-
- switch d.class {
- case decl_package:
- // package is handled specially in inferType
- return nil, nil
- case decl_type:
- return ast.NewIdent(d.name), d.scope
- }
-
- // shortcut
- if d.typ != nil && d.value == nil {
- return d.typ, d.scope
- }
-
- // prevent loops
- if d.is_visited() {
- return nil, nil
- }
- d.set_visited()
- defer d.clear_visited()
-
- var scope *scope
- d.typ, scope, _ = infer_type(d.value, d.scope, d.value_index)
- return d.typ, scope
-}
-
-func (d *decl) type_dealias() *decl {
- if d.is_visited() {
- return nil
- }
- d.set_visited()
- defer d.clear_visited()
-
- dd := type_to_decl(d.typ, d.scope)
- if dd != nil && dd.is_alias() {
- return dd.type_dealias()
- }
- return dd
-}
-
-func (d *decl) find_child(name string) *decl {
- // type aliases don't really have any children on their own, but they
- // point to a different type, let's try to find one
- if d.is_alias() {
- dd := d.type_dealias()
- if dd != nil {
- return dd.find_child(name)
- }
-
- // note that type alias can also point to a type literal, something like
- // type A = struct { A int }
- // in this case we rely on "advance_to_struct_or_interface" below
- }
-
- if d.children != nil {
- if c, ok := d.children[name]; ok {
- return c
- }
- }
-
- decl := advance_to_struct_or_interface(d)
- if decl != nil && decl != d {
- if d.is_visited() {
- return nil
- }
- d.set_visited()
- defer d.clear_visited()
-
- return decl.find_child(name)
- }
- return nil
-}
-
-func (d *decl) find_child_and_in_embedded(name string) *decl {
- if d == nil {
- return nil
- }
-
- if d.is_visited() {
- return nil
- }
- d.set_visited()
- defer d.clear_visited()
-
- c := d.find_child(name)
- if c == nil {
- for _, e := range d.embedded {
- typedecl := type_to_decl(e, d.scope)
- c = typedecl.find_child_and_in_embedded(name)
- if c != nil {
- break
- }
- }
- }
- return c
-}
-
-// Special type inference for range statements.
-// [int], [int] := range [string]
-// [int], [value] := range [slice or array]
-// [key], [value] := range [map]
-// [value], [nil] := range [chan]
-func infer_range_type(e ast.Expr, sc *scope, valueindex int) (ast.Expr, *scope) {
- t, s, _ := infer_type(e, sc, -1)
- t, s = advance_to_type(range_predicate, t, s)
- if t != nil {
- var t1, t2 ast.Expr
- var s1, s2 *scope
- s1 = s
- s2 = s
-
- switch t := t.(type) {
- case *ast.Ident:
- // string
- if t.Name == "string" {
- t1 = ast.NewIdent("int")
- t2 = ast.NewIdent("rune")
- s1 = g_universe_scope
- s2 = g_universe_scope
- } else {
- t1, t2 = nil, nil
- }
- case *ast.ArrayType:
- t1 = ast.NewIdent("int")
- s1 = g_universe_scope
- t2 = t.Elt
- case *ast.Ellipsis:
- t1 = ast.NewIdent("int")
- s1 = g_universe_scope
- t2 = t.Elt
- case *ast.MapType:
- t1 = t.Key
- t2 = t.Value
- case *ast.ChanType:
- t1 = t.Value
- t2 = nil
- default:
- t1, t2 = nil, nil
- }
-
- switch valueindex {
- case 0:
- return t1, s1
- case 1:
- return t2, s2
- }
- }
- return nil, nil
-}
-
-//-------------------------------------------------------------------------
-// Pretty printing
-//-------------------------------------------------------------------------
-
-func get_array_len(e ast.Expr) string {
- switch t := e.(type) {
- case *ast.BasicLit:
- return string(t.Value)
- case *ast.Ellipsis:
- return "..."
- }
- return ""
-}
-
-func pretty_print_type_expr(out io.Writer, e ast.Expr, canonical_aliases map[string]string) {
- switch t := e.(type) {
- case *ast.StarExpr:
- fmt.Fprintf(out, "*")
- pretty_print_type_expr(out, t.X, canonical_aliases)
- case *ast.Ident:
- if strings.HasPrefix(t.Name, "$") {
- // beautify anonymous types
- switch t.Name[1] {
- case 's':
- fmt.Fprintf(out, "struct")
- case 'i':
- // ok, in most cases anonymous interface is an
- // empty interface, I'll just pretend that
- // it's always true
- fmt.Fprintf(out, "interface{}")
- }
- } else if !*g_debug && strings.HasPrefix(t.Name, "!") {
- // these are full package names for disambiguating and pretty
- // printing packages within packages, e.g.
- // !go/ast!ast vs. !github.com/nsf/my/ast!ast
- // another ugly hack, if people are punished in hell for ugly hacks
- // I'm screwed...
- emarkIdx := strings.LastIndex(t.Name, "!")
- path := t.Name[1:emarkIdx]
- alias := canonical_aliases[path]
- if alias == "" {
- alias = t.Name[emarkIdx+1:]
- }
- fmt.Fprintf(out, alias)
- } else {
- fmt.Fprintf(out, t.Name)
- }
- case *ast.ArrayType:
- al := ""
- if t.Len != nil {
- al = get_array_len(t.Len)
- }
- if al != "" {
- fmt.Fprintf(out, "[%s]", al)
- } else {
- fmt.Fprintf(out, "[]")
- }
- pretty_print_type_expr(out, t.Elt, canonical_aliases)
- case *ast.SelectorExpr:
- pretty_print_type_expr(out, t.X, canonical_aliases)
- fmt.Fprintf(out, ".%s", t.Sel.Name)
- case *ast.FuncType:
- fmt.Fprintf(out, "func(")
- pretty_print_func_field_list(out, t.Params, canonical_aliases)
- fmt.Fprintf(out, ")")
-
- buf := bytes.NewBuffer(make([]byte, 0, 256))
- nresults := pretty_print_func_field_list(buf, t.Results, canonical_aliases)
- if nresults > 0 {
- results := buf.String()
- if strings.IndexAny(results, ", ") != -1 {
- results = "(" + results + ")"
- }
- fmt.Fprintf(out, " %s", results)
- }
- case *ast.MapType:
- fmt.Fprintf(out, "map[")
- pretty_print_type_expr(out, t.Key, canonical_aliases)
- fmt.Fprintf(out, "]")
- pretty_print_type_expr(out, t.Value, canonical_aliases)
- case *ast.InterfaceType:
- fmt.Fprintf(out, "interface{}")
- case *ast.Ellipsis:
- fmt.Fprintf(out, "...")
- pretty_print_type_expr(out, t.Elt, canonical_aliases)
- case *ast.StructType:
- fmt.Fprintf(out, "struct")
- case *ast.ChanType:
- switch t.Dir {
- case ast.RECV:
- fmt.Fprintf(out, "<-chan ")
- case ast.SEND:
- fmt.Fprintf(out, "chan<- ")
- case ast.SEND | ast.RECV:
- fmt.Fprintf(out, "chan ")
- }
- pretty_print_type_expr(out, t.Value, canonical_aliases)
- case *ast.ParenExpr:
- fmt.Fprintf(out, "(")
- pretty_print_type_expr(out, t.X, canonical_aliases)
- fmt.Fprintf(out, ")")
- case *ast.BadExpr:
- // TODO: probably I should check that in a separate function
- // and simply discard declarations with BadExpr as a part of their
- // type
- default:
- // the element has some weird type, just ignore it
- }
-}
-
-func pretty_print_func_field_list(out io.Writer, f *ast.FieldList, canonical_aliases map[string]string) int {
- count := 0
- if f == nil {
- return count
- }
- for i, field := range f.List {
- // names
- if field.Names != nil {
- hasNonblank := false
- for j, name := range field.Names {
- if name.Name != "?" {
- hasNonblank = true
- fmt.Fprintf(out, "%s", name.Name)
- if j != len(field.Names)-1 {
- fmt.Fprintf(out, ", ")
- }
- }
- count++
- }
- if hasNonblank {
- fmt.Fprintf(out, " ")
- }
- } else {
- count++
- }
-
- // type
- pretty_print_type_expr(out, field.Type, canonical_aliases)
-
- // ,
- if i != len(f.List)-1 {
- fmt.Fprintf(out, ", ")
- }
- }
- return count
-}
-
-func ast_decl_names(d ast.Decl) []*ast.Ident {
- var names []*ast.Ident
-
- switch t := d.(type) {
- case *ast.GenDecl:
- switch t.Tok {
- case token.CONST:
- c := t.Specs[0].(*ast.ValueSpec)
- names = make([]*ast.Ident, len(c.Names))
- for i, name := range c.Names {
- names[i] = name
- }
- case token.TYPE:
- t := t.Specs[0].(*ast.TypeSpec)
- names = make([]*ast.Ident, 1)
- names[0] = t.Name
- case token.VAR:
- v := t.Specs[0].(*ast.ValueSpec)
- names = make([]*ast.Ident, len(v.Names))
- for i, name := range v.Names {
- names[i] = name
- }
- }
- case *ast.FuncDecl:
- names = make([]*ast.Ident, 1)
- names[0] = t.Name
- }
-
- return names
-}
-
-func ast_decl_values(d ast.Decl) []ast.Expr {
- // TODO: CONST values here too
- switch t := d.(type) {
- case *ast.GenDecl:
- switch t.Tok {
- case token.VAR:
- v := t.Specs[0].(*ast.ValueSpec)
- if v.Values != nil {
- return v.Values
- }
- }
- }
- return nil
-}
-
-func ast_decl_split(d ast.Decl) []ast.Decl {
- var decls []ast.Decl
- if t, ok := d.(*ast.GenDecl); ok {
- decls = make([]ast.Decl, len(t.Specs))
- for i, s := range t.Specs {
- decl := new(ast.GenDecl)
- *decl = *t
- decl.Specs = make([]ast.Spec, 1)
- decl.Specs[0] = s
- decls[i] = decl
- }
- } else {
- decls = make([]ast.Decl, 1)
- decls[0] = d
- }
- return decls
-}
-
-//-------------------------------------------------------------------------
-// decl_pack
-//-------------------------------------------------------------------------
-
-type decl_pack struct {
- names []*ast.Ident
- typ ast.Expr
- values []ast.Expr
-}
-
-type foreach_decl_struct struct {
- decl_pack
- decl ast.Decl
-}
-
-func (f *decl_pack) value(i int) ast.Expr {
- if f.values == nil {
- return nil
- }
- if len(f.values) > 1 {
- return f.values[i]
- }
- return f.values[0]
-}
-
-func (f *decl_pack) value_index(i int) (v ast.Expr, vi int) {
- // default: nil value
- v = nil
- vi = -1
-
- if f.values != nil {
- // A = B, if there is only one name, the value is solo too
- if len(f.names) == 1 {
- return f.values[0], -1
- }
-
- if len(f.values) > 1 {
- // in case if there are multiple values, it's a usual
- // multiassignment
- if i >= len(f.values) {
- i = len(f.values) - 1
- }
- v = f.values[i]
- } else {
- // in case if there is one value, but many names, it's
- // a tuple unpack.. use index here
- v = f.values[0]
- vi = i
- }
- }
- return
-}
-
-func (f *decl_pack) type_value_index(i int) (ast.Expr, ast.Expr, int) {
- if f.typ != nil {
- // If there is a type, we don't care about value, just return the type
- // and zero value.
- return f.typ, nil, -1
- }
-
- // And otherwise we simply return nil type and a valid value for later inferring.
- v, vi := f.value_index(i)
- return nil, v, vi
-}
-
-type foreach_decl_func func(data *foreach_decl_struct)
-
-func foreach_decl(decl ast.Decl, do foreach_decl_func) {
- decls := ast_decl_split(decl)
- var data foreach_decl_struct
- for _, decl := range decls {
- if !ast_decl_convertable(decl) {
- continue
- }
- data.names = ast_decl_names(decl)
- data.typ = ast_decl_type(decl)
- data.values = ast_decl_values(decl)
- data.decl = decl
-
- do(&data)
- }
-}
-
-//-------------------------------------------------------------------------
-// Built-in declarations
-//-------------------------------------------------------------------------
-
-var g_universe_scope = new_scope(nil)
-
-func init() {
- builtin := ast.NewIdent("built-in")
-
- add_type := func(name string) {
- d := new_decl(name, decl_type, g_universe_scope)
- d.typ = builtin
- g_universe_scope.add_named_decl(d)
- }
- add_type("bool")
- add_type("byte")
- add_type("complex64")
- add_type("complex128")
- add_type("float32")
- add_type("float64")
- add_type("int8")
- add_type("int16")
- add_type("int32")
- add_type("int64")
- add_type("string")
- add_type("uint8")
- add_type("uint16")
- add_type("uint32")
- add_type("uint64")
- add_type("int")
- add_type("uint")
- add_type("uintptr")
- add_type("rune")
-
- add_const := func(name string) {
- d := new_decl(name, decl_const, g_universe_scope)
- d.typ = builtin
- g_universe_scope.add_named_decl(d)
- }
- add_const("true")
- add_const("false")
- add_const("iota")
- add_const("nil")
-
- add_func := func(name, typ string) {
- d := new_decl(name, decl_func, g_universe_scope)
- d.typ = ast.NewIdent(typ)
- g_universe_scope.add_named_decl(d)
- }
- add_func("append", "func([]type, ...type) []type")
- add_func("cap", "func(container) int")
- add_func("close", "func(channel)")
- add_func("complex", "func(real, imag) complex")
- add_func("copy", "func(dst, src)")
- add_func("delete", "func(map[typeA]typeB, typeA)")
- add_func("imag", "func(complex)")
- add_func("len", "func(container) int")
- add_func("make", "func(type, len[, cap]) type")
- add_func("new", "func(type) *type")
- add_func("panic", "func(interface{})")
- add_func("print", "func(...interface{})")
- add_func("println", "func(...interface{})")
- add_func("real", "func(complex)")
- add_func("recover", "func() interface{}")
-
- // built-in error interface
- d := new_decl("error", decl_type, g_universe_scope)
- d.typ = &ast.InterfaceType{}
- d.children = make(map[string]*decl)
- d.children["Error"] = new_decl("Error", decl_func, g_universe_scope)
- d.children["Error"].typ = &ast.FuncType{
- Results: &ast.FieldList{
- List: []*ast.Field{
- {
- Type: ast.NewIdent("string"),
- },
- },
- },
- }
- g_universe_scope.add_named_decl(d)
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/declcache.go b/src/disposa.blue/margo/golang/internal/gocode/declcache.go
deleted file mode 100644
index 9841b731..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/declcache.go
+++ /dev/null
@@ -1,532 +0,0 @@
-package gocode
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
- "sync"
-)
-
-//-------------------------------------------------------------------------
-// []package_import
-//-------------------------------------------------------------------------
-
-type package_import struct {
- alias string
- abspath string
- path string
-}
-
-// Parses import declarations until the first non-import declaration and fills
-// `packages` array with import information.
-func collect_package_imports(filename string, decls []ast.Decl, context *package_lookup_context) []package_import {
- pi := make([]package_import, 0, 16)
- for _, decl := range decls {
- if gd, ok := decl.(*ast.GenDecl); ok && gd.Tok == token.IMPORT {
- for _, spec := range gd.Specs {
- imp := spec.(*ast.ImportSpec)
- path, alias := path_and_alias(imp)
- abspath, ok := abs_path_for_package(filename, path, context)
- if ok && alias != "_" {
- pi = append(pi, package_import{alias, abspath, path})
- }
- }
- } else {
- break
- }
- }
- return pi
-}
-
-//-------------------------------------------------------------------------
-// decl_file_cache
-//
-// Contains cache for top-level declarations of a file as well as its
-// contents, AST and import information.
-//-------------------------------------------------------------------------
-
-type decl_file_cache struct {
- name string // file name
- mtime int64 // last modification time
-
- decls map[string]*decl // top-level declarations
- error error // last error
- packages []package_import // import information
- filescope *scope
-
- fset *token.FileSet
- context *package_lookup_context
-}
-
-func new_decl_file_cache(name string, context *package_lookup_context) *decl_file_cache {
- return &decl_file_cache{
- name: name,
- context: context,
- }
-}
-
-func (f *decl_file_cache) update() {
- stat, err := os.Stat(f.name)
- if err != nil {
- f.decls = nil
- f.error = err
- f.fset = nil
- return
- }
-
- statmtime := stat.ModTime().UnixNano()
- if f.mtime == statmtime {
- return
- }
-
- f.mtime = statmtime
- f.read_file()
-}
-
-func (f *decl_file_cache) read_file() {
- var data []byte
- data, f.error = file_reader.read_file(f.name)
- if f.error != nil {
- return
- }
- data, _ = filter_out_shebang(data)
-
- f.process_data(data)
-}
-
-func (f *decl_file_cache) process_data(data []byte) {
- var file *ast.File
- f.fset = token.NewFileSet()
- file, f.error = parser.ParseFile(f.fset, "", data, 0)
- f.filescope = new_scope(nil)
- for _, d := range file.Decls {
- anonymify_ast(d, 0, f.filescope)
- }
- f.packages = collect_package_imports(f.name, file.Decls, f.context)
- f.decls = make(map[string]*decl, len(file.Decls))
- for _, decl := range file.Decls {
- append_to_top_decls(f.decls, decl, f.filescope)
- }
-}
-
-func append_to_top_decls(decls map[string]*decl, decl ast.Decl, scope *scope) {
- foreach_decl(decl, func(data *foreach_decl_struct) {
- class := ast_decl_class(data.decl)
- for i, name := range data.names {
- typ, v, vi := data.type_value_index(i)
-
- d := new_decl_full(name.Name, class, ast_decl_flags(data.decl), typ, v, vi, scope)
- if d == nil {
- return
- }
-
- methodof := method_of(decl)
- if methodof != "" {
- decl, ok := decls[methodof]
- if ok {
- decl.add_child(d)
- } else {
- decl = new_decl(methodof, decl_methods_stub, scope)
- decls[methodof] = decl
- decl.add_child(d)
- }
- } else {
- decl, ok := decls[d.name]
- if ok {
- decl.expand_or_replace(d)
- } else {
- decls[d.name] = d
- }
- }
- }
- })
-}
-
-func abs_path_for_package(filename, p string, context *package_lookup_context) (string, bool) {
- dir, _ := filepath.Split(filename)
- if len(p) == 0 {
- return "", false
- }
- if p[0] == '.' {
- return fmt.Sprintf("%s.a", filepath.Join(dir, p)), true
- }
- pkg, ok := find_go_dag_package(p, dir)
- if ok {
- return pkg, true
- }
- return find_global_file(p, context)
-}
-
-func path_and_alias(imp *ast.ImportSpec) (string, string) {
- path := ""
- if imp.Path != nil && len(imp.Path.Value) > 0 {
- path = string(imp.Path.Value)
- path = path[1 : len(path)-1]
- }
- alias := ""
- if imp.Name != nil {
- alias = imp.Name.Name
- }
- return path, alias
-}
-
-func find_go_dag_package(imp, filedir string) (string, bool) {
- // Support godag directory structure
- dir, pkg := filepath.Split(imp)
- godag_pkg := filepath.Join(filedir, "..", dir, "_obj", pkg+".a")
- if file_exists(godag_pkg) {
- return godag_pkg, true
- }
- return "", false
-}
-
-// autobuild compares the mod time of the source files of the package, and if any of them is newer
-// than the package object file will rebuild it.
-func autobuild(p *build.Package) error {
- if p.Dir == "" {
- return fmt.Errorf("no files to build")
- }
- ps, err := os.Stat(p.PkgObj)
- if err != nil {
- // Assume package file does not exist and build for the first time.
- return build_package(p)
- }
- pt := ps.ModTime()
- fs, err := readdir_lstat(p.Dir)
- if err != nil {
- return err
- }
- for _, f := range fs {
- if f.IsDir() {
- continue
- }
- if f.ModTime().After(pt) {
- // Source file is newer than package file; rebuild.
- return build_package(p)
- }
- }
- return nil
-}
-
-// build_package builds the package by calling `go install package/import`. If everything compiles
-// correctly, the newly compiled package should then be in the usual place in the `$GOPATH/pkg`
-// directory, and gocode will pick it up from there.
-func build_package(p *build.Package) error {
- if *g_debug {
- log.Printf("-------------------")
- log.Printf("rebuilding package %s", p.Name)
- log.Printf("package import: %s", p.ImportPath)
- log.Printf("package object: %s", p.PkgObj)
- log.Printf("package source dir: %s", p.Dir)
- log.Printf("package source files: %v", p.GoFiles)
- log.Printf("GOPATH: %v", g_daemon.context.GOPATH)
- log.Printf("GOROOT: %v", g_daemon.context.GOROOT)
- }
- env := os.Environ()
- for i, v := range env {
- if strings.HasPrefix(v, "GOPATH=") {
- env[i] = "GOPATH=" + g_daemon.context.GOPATH
- } else if strings.HasPrefix(v, "GOROOT=") {
- env[i] = "GOROOT=" + g_daemon.context.GOROOT
- }
- }
-
- cmd := exec.Command("go", "install", p.ImportPath)
- cmd.Env = env
-
- // TODO: Should read STDERR rather than STDOUT.
- out, err := cmd.CombinedOutput()
- if err != nil {
- return err
- }
- if *g_debug {
- log.Printf("build out: %s\n", string(out))
- }
- return nil
-}
-
-// executes autobuild function if autobuild option is enabled, logs error and
-// ignores it
-func try_autobuild(p *build.Package) {
- if g_config.Autobuild {
- err := autobuild(p)
- if err != nil && *g_debug {
- log.Printf("Autobuild error: %s\n", err)
- }
- }
-}
-
-func log_found_package_maybe(imp, pkgpath string) {
- if *g_debug {
- log.Printf("Found %q at %q\n", imp, pkgpath)
- }
-}
-
-func log_build_context(context *package_lookup_context) {
- log.Printf(" GOROOT: %s\n", context.GOROOT)
- log.Printf(" GOPATH: %s\n", context.GOPATH)
- log.Printf(" GOOS: %s\n", context.GOOS)
- log.Printf(" GOARCH: %s\n", context.GOARCH)
- log.Printf(" BzlProjectRoot: %q\n", context.BzlProjectRoot)
- log.Printf(" GBProjectRoot: %q\n", context.GBProjectRoot)
- log.Printf(" lib-path: %q\n", g_config.LibPath)
-}
-
-// find_global_file returns the file path of the compiled package corresponding to the specified
-// import, and a boolean stating whether such path is valid.
-// TODO: Return only one value, possibly empty string if not found.
-func find_global_file(imp string, context *package_lookup_context) (string, bool) {
- // gocode synthetically generates the builtin package
- // "unsafe", since the "unsafe.a" package doesn't really exist.
- // Thus, when the user request for the package "unsafe" we
- // would return synthetic global file that would be used
- // just as a key name to find this synthetic package
- if imp == "unsafe" {
- return "unsafe", true
- }
-
- pkgfile := fmt.Sprintf("%s.a", imp)
-
- // if lib-path is defined, use it
- if g_config.LibPath != "" {
- for _, p := range filepath.SplitList(g_config.LibPath) {
- pkg_path := filepath.Join(p, pkgfile)
- if file_exists(pkg_path) {
- log_found_package_maybe(imp, pkg_path)
- return pkg_path, true
- }
- // Also check the relevant pkg/OS_ARCH dir for the libpath, if provided.
- pkgdir := fmt.Sprintf("%s_%s", context.GOOS, context.GOARCH)
- pkg_path = filepath.Join(p, "pkg", pkgdir, pkgfile)
- if file_exists(pkg_path) {
- log_found_package_maybe(imp, pkg_path)
- return pkg_path, true
- }
- }
- }
-
- // gb-specific lookup mode, only if the root dir was found
- if g_config.PackageLookupMode == "gb" && context.GBProjectRoot != "" {
- root := context.GBProjectRoot
- pkgdir := filepath.Join(root, "pkg", context.GOOS+"-"+context.GOARCH)
- if !is_dir(pkgdir) {
- pkgdir = filepath.Join(root, "pkg", context.GOOS+"-"+context.GOARCH+"-race")
- }
- pkg_path := filepath.Join(pkgdir, pkgfile)
- if file_exists(pkg_path) {
- log_found_package_maybe(imp, pkg_path)
- return pkg_path, true
- }
- }
-
- // bzl-specific lookup mode, only if the root dir was found
- if g_config.PackageLookupMode == "bzl" && context.BzlProjectRoot != "" {
- var root, impath string
- if strings.HasPrefix(imp, g_config.CustomPkgPrefix+"/") {
- root = filepath.Join(context.BzlProjectRoot, "bazel-bin")
- impath = imp[len(g_config.CustomPkgPrefix)+1:]
- } else if g_config.CustomVendorDir != "" {
- // Try custom vendor dir.
- root = filepath.Join(context.BzlProjectRoot, "bazel-bin", g_config.CustomVendorDir)
- impath = imp
- }
-
- if root != "" && impath != "" {
- // There might be more than one ".a" files in the pkg path with bazel.
- // But the best practice is to keep one go_library build target in each
- // pakcage directory so that it follows the standard Go package
- // structure. Thus here we assume there is at most one ".a" file existing
- // in the pkg path.
- if d, err := os.Open(filepath.Join(root, impath)); err == nil {
- defer d.Close()
-
- if fis, err := d.Readdir(-1); err == nil {
- for _, fi := range fis {
- if !fi.IsDir() && filepath.Ext(fi.Name()) == ".a" {
- pkg_path := filepath.Join(root, impath, fi.Name())
- log_found_package_maybe(imp, pkg_path)
- return pkg_path, true
- }
- }
- }
- }
- }
- }
-
- if context.CurrentPackagePath != "" {
- // Try vendor path first, see GO15VENDOREXPERIMENT.
- // We don't check this environment variable however, seems like there is
- // almost no harm in doing so (well.. if you experiment with vendoring,
- // gocode will fail after enabling/disabling the flag, and you'll be
- // forced to get rid of vendor binaries). But asking users to set this
- // env var is up will bring more trouble. Because we also need to pass
- // it from client to server, make sure their editors set it, etc.
- // So, whatever, let's just pretend it's always on.
- package_path := context.CurrentPackagePath
- for {
- limp := filepath.Join(package_path, "vendor", imp)
- if p, err := context.Import(limp, "", build.AllowBinary|build.FindOnly); err == nil {
- try_autobuild(p)
- if file_exists(p.PkgObj) {
- log_found_package_maybe(imp, p.PkgObj)
- return p.PkgObj, true
- }
- }
- if package_path == "" {
- break
- }
- next_path := filepath.Dir(package_path)
- // let's protect ourselves from inf recursion here
- if next_path == package_path {
- break
- }
- package_path = next_path
- }
- }
-
- if p, err := context.Import(imp, "", build.AllowBinary|build.FindOnly); err == nil {
- try_autobuild(p)
- if file_exists(p.PkgObj) {
- log_found_package_maybe(imp, p.PkgObj)
- return p.PkgObj, true
- }
- }
-
- if *g_debug {
- log.Printf("Import path %q was not resolved\n", imp)
- log.Println("Gocode's build context is:")
- log_build_context(context)
- }
- return "", false
-}
-
-func package_name(file *ast.File) string {
- if file.Name != nil {
- return file.Name.Name
- }
- return ""
-}
-
-//-------------------------------------------------------------------------
-// decl_cache
-//
-// Thread-safe collection of DeclFileCache entities.
-//-------------------------------------------------------------------------
-
-type package_lookup_context struct {
- build.Context
- BzlProjectRoot string
- GBProjectRoot string
- CurrentPackagePath string
-}
-
-// gopath returns the list of Go path directories.
-func (ctxt *package_lookup_context) gopath() []string {
- var all []string
- for _, p := range filepath.SplitList(ctxt.GOPATH) {
- if p == "" || p == ctxt.GOROOT {
- // Empty paths are uninteresting.
- // If the path is the GOROOT, ignore it.
- // People sometimes set GOPATH=$GOROOT.
- // Do not get confused by this common mistake.
- continue
- }
- if strings.HasPrefix(p, "~") {
- // Path segments starting with ~ on Unix are almost always
- // users who have incorrectly quoted ~ while setting GOPATH,
- // preventing it from expanding to $HOME.
- // The situation is made more confusing by the fact that
- // bash allows quoted ~ in $PATH (most shells do not).
- // Do not get confused by this, and do not try to use the path.
- // It does not exist, and printing errors about it confuses
- // those users even more, because they think "sure ~ exists!".
- // The go command diagnoses this situation and prints a
- // useful error.
- // On Windows, ~ is used in short names, such as c:\progra~1
- // for c:\program files.
- continue
- }
- all = append(all, p)
- }
- return all
-}
-
-func (ctxt *package_lookup_context) pkg_dirs() (string, []string) {
- pkgdir := fmt.Sprintf("%s_%s", ctxt.GOOS, ctxt.GOARCH)
-
- var currentPackagePath string
- var all []string
- if ctxt.GOROOT != "" {
- dir := filepath.Join(ctxt.GOROOT, "pkg", pkgdir)
- if is_dir(dir) {
- all = append(all, dir)
- }
- }
-
- switch g_config.PackageLookupMode {
- case "go":
- currentPackagePath = ctxt.CurrentPackagePath
- for _, p := range ctxt.gopath() {
- dir := filepath.Join(p, "pkg", pkgdir)
- if is_dir(dir) {
- all = append(all, dir)
- }
- dir = filepath.Join(dir, currentPackagePath, "vendor")
- if is_dir(dir) {
- all = append(all, dir)
- }
- }
- case "gb":
- if ctxt.GBProjectRoot != "" {
- pkgdir := fmt.Sprintf("%s-%s", ctxt.GOOS, ctxt.GOARCH)
- if !is_dir(pkgdir) {
- pkgdir = fmt.Sprintf("%s-%s-race", ctxt.GOOS, ctxt.GOARCH)
- }
- dir := filepath.Join(ctxt.GBProjectRoot, "pkg", pkgdir)
- if is_dir(dir) {
- all = append(all, dir)
- }
- }
- case "bzl":
- // TODO: Support bazel mode
- }
- return currentPackagePath, all
-}
-
-type decl_cache struct {
- cache map[string]*decl_file_cache
- context *package_lookup_context
- sync.Mutex
-}
-
-func new_decl_cache(context *package_lookup_context) *decl_cache {
- return &decl_cache{
- cache: make(map[string]*decl_file_cache),
- context: context,
- }
-}
-
-func (c *decl_cache) get(filename string) *decl_file_cache {
- c.Lock()
- defer c.Unlock()
-
- f, ok := c.cache[filename]
- if !ok {
- f = new_decl_file_cache(filename, c.context)
- c.cache[filename] = f
- }
- return f
-}
-
-func (c *decl_cache) get_and_update(filename string) *decl_file_cache {
- f := c.get(filename)
- f.update()
- return f
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/formatters.go b/src/disposa.blue/margo/golang/internal/gocode/formatters.go
deleted file mode 100644
index 9cf71a8b..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/formatters.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package gocode
-
-import (
- "fmt"
- "strings"
-)
-
-//-------------------------------------------------------------------------
-// formatter interfaces
-//-------------------------------------------------------------------------
-
-type formatter interface {
- write_candidates(candidates []candidate, num int)
-}
-
-//-------------------------------------------------------------------------
-// nice_formatter (just for testing, simple textual output)
-//-------------------------------------------------------------------------
-
-type nice_formatter struct{}
-
-func (*nice_formatter) write_candidates(candidates []candidate, num int) {
- if candidates == nil {
- fmt.Printf("Nothing to complete.\n")
- return
- }
-
- fmt.Printf("Found %d candidates:\n", len(candidates))
- for _, c := range candidates {
- abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
- if c.Class == decl_func {
- abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
- }
- fmt.Printf(" %s\n", abbr)
- }
-}
-
-//-------------------------------------------------------------------------
-// vim_formatter
-//-------------------------------------------------------------------------
-
-type vim_formatter struct{}
-
-func (*vim_formatter) write_candidates(candidates []candidate, num int) {
- if candidates == nil {
- fmt.Print("[0, []]")
- return
- }
-
- fmt.Printf("[%d, [", num)
- for i, c := range candidates {
- if i != 0 {
- fmt.Printf(", ")
- }
-
- word := c.Name
- if c.Class == decl_func {
- word += "("
- if strings.HasPrefix(c.Type, "func()") {
- word += ")"
- }
- }
-
- abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
- if c.Class == decl_func {
- abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
- }
- fmt.Printf("{'word': '%s', 'abbr': '%s', 'info': '%s'}", word, abbr, abbr)
- }
- fmt.Printf("]]")
-}
-
-//-------------------------------------------------------------------------
-// godit_formatter
-//-------------------------------------------------------------------------
-
-type godit_formatter struct{}
-
-func (*godit_formatter) write_candidates(candidates []candidate, num int) {
- fmt.Printf("%d,,%d\n", num, len(candidates))
- for _, c := range candidates {
- contents := c.Name
- if c.Class == decl_func {
- contents += "("
- if strings.HasPrefix(c.Type, "func()") {
- contents += ")"
- }
- }
-
- display := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
- if c.Class == decl_func {
- display = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
- }
- fmt.Printf("%s,,%s\n", display, contents)
- }
-}
-
-//-------------------------------------------------------------------------
-// emacs_formatter
-//-------------------------------------------------------------------------
-
-type emacs_formatter struct{}
-
-func (*emacs_formatter) write_candidates(candidates []candidate, num int) {
- for _, c := range candidates {
- var hint string
- switch {
- case c.Class == decl_func:
- hint = c.Type
- case c.Type == "":
- hint = c.Class.String()
- default:
- hint = c.Class.String() + " " + c.Type
- }
- fmt.Printf("%s,,%s\n", c.Name, hint)
- }
-}
-
-//-------------------------------------------------------------------------
-// csv_formatter
-//-------------------------------------------------------------------------
-
-type csv_formatter struct{}
-
-func (*csv_formatter) write_candidates(candidates []candidate, num int) {
- for _, c := range candidates {
- fmt.Printf("%s,,%s,,%s\n", c.Class, c.Name, c.Type)
- }
-}
-
-//-------------------------------------------------------------------------
-// csv_with_package_formatter
-//-------------------------------------------------------------------------
-
-type csv_with_package_formatter struct{}
-
-func (*csv_with_package_formatter) write_candidates(candidates []candidate, num int) {
- for _, c := range candidates {
- fmt.Printf("%s,,%s,,%s,,%s\n", c.Class, c.Name, c.Type, c.Package)
- }
-}
-
-//-------------------------------------------------------------------------
-// json_formatter
-//-------------------------------------------------------------------------
-
-type json_formatter struct{}
-
-func (*json_formatter) write_candidates(candidates []candidate, num int) {
- if candidates == nil {
- fmt.Print("[]")
- return
- }
-
- fmt.Printf(`[%d, [`, num)
- for i, c := range candidates {
- if i != 0 {
- fmt.Printf(", ")
- }
- fmt.Printf(`{"class": "%s", "name": "%s", "type": "%s", "package": "%s"}`,
- c.Class, c.Name, c.Type, c.Package)
- }
- fmt.Print("]]")
-}
-
-//-------------------------------------------------------------------------
-
-func get_formatter(name string) formatter {
- switch name {
- case "vim":
- return new(vim_formatter)
- case "emacs":
- return new(emacs_formatter)
- case "nice":
- return new(nice_formatter)
- case "csv":
- return new(csv_formatter)
- case "csv-with-package":
- return new(csv_with_package_formatter)
- case "json":
- return new(json_formatter)
- case "godit":
- return new(godit_formatter)
- }
- return new(nice_formatter)
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/generate._margo_.go b/src/disposa.blue/margo/golang/internal/gocode/generate._margo_.go
deleted file mode 100644
index 9291e9d1..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/generate._margo_.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// +build generate
-
-package main
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/format"
- "go/parser"
- "go/token"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-)
-
-func main() {
- if err := gen(); err != nil {
- log.Fatal(err)
- }
-}
-
-func gen() error {
- wd, err := os.Getwd()
- if err != nil {
- return fmt.Errorf("Cannot get wd: %s", err)
- }
-
- defer func() {
- statusCmd := exec.Command("git", "status", wd)
- statusCmd.Stdin = os.Stdin
- statusCmd.Stdout = os.Stdout
- statusCmd.Stderr = os.Stderr
- statusCmd.Run()
- }()
-
- temp, err := ioutil.TempDir(wd, ".margo.")
- if err != nil {
- return fmt.Errorf("Cannot create temp dir: %s", err)
- }
- defer os.RemoveAll(temp)
-
- cloneDir := filepath.Join(temp, "gocode-temp")
- cloneCmd := exec.Command("git", "clone", "--depth", "1", "https://github.com/nsf/gocode", cloneDir)
- cloneCmd.Stdin = os.Stdin
- cloneCmd.Stdout = os.Stdout
- cloneCmd.Stderr = os.Stderr
- if err := cloneCmd.Run(); err != nil {
- return fmt.Errorf("Cannot clone gocode: %s", err)
- }
-
- origPkg, err := build.Default.ImportDir(wd, build.ImportComment)
- if err != nil {
- return fmt.Errorf("Cannot import bundled pakage: %s", err)
- }
-
- fset := token.NewFileSet()
- pkgs, err := parser.ParseDir(fset, cloneDir, nil, parser.ParseComments)
- bundlePkg := pkgs["main"]
- if err != nil || bundlePkg == nil {
- return fmt.Errorf("Cannot parse gocode pakage: %s", err)
- }
- save := func(fn string, af *ast.File) error {
- dst, err := os.Create(filepath.Join(wd, filepath.Base(fn)))
- if err != nil {
- return fmt.Errorf("Cannot create bundle file: %s", err)
- }
- defer dst.Close()
-
- if err := format.Node(dst, fset, af); err != nil {
- return fmt.Errorf("Cannot fmt file: %s: %s", fn, err)
- }
- return nil
- }
-
- for _, fn := range origPkg.GoFiles {
- if strings.HasSuffix(fn, "._margo_.go") {
- continue
- }
- if err := os.Remove(fn); err != nil {
- return fmt.Errorf("Cannot remove: %s: %s", fn, err)
- }
- }
- for fn, p := range bundlePkg.Files {
- p.Name = ast.NewIdent("gocode")
- if err := save(fn, p); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/gocode.go b/src/disposa.blue/margo/golang/internal/gocode/gocode.go
deleted file mode 100644
index 9a4ef021..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/gocode.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package gocode
-
-import (
- "flag"
- "fmt"
- "log"
- "net/http"
- _ "net/http/pprof"
- "os"
- "path/filepath"
-)
-
-var (
- g_is_server = flag.Bool("s", false, "run a server instead of a client")
- g_format = flag.String("f", "nice", "output format (vim | emacs | nice | csv | csv-with-package | json)")
- g_input = flag.String("in", "", "use this file instead of stdin input")
- g_sock = create_sock_flag("sock", "socket type (unix | tcp)")
- g_addr = flag.String("addr", "127.0.0.1:37373", "address for tcp socket")
- g_debug = flag.Bool("debug", false, "enable server-side debug mode")
- g_profile = flag.Int("profile", 0, "port on which to expose profiling information for pprof; 0 to disable profiling")
-)
-
-func get_socket_filename() string {
- user := os.Getenv("USER")
- if user == "" {
- user = "all"
- }
- return filepath.Join(os.TempDir(), fmt.Sprintf("gocode-daemon.%s", user))
-}
-
-func show_usage() {
- fmt.Fprintf(os.Stderr,
- "Usage: %s [-s] [-f=] [-in=] [-sock=] [-addr=]\n"+
- " []\n\n",
- os.Args[0])
- fmt.Fprintf(os.Stderr,
- "Flags:\n")
- flag.PrintDefaults()
- fmt.Fprintf(os.Stderr,
- "\nCommands:\n"+
- " autocomplete [] main autocompletion command\n"+
- " close close the gocode daemon\n"+
- " drop-cache drop gocode daemon's cache\n"+
- " options list config options (extended)\n"+
- " set [ []] list or set config options\n"+
- " status gocode daemon status report\n"+
- "")
-}
-
-func main() {
- flag.Usage = show_usage
- flag.Parse()
-
- var retval int
- if *g_is_server {
- go func() {
- if *g_profile <= 0 {
- return
- }
- addr := fmt.Sprintf("localhost:%d", *g_profile)
- // Use the following commands to profile the binary:
- // go tool pprof http://localhost:6060/debug/pprof/profile # 30-second CPU profile
- // go tool pprof http://localhost:6060/debug/pprof/heap # heap profile
- // go tool pprof http://localhost:6060/debug/pprof/block # goroutine blocking profile
- // See http://blog.golang.org/profiling-go-programs for more info.
- log.Printf("enabling profiler on %s", addr)
- log.Print(http.ListenAndServe(addr, nil))
- }()
- retval = do_server()
- } else {
- retval = do_client()
- }
- os.Exit(retval)
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/os_posix.go b/src/disposa.blue/margo/golang/internal/gocode/os_posix.go
deleted file mode 100644
index 1562792a..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/os_posix.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// +build !windows
-
-package gocode
-
-import (
- "flag"
- "os"
- "os/exec"
- "path/filepath"
-)
-
-func create_sock_flag(name, desc string) *string {
- return flag.String(name, "unix", desc)
-}
-
-// Full path of the current executable
-func get_executable_filename() string {
- // try readlink first
- path, err := os.Readlink("/proc/self/exe")
- if err == nil {
- return path
- }
- // use argv[0]
- path = os.Args[0]
- if !filepath.IsAbs(path) {
- cwd, _ := os.Getwd()
- path = filepath.Join(cwd, path)
- }
- if file_exists(path) {
- return path
- }
- // Fallback : use "gocode" and assume we are in the PATH...
- path, err = exec.LookPath("gocode")
- if err == nil {
- return path
- }
- return ""
-}
-
-// config location
-
-func config_dir() string {
- return filepath.Join(xdg_home_dir(), "gocode")
-}
-
-func config_file() string {
- return filepath.Join(xdg_home_dir(), "gocode", "config.json")
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/os_windows.go b/src/disposa.blue/margo/golang/internal/gocode/os_windows.go
deleted file mode 100644
index 2ad2de54..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/os_windows.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package gocode
-
-import (
- "flag"
- "fmt"
- "path/filepath"
- "syscall"
- "unsafe"
-)
-
-var (
- shell32 = syscall.NewLazyDLL("shell32.dll")
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
-)
-
-var (
- proc_sh_get_folder_path = shell32.NewProc("SHGetFolderPathW")
- proc_get_module_file_name = kernel32.NewProc("GetModuleFileNameW")
-)
-
-func create_sock_flag(name, desc string) *string {
- return flag.String(name, "tcp", desc)
-}
-
-// Full path of the current executable
-func get_executable_filename() string {
- b := make([]uint16, syscall.MAX_PATH)
- ret, _, err := syscall.Syscall(proc_get_module_file_name.Addr(), 3,
- 0, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
- if int(ret) == 0 {
- panic(fmt.Sprintf("GetModuleFileNameW : err %d", int(err)))
- }
- return syscall.UTF16ToString(b)
-}
-
-const (
- csidl_appdata = 0x1a
-)
-
-func get_appdata_folder_path() string {
- b := make([]uint16, syscall.MAX_PATH)
- ret, _, err := syscall.Syscall6(proc_sh_get_folder_path.Addr(), 5,
- 0, csidl_appdata, 0, 0, uintptr(unsafe.Pointer(&b[0])), 0)
- if int(ret) != 0 {
- panic(fmt.Sprintf("SHGetFolderPathW : err %d", int(err)))
- }
- return syscall.UTF16ToString(b)
-}
-
-func config_dir() string {
- return filepath.Join(get_appdata_folder_path(), "gocode")
-}
-
-func config_file() string {
- return filepath.Join(get_appdata_folder_path(), "gocode", "config.json")
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/package.go b/src/disposa.blue/margo/golang/internal/gocode/package.go
deleted file mode 100644
index 59928df7..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/package.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "os"
- "strings"
-)
-
-type package_parser interface {
- parse_export(callback func(pkg string, decl ast.Decl))
-}
-
-//-------------------------------------------------------------------------
-// package_file_cache
-//
-// Structure that represents a cache for an imported pacakge. In other words
-// these are the contents of an archive (*.a) file.
-//-------------------------------------------------------------------------
-
-type package_file_cache struct {
- name string // file name
- import_name string
- mtime int64
- defalias string
-
- scope *scope
- main *decl // package declaration
- others map[string]*decl
-}
-
-func new_package_file_cache(absname, name string) *package_file_cache {
- m := new(package_file_cache)
- m.name = absname
- m.import_name = name
- m.mtime = 0
- m.defalias = ""
- return m
-}
-
-// Creates a cache that stays in cache forever. Useful for built-in packages.
-func new_package_file_cache_forever(name, defalias string) *package_file_cache {
- m := new(package_file_cache)
- m.name = name
- m.mtime = -1
- m.defalias = defalias
- return m
-}
-
-func (m *package_file_cache) find_file() string {
- if file_exists(m.name) {
- return m.name
- }
-
- n := len(m.name)
- filename := m.name[:n-1] + "6"
- if file_exists(filename) {
- return filename
- }
-
- filename = m.name[:n-1] + "8"
- if file_exists(filename) {
- return filename
- }
-
- filename = m.name[:n-1] + "5"
- if file_exists(filename) {
- return filename
- }
- return m.name
-}
-
-func (m *package_file_cache) update_cache() {
- if m.mtime == -1 {
- return
- }
- fname := m.find_file()
- stat, err := os.Stat(fname)
- if err != nil {
- return
- }
-
- statmtime := stat.ModTime().UnixNano()
- if m.mtime != statmtime {
- m.mtime = statmtime
-
- data, err := file_reader.read_file(fname)
- if err != nil {
- return
- }
- m.process_package_data(data)
- }
-}
-
-func (m *package_file_cache) process_package_data(data []byte) {
- m.scope = new_named_scope(g_universe_scope, m.name)
-
- // find import section
- i := bytes.Index(data, []byte{'\n', '$', '$'})
- if i == -1 {
- panic(fmt.Sprintf("Can't find the import section in the package file %s", m.name))
- }
- data = data[i+len("\n$$"):]
-
- // main package
- m.main = new_decl(m.name, decl_package, nil)
- // create map for other packages
- m.others = make(map[string]*decl)
-
- var pp package_parser
- if data[0] == 'B' {
- // binary format, skip 'B\n'
- data = data[2:]
- var p gc_bin_parser
- p.init(data, m)
- pp = &p
- } else {
- // textual format, find the beginning of the package clause
- i = bytes.Index(data, []byte{'p', 'a', 'c', 'k', 'a', 'g', 'e'})
- if i == -1 {
- panic("Can't find the package clause")
- }
- data = data[i:]
-
- var p gc_parser
- p.init(data, m)
- pp = &p
- }
-
- prefix := "!" + m.name + "!"
- pp.parse_export(func(pkg string, decl ast.Decl) {
- anonymify_ast(decl, decl_foreign, m.scope)
- if pkg == "" || strings.HasPrefix(pkg, prefix) {
- // main package
- add_ast_decl_to_package(m.main, decl, m.scope)
- } else {
- // others
- if _, ok := m.others[pkg]; !ok {
- m.others[pkg] = new_decl(pkg, decl_package, nil)
- }
- add_ast_decl_to_package(m.others[pkg], decl, m.scope)
- }
- })
-
- // hack, add ourselves to the package scope
- mainName := "!" + m.name + "!" + m.defalias
- m.add_package_to_scope(mainName, m.name)
-
- // replace dummy package decls in package scope to actual packages
- for key := range m.scope.entities {
- if !strings.HasPrefix(key, "!") {
- continue
- }
- pkg, ok := m.others[key]
- if !ok && key == mainName {
- pkg = m.main
- }
- m.scope.replace_decl(key, pkg)
- }
-}
-
-func (m *package_file_cache) add_package_to_scope(alias, realname string) {
- d := new_decl(realname, decl_package, nil)
- m.scope.add_decl(alias, d)
-}
-
-func add_ast_decl_to_package(pkg *decl, decl ast.Decl, scope *scope) {
- foreach_decl(decl, func(data *foreach_decl_struct) {
- class := ast_decl_class(data.decl)
- for i, name := range data.names {
- typ, v, vi := data.type_value_index(i)
-
- d := new_decl_full(name.Name, class, decl_foreign|ast_decl_flags(data.decl), typ, v, vi, scope)
- if d == nil {
- return
- }
-
- if !name.IsExported() && d.class != decl_type {
- return
- }
-
- methodof := method_of(data.decl)
- if methodof != "" {
- decl := pkg.find_child(methodof)
- if decl != nil {
- decl.add_child(d)
- } else {
- decl = new_decl(methodof, decl_methods_stub, scope)
- decl.add_child(d)
- pkg.add_child(decl)
- }
- } else {
- decl := pkg.find_child(d.name)
- if decl != nil {
- decl.expand_or_replace(d)
- } else {
- pkg.add_child(d)
- }
- }
- }
- })
-}
-
-//-------------------------------------------------------------------------
-// package_cache
-//-------------------------------------------------------------------------
-
-type package_cache map[string]*package_file_cache
-
-func new_package_cache() package_cache {
- m := make(package_cache)
-
- // add built-in "unsafe" package
- m.add_builtin_unsafe_package()
-
- return m
-}
-
-// Function fills 'ps' set with packages from 'packages' import information.
-// In case if package is not in the cache, it creates one and adds one to the cache.
-func (c package_cache) append_packages(ps map[string]*package_file_cache, pkgs []package_import) {
- for _, m := range pkgs {
- if _, ok := ps[m.abspath]; ok {
- continue
- }
-
- if mod, ok := c[m.abspath]; ok {
- ps[m.abspath] = mod
- } else {
- mod = new_package_file_cache(m.abspath, m.path)
- ps[m.abspath] = mod
- c[m.abspath] = mod
- }
- }
-}
-
-var g_builtin_unsafe_package = []byte(`
-import
-$$
-package unsafe
- type @"".Pointer uintptr
- func @"".Offsetof (? any) uintptr
- func @"".Sizeof (? any) uintptr
- func @"".Alignof (? any) uintptr
-
-$$
-`)
-
-func (c package_cache) add_builtin_unsafe_package() {
- pkg := new_package_file_cache_forever("unsafe", "unsafe")
- pkg.process_package_data(g_builtin_unsafe_package)
- c["unsafe"] = pkg
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/package_bin.go b/src/disposa.blue/margo/golang/internal/gocode/package_bin.go
deleted file mode 100644
index 576f1ba0..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/package_bin.go
+++ /dev/null
@@ -1,829 +0,0 @@
-package gocode
-
-import (
- "encoding/binary"
- "fmt"
- "go/ast"
- "go/token"
- "strconv"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-//-------------------------------------------------------------------------
-// gc_bin_parser
-//
-// The following part of the code may contain portions of the code from the Go
-// standard library, which tells me to retain their copyright notice:
-//
-// Copyright (c) 2012 The Go Authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//-------------------------------------------------------------------------
-
-type gc_bin_parser struct {
- data []byte
- buf []byte // for reading strings
- version int // export format version
-
- // object lists
- strList []string // in order of appearance
- pathList []string // in order of appearance
- pkgList []string // in order of appearance
- typList []ast.Expr // in order of appearance
- callback func(pkg string, decl ast.Decl)
- pfc *package_file_cache
- trackAllTypes bool
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
-
- // debugging support
- debugFormat bool
- read int // bytes read
-
-}
-
-func (p *gc_bin_parser) init(data []byte, pfc *package_file_cache) {
- p.data = data
- p.version = -1 // unknown version
- p.strList = []string{""} // empty string is mapped to 0
- p.pathList = []string{""} // empty string is mapped to 0
- p.pfc = pfc
-}
-
-func (p *gc_bin_parser) parse_export(callback func(string, ast.Decl)) {
- p.callback = callback
-
- // read version info
- var versionstr string
- if b := p.rawByte(); b == 'c' || b == 'd' {
- // Go1.7 encoding; first byte encodes low-level
- // encoding format (compact vs debug).
- // For backward-compatibility only (avoid problems with
- // old installed packages). Newly compiled packages use
- // the extensible format string.
- // TODO(gri) Remove this support eventually; after Go1.8.
- if b == 'd' {
- p.debugFormat = true
- }
- p.trackAllTypes = p.rawByte() == 'a'
- p.posInfoFormat = p.int() != 0
- versionstr = p.string()
- if versionstr == "v1" {
- p.version = 0
- }
- } else {
- // Go1.8 extensible encoding
- // read version string and extract version number (ignore anything after the version number)
- versionstr = p.rawStringln(b)
- if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
- if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
- p.version = v
- }
- }
- }
-
- // read version specific flags - extend as necessary
- switch p.version {
- // case 6:
- // ...
- // fallthrough
- case 5, 4, 3, 2, 1:
- p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
- p.trackAllTypes = p.int() != 0
- p.posInfoFormat = p.int() != 0
- case 0:
- // Go1.7 encoding format - nothing to do here
- default:
- panic(fmt.Errorf("unknown export format version %d (%q)", p.version, versionstr))
- }
-
- // --- generic export data ---
-
- // populate typList with predeclared "known" types
- p.typList = append(p.typList, predeclared...)
-
- // read package data
- pkgName := p.pkg()
- p.pfc.defalias = pkgName[strings.LastIndex(pkgName, "!")+1:]
-
- // read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go)
- objcount := 0
- for {
- tag := p.tagOrIndex()
- if tag == endTag {
- break
- }
- p.obj(tag)
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- panic(fmt.Sprintf("got %d objects; want %d", objcount, count))
- }
-}
-
-func (p *gc_bin_parser) pkg() string {
- // if the package was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.pkgList[i]
- }
-
- // otherwise, i is the package tag (< 0)
- if i != packageTag {
- panic(fmt.Sprintf("unexpected package tag %d version %d", i, p.version))
- }
-
- // read package data
- name := p.string()
- var path string
- if p.version >= 5 {
- path = p.path()
- } else {
- path = p.string()
- }
-
- // we should never see an empty package name
- if name == "" {
- panic("empty package name in import")
- }
-
- // an empty path denotes the package we are currently importing;
- // it must be the first package we see
- if (path == "") != (len(p.pkgList) == 0) {
- panic(fmt.Sprintf("package path %q for pkg index %d", path, len(p.pkgList)))
- }
-
- var fullName string
- if path != "" {
- fullName = "!" + path + "!" + name
- p.pfc.add_package_to_scope(fullName, path)
- } else {
- fullName = "!" + p.pfc.name + "!" + name
- }
-
- // if the package was imported before, use that one; otherwise create a new one
- p.pkgList = append(p.pkgList, fullName)
- return p.pkgList[len(p.pkgList)-1]
-}
-
-func (p *gc_bin_parser) obj(tag int) {
- switch tag {
- case constTag:
- p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ("")
- p.skipValue() // ignore const value, gocode's not interested
- p.callback(pkg, &ast.GenDecl{
- Tok: token.CONST,
- Specs: []ast.Spec{
- &ast.ValueSpec{
- Names: []*ast.Ident{ast.NewIdent(name)},
- Type: typ,
- Values: []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}},
- },
- },
- })
-
- case aliasTag:
- // TODO(gri) verify type alias hookup is correct
- p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ("")
- p.callback(pkg, &ast.GenDecl{
- Tok: token.TYPE,
- Specs: []ast.Spec{typeAliasSpec(name, typ)},
- })
-
- case typeTag:
- _ = p.typ("")
-
- case varTag:
- p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ("")
- p.callback(pkg, &ast.GenDecl{
- Tok: token.VAR,
- Specs: []ast.Spec{
- &ast.ValueSpec{
- Names: []*ast.Ident{ast.NewIdent(name)},
- Type: typ,
- },
- },
- })
-
- case funcTag:
- p.pos()
- pkg, name := p.qualifiedName()
- params := p.paramList()
- results := p.paramList()
- p.callback(pkg, &ast.FuncDecl{
- Name: ast.NewIdent(name),
- Type: &ast.FuncType{Params: params, Results: results},
- })
-
- default:
- panic(fmt.Sprintf("unexpected object tag %d", tag))
- }
-}
-
-const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
-
-func (p *gc_bin_parser) pos() {
- if !p.posInfoFormat {
- return
- }
-
- file := p.prevFile
- line := p.prevLine
- delta := p.int()
- line += delta
- if p.version >= 5 {
- if delta == deltaNewFile {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.path()
- line = n
- }
- }
- } else {
- if delta == 0 {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.prevFile[:n] + p.string()
- line = p.int()
- }
- }
- }
- p.prevFile = file
- p.prevLine = line
-
- // TODO(gri) register new position
-}
-
-func (p *gc_bin_parser) qualifiedName() (pkg string, name string) {
- name = p.string()
- pkg = p.pkg()
- return pkg, name
-}
-
-func (p *gc_bin_parser) reserveMaybe() int {
- if p.trackAllTypes {
- p.typList = append(p.typList, nil)
- return len(p.typList) - 1
- } else {
- return -1
- }
-}
-
-func (p *gc_bin_parser) recordMaybe(idx int, t ast.Expr) ast.Expr {
- if idx == -1 {
- return t
- }
- p.typList[idx] = t
- return t
-}
-
-func (p *gc_bin_parser) record(t ast.Expr) {
- p.typList = append(p.typList, t)
-}
-
-// parent is the package which declared the type; parent == nil means
-// the package currently imported. The parent package is needed for
-// exported struct fields and interface methods which don't contain
-// explicit package information in the export data.
-func (p *gc_bin_parser) typ(parent string) ast.Expr {
- // if the type was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.typList[i]
- }
-
- // otherwise, i is the type tag (< 0)
- switch i {
- case namedTag:
- // read type object
- p.pos()
- parent, name := p.qualifiedName()
- tdecl := &ast.GenDecl{
- Tok: token.TYPE,
- Specs: []ast.Spec{
- &ast.TypeSpec{
- Name: ast.NewIdent(name),
- },
- },
- }
-
- // record it right away (underlying type can contain refs to t)
- t := &ast.SelectorExpr{X: ast.NewIdent(parent), Sel: ast.NewIdent(name)}
- p.record(t)
-
- // parse underlying type
- t0 := p.typ(parent)
- tdecl.Specs[0].(*ast.TypeSpec).Type = t0
-
- p.callback(parent, tdecl)
-
- // interfaces have no methods
- if _, ok := t0.(*ast.InterfaceType); ok {
- return t
- }
-
- // read associated methods
- for i := p.int(); i > 0; i-- {
- // TODO(gri) replace this with something closer to fieldName
- p.pos()
- name := p.string()
- if !exported(name) {
- p.pkg()
- }
-
- recv := p.paramList()
- params := p.paramList()
- results := p.paramList()
- p.int() // go:nointerface pragma - discarded
-
- strip_method_receiver(recv)
- p.callback(parent, &ast.FuncDecl{
- Recv: recv,
- Name: ast.NewIdent(name),
- Type: &ast.FuncType{Params: params, Results: results},
- })
- }
- return t
- case arrayTag:
- i := p.reserveMaybe()
- n := p.int64()
- elt := p.typ(parent)
- return p.recordMaybe(i, &ast.ArrayType{
- Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprint(n)},
- Elt: elt,
- })
-
- case sliceTag:
- i := p.reserveMaybe()
- elt := p.typ(parent)
- return p.recordMaybe(i, &ast.ArrayType{Len: nil, Elt: elt})
-
- case dddTag:
- i := p.reserveMaybe()
- elt := p.typ(parent)
- return p.recordMaybe(i, &ast.Ellipsis{Elt: elt})
-
- case structTag:
- i := p.reserveMaybe()
- return p.recordMaybe(i, p.structType(parent))
-
- case pointerTag:
- i := p.reserveMaybe()
- elt := p.typ(parent)
- return p.recordMaybe(i, &ast.StarExpr{X: elt})
-
- case signatureTag:
- i := p.reserveMaybe()
- params := p.paramList()
- results := p.paramList()
- return p.recordMaybe(i, &ast.FuncType{Params: params, Results: results})
-
- case interfaceTag:
- i := p.reserveMaybe()
- var embeddeds []*ast.SelectorExpr
- for n := p.int(); n > 0; n-- {
- p.pos()
- if named, ok := p.typ(parent).(*ast.SelectorExpr); ok {
- embeddeds = append(embeddeds, named)
- }
- }
- methods := p.methodList(parent)
- for _, field := range embeddeds {
- methods = append(methods, &ast.Field{Type: field})
- }
- return p.recordMaybe(i, &ast.InterfaceType{Methods: &ast.FieldList{List: methods}})
-
- case mapTag:
- i := p.reserveMaybe()
- key := p.typ(parent)
- val := p.typ(parent)
- return p.recordMaybe(i, &ast.MapType{Key: key, Value: val})
-
- case chanTag:
- i := p.reserveMaybe()
- dir := ast.SEND | ast.RECV
- switch d := p.int(); d {
- case 1:
- dir = ast.RECV
- case 2:
- dir = ast.SEND
- case 3:
- // already set
- default:
- panic(fmt.Sprintf("unexpected channel dir %d", d))
- }
- elt := p.typ(parent)
- return p.recordMaybe(i, &ast.ChanType{Dir: dir, Value: elt})
-
- default:
- panic(fmt.Sprintf("unexpected type tag %d", i))
- }
-}
-
-func (p *gc_bin_parser) structType(parent string) *ast.StructType {
- var fields []*ast.Field
- if n := p.int(); n > 0 {
- fields = make([]*ast.Field, n)
- for i := range fields {
- fields[i], _ = p.field(parent) // (*ast.Field, tag), not interested in tags
- }
- }
- return &ast.StructType{Fields: &ast.FieldList{List: fields}}
-}
-
-func (p *gc_bin_parser) field(parent string) (*ast.Field, string) {
- p.pos()
- _, name, _ := p.fieldName(parent)
- typ := p.typ(parent)
- tag := p.string()
-
- var names []*ast.Ident
- if name != "" {
- names = []*ast.Ident{ast.NewIdent(name)}
- }
- return &ast.Field{
- Names: names,
- Type: typ,
- }, tag
-}
-
-func (p *gc_bin_parser) methodList(parent string) (methods []*ast.Field) {
- if n := p.int(); n > 0 {
- methods = make([]*ast.Field, n)
- for i := range methods {
- methods[i] = p.method(parent)
- }
- }
- return
-}
-
-func (p *gc_bin_parser) method(parent string) *ast.Field {
- p.pos()
- _, name, _ := p.fieldName(parent)
- params := p.paramList()
- results := p.paramList()
- return &ast.Field{
- Names: []*ast.Ident{ast.NewIdent(name)},
- Type: &ast.FuncType{Params: params, Results: results},
- }
-}
-
-func (p *gc_bin_parser) fieldName(parent string) (string, string, bool) {
- name := p.string()
- pkg := parent
- if p.version == 0 && name == "_" {
- // version 0 didn't export a package for _ fields
- return pkg, name, false
- }
- var alias bool
- switch name {
- case "":
- // 1) field name matches base type name and is exported: nothing to do
- case "?":
- // 2) field name matches base type name and is not exported: need package
- name = ""
- pkg = p.pkg()
- case "@":
- // 3) field name doesn't match type name (alias)
- name = p.string()
- alias = true
- fallthrough
- default:
- if !exported(name) {
- pkg = p.pkg()
- }
- }
- return pkg, name, alias
-}
-
-func (p *gc_bin_parser) paramList() *ast.FieldList {
- n := p.int()
- if n == 0 {
- return nil
- }
- // negative length indicates unnamed parameters
- named := true
- if n < 0 {
- n = -n
- named = false
- }
- // n > 0
- flds := make([]*ast.Field, n)
- for i := range flds {
- flds[i] = p.param(named)
- }
- return &ast.FieldList{List: flds}
-}
-
-func (p *gc_bin_parser) param(named bool) *ast.Field {
- t := p.typ("")
-
- name := "?"
- if named {
- name = p.string()
- if name == "" {
- panic("expected named parameter")
- }
- if name != "_" {
- p.pkg()
- }
- if i := strings.Index(name, "·"); i > 0 {
- name = name[:i] // cut off gc-specific parameter numbering
- }
- }
-
- // read and discard compiler-specific info
- p.string()
-
- return &ast.Field{
- Names: []*ast.Ident{ast.NewIdent(name)},
- Type: t,
- }
-}
-
-func exported(name string) bool {
- ch, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(ch)
-}
-
-func (p *gc_bin_parser) skipValue() {
- switch tag := p.tagOrIndex(); tag {
- case falseTag, trueTag:
- case int64Tag:
- p.int64()
- case floatTag:
- p.float()
- case complexTag:
- p.float()
- p.float()
- case stringTag:
- p.string()
- default:
- panic(fmt.Sprintf("unexpected value tag %d", tag))
- }
-}
-
-func (p *gc_bin_parser) float() {
- sign := p.int()
- if sign == 0 {
- return
- }
-
- p.int() // exp
- p.string() // mant
-}
-
-// ----------------------------------------------------------------------------
-// Low-level decoders
-
-func (p *gc_bin_parser) tagOrIndex() int {
- if p.debugFormat {
- p.marker('t')
- }
-
- return int(p.rawInt64())
-}
-
-func (p *gc_bin_parser) int() int {
- x := p.int64()
- if int64(int(x)) != x {
- panic("exported integer too large")
- }
- return int(x)
-}
-
-func (p *gc_bin_parser) int64() int64 {
- if p.debugFormat {
- p.marker('i')
- }
-
- return p.rawInt64()
-}
-
-func (p *gc_bin_parser) path() string {
- if p.debugFormat {
- p.marker('p')
- }
- // if the path was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.pathList[i]
- }
- // otherwise, i is the negative path length (< 0)
- a := make([]string, -i)
- for n := range a {
- a[n] = p.string()
- }
- s := strings.Join(a, "/")
- p.pathList = append(p.pathList, s)
- return s
-}
-
-func (p *gc_bin_parser) string() string {
- if p.debugFormat {
- p.marker('s')
- }
- // if the string was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.strList[i]
- }
- // otherwise, i is the negative string length (< 0)
- if n := int(-i); n <= cap(p.buf) {
- p.buf = p.buf[:n]
- } else {
- p.buf = make([]byte, n)
- }
- for i := range p.buf {
- p.buf[i] = p.rawByte()
- }
- s := string(p.buf)
- p.strList = append(p.strList, s)
- return s
-}
-
-func (p *gc_bin_parser) marker(want byte) {
- if got := p.rawByte(); got != want {
- panic(fmt.Sprintf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read))
- }
-
- pos := p.read
- if n := int(p.rawInt64()); n != pos {
- panic(fmt.Sprintf("incorrect position: got %d; want %d", n, pos))
- }
-}
-
-// rawInt64 should only be used by low-level decoders.
-func (p *gc_bin_parser) rawInt64() int64 {
- i, err := binary.ReadVarint(p)
- if err != nil {
- panic(fmt.Sprintf("read error: %v", err))
- }
- return i
-}
-
-// rawStringln should only be used to read the initial version string.
-func (p *gc_bin_parser) rawStringln(b byte) string {
- p.buf = p.buf[:0]
- for b != '\n' {
- p.buf = append(p.buf, b)
- b = p.rawByte()
- }
- return string(p.buf)
-}
-
-// needed for binary.ReadVarint in rawInt64
-func (p *gc_bin_parser) ReadByte() (byte, error) {
- return p.rawByte(), nil
-}
-
-// byte is the bottleneck interface for reading p.data.
-// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
-// rawByte should only be used by low-level decoders.
-func (p *gc_bin_parser) rawByte() byte {
- b := p.data[0]
- r := 1
- if b == '|' {
- b = p.data[1]
- r = 2
- switch b {
- case 'S':
- b = '$'
- case '|':
- // nothing to do
- default:
- panic("unexpected escape sequence in export data")
- }
- }
- p.data = p.data[r:]
- p.read += r
- return b
-
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
- // Objects
- packageTag = -(iota + 1)
- constTag
- typeTag
- varTag
- funcTag
- endTag
-
- // Types
- namedTag
- arrayTag
- sliceTag
- dddTag
- structTag
- pointerTag
- signatureTag
- interfaceTag
- mapTag
- chanTag
-
- // Values
- falseTag
- trueTag
- int64Tag
- floatTag
- fractionTag // not used by gc
- complexTag
- stringTag
- nilTag // only used by gc (appears in exported inlined function bodies)
- unknownTag // not used by gc (only appears in packages with errors)
-
- // Type aliases
- aliasTag
-)
-
-var predeclared = []ast.Expr{
- // basic types
- ast.NewIdent("bool"),
- ast.NewIdent("int"),
- ast.NewIdent("int8"),
- ast.NewIdent("int16"),
- ast.NewIdent("int32"),
- ast.NewIdent("int64"),
- ast.NewIdent("uint"),
- ast.NewIdent("uint8"),
- ast.NewIdent("uint16"),
- ast.NewIdent("uint32"),
- ast.NewIdent("uint64"),
- ast.NewIdent("uintptr"),
- ast.NewIdent("float32"),
- ast.NewIdent("float64"),
- ast.NewIdent("complex64"),
- ast.NewIdent("complex128"),
- ast.NewIdent("string"),
-
- // basic type aliases
- ast.NewIdent("byte"),
- ast.NewIdent("rune"),
-
- // error
- ast.NewIdent("error"),
-
- // TODO(nsf): don't think those are used in just package type info,
- // maybe for consts, but we are not interested in that
- // untyped types
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedBool],
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedInt],
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedRune],
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedFloat],
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedComplex],
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedString],
- ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedNil],
-
- // package unsafe
- &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")},
-
- // invalid type
- ast.NewIdent(">_<"), // TODO: types.Typ[types.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- ast.NewIdent("any"),
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/package_text.go b/src/disposa.blue/margo/golang/internal/gocode/package_text.go
deleted file mode 100644
index 9d4b5629..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/package_text.go
+++ /dev/null
@@ -1,678 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "errors"
- "fmt"
- "go/ast"
- "go/token"
- "strconv"
- "text/scanner"
-)
-
-//-------------------------------------------------------------------------
-// gc_parser
-//
-// The following part of the code may contain portions of the code from the Go
-// standard library, which tells me to retain their copyright notice:
-//
-// Copyright (c) 2009 The Go Authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//-------------------------------------------------------------------------
-
-type gc_parser struct {
- scanner scanner.Scanner
- tok rune
- lit string
- path_to_name map[string]string
- beautify bool
- pfc *package_file_cache
-}
-
-func (p *gc_parser) init(data []byte, pfc *package_file_cache) {
- p.scanner.Init(bytes.NewReader(data))
- p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
- p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings |
- scanner.ScanComments | scanner.ScanChars | scanner.SkipComments
- p.scanner.Whitespace = 1<<'\t' | 1<<' ' | 1<<'\r' | 1<<'\v' | 1<<'\f'
- p.scanner.Filename = "package.go"
- p.next()
- // and the built-in "unsafe" package to the path_to_name map
- p.path_to_name = map[string]string{"unsafe": "unsafe"}
- p.pfc = pfc
-}
-
-func (p *gc_parser) next() {
- p.tok = p.scanner.Scan()
- switch p.tok {
- case scanner.Ident, scanner.Int, scanner.String:
- p.lit = p.scanner.TokenText()
- default:
- p.lit = ""
- }
-}
-
-func (p *gc_parser) error(msg string) {
- panic(errors.New(msg))
-}
-
-func (p *gc_parser) errorf(format string, args ...interface{}) {
- p.error(fmt.Sprintf(format, args...))
-}
-
-func (p *gc_parser) expect(tok rune) string {
- lit := p.lit
- if p.tok != tok {
- p.errorf("expected %s, got %s (%q)", scanner.TokenString(tok),
- scanner.TokenString(p.tok), lit)
- }
- p.next()
- return lit
-}
-
-func (p *gc_parser) expect_keyword(keyword string) {
- lit := p.expect(scanner.Ident)
- if lit != keyword {
- p.errorf("expected keyword: %s, got: %q", keyword, lit)
- }
-}
-
-func (p *gc_parser) expect_special(what string) {
- i := 0
- for i < len(what) {
- if p.tok != rune(what[i]) {
- break
- }
-
- nc := p.scanner.Peek()
- if i != len(what)-1 && nc <= ' ' {
- break
- }
-
- p.next()
- i++
- }
-
- if i < len(what) {
- p.errorf("expected: %q, got: %q", what, what[0:i])
- }
-}
-
-// dotIdentifier = "?" | ( ident | '·' ) { ident | int | '·' } .
-// we're doing lexer job here, kind of
-func (p *gc_parser) parse_dot_ident() string {
- if p.tok == '?' {
- p.next()
- return "?"
- }
-
- ident := ""
- sep := 'x'
- i, j := 0, -1
- for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
- ident += p.lit
- if p.tok == '·' {
- ident += "·"
- j = i
- i++
- }
- i += len(p.lit)
- sep = p.scanner.Peek()
- p.next()
- }
- // middot = \xc2\xb7
- if j != -1 && i > j+1 {
- c := ident[j+2]
- if c >= '0' && c <= '9' {
- ident = ident[0:j]
- }
- }
- return ident
-}
-
-// ImportPath = string_lit .
-// quoted name of the path, but we return it as an identifier, taking an alias
-// from 'pathToAlias' map, it is filled by import statements
-func (p *gc_parser) parse_package() *ast.Ident {
- path, err := strconv.Unquote(p.expect(scanner.String))
- if err != nil {
- panic(err)
- }
-
- return ast.NewIdent(path)
-}
-
-// ExportedName = "@" ImportPath "." dotIdentifier .
-func (p *gc_parser) parse_exported_name() *ast.SelectorExpr {
- p.expect('@')
- pkg := p.parse_package()
- if pkg.Name == "" {
- pkg.Name = "!" + p.pfc.name + "!" + p.pfc.defalias
- } else {
- pkg.Name = p.path_to_name[pkg.Name]
- }
- p.expect('.')
- name := ast.NewIdent(p.parse_dot_ident())
- return &ast.SelectorExpr{X: pkg, Sel: name}
-}
-
-// Name = identifier | "?" | ExportedName .
-func (p *gc_parser) parse_name() (string, ast.Expr) {
- switch p.tok {
- case scanner.Ident:
- name := p.lit
- p.next()
- return name, ast.NewIdent(name)
- case '?':
- p.next()
- return "?", ast.NewIdent("?")
- case '@':
- en := p.parse_exported_name()
- return en.Sel.Name, en
- }
- p.error("name expected")
- return "", nil
-}
-
-// Field = Name Type [ string_lit ] .
-func (p *gc_parser) parse_field() *ast.Field {
- var tag string
- name, _ := p.parse_name()
- typ := p.parse_type()
- if p.tok == scanner.String {
- tag = p.expect(scanner.String)
- }
-
- var names []*ast.Ident
- if name != "?" {
- names = []*ast.Ident{ast.NewIdent(name)}
- }
-
- return &ast.Field{
- Names: names,
- Type: typ,
- Tag: &ast.BasicLit{Kind: token.STRING, Value: tag},
- }
-}
-
-// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
-func (p *gc_parser) parse_parameter() *ast.Field {
- // name
- name, _ := p.parse_name()
-
- // type
- var typ ast.Expr
- if p.tok == '.' {
- p.expect_special("...")
- typ = &ast.Ellipsis{Elt: p.parse_type()}
- } else {
- typ = p.parse_type()
- }
-
- var tag string
- if p.tok == scanner.String {
- tag = p.expect(scanner.String)
- }
-
- return &ast.Field{
- Names: []*ast.Ident{ast.NewIdent(name)},
- Type: typ,
- Tag: &ast.BasicLit{Kind: token.STRING, Value: tag},
- }
-}
-
-// Parameters = "(" [ ParameterList ] ")" .
-// ParameterList = { Parameter "," } Parameter .
-func (p *gc_parser) parse_parameters() *ast.FieldList {
- flds := []*ast.Field{}
- parse_parameter := func() {
- par := p.parse_parameter()
- flds = append(flds, par)
- }
-
- p.expect('(')
- if p.tok != ')' {
- parse_parameter()
- for p.tok == ',' {
- p.next()
- parse_parameter()
- }
- }
- p.expect(')')
- return &ast.FieldList{List: flds}
-}
-
-// Signature = Parameters [ Result ] .
-// Result = Type | Parameters .
-func (p *gc_parser) parse_signature() *ast.FuncType {
- var params *ast.FieldList
- var results *ast.FieldList
-
- params = p.parse_parameters()
- switch p.tok {
- case scanner.Ident, '[', '*', '<', '@':
- fld := &ast.Field{Type: p.parse_type()}
- results = &ast.FieldList{List: []*ast.Field{fld}}
- case '(':
- results = p.parse_parameters()
- }
- return &ast.FuncType{Params: params, Results: results}
-}
-
-// MethodOrEmbedSpec = Name [ Signature ] .
-func (p *gc_parser) parse_method_or_embed_spec() *ast.Field {
- name, nameexpr := p.parse_name()
- if p.tok == '(' {
- typ := p.parse_signature()
- return &ast.Field{
- Names: []*ast.Ident{ast.NewIdent(name)},
- Type: typ,
- }
- }
-
- return &ast.Field{
- Type: nameexpr,
- }
-}
-
-// int_lit = [ "-" | "+" ] { "0" ... "9" } .
-func (p *gc_parser) parse_int() {
- switch p.tok {
- case '-', '+':
- p.next()
- }
- p.expect(scanner.Int)
-}
-
-// number = int_lit [ "p" int_lit ] .
-func (p *gc_parser) parse_number() {
- p.parse_int()
- if p.lit == "p" {
- p.next()
- p.parse_int()
- }
-}
-
-//-------------------------------------------------------------------------------
-// gc_parser.types
-//-------------------------------------------------------------------------------
-
-// InterfaceType = "interface" "{" [ MethodOrEmbedList ] "}" .
-// MethodOrEmbedList = MethodOrEmbedSpec { ";" MethodOrEmbedSpec } .
-func (p *gc_parser) parse_interface_type() ast.Expr {
- var methods []*ast.Field
- parse_method := func() {
- meth := p.parse_method_or_embed_spec()
- methods = append(methods, meth)
- }
-
- p.expect_keyword("interface")
- p.expect('{')
- if p.tok != '}' {
- parse_method()
- for p.tok == ';' {
- p.next()
- parse_method()
- }
- }
- p.expect('}')
- return &ast.InterfaceType{Methods: &ast.FieldList{List: methods}}
-}
-
-// StructType = "struct" "{" [ FieldList ] "}" .
-// FieldList = Field { ";" Field } .
-func (p *gc_parser) parse_struct_type() ast.Expr {
- var fields []*ast.Field
- parse_field := func() {
- fld := p.parse_field()
- fields = append(fields, fld)
- }
-
- p.expect_keyword("struct")
- p.expect('{')
- if p.tok != '}' {
- parse_field()
- for p.tok == ';' {
- p.next()
- parse_field()
- }
- }
- p.expect('}')
- return &ast.StructType{Fields: &ast.FieldList{List: fields}}
-}
-
-// MapType = "map" "[" Type "]" Type .
-func (p *gc_parser) parse_map_type() ast.Expr {
- p.expect_keyword("map")
- p.expect('[')
- key := p.parse_type()
- p.expect(']')
- elt := p.parse_type()
- return &ast.MapType{Key: key, Value: elt}
-}
-
-// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
-func (p *gc_parser) parse_chan_type() ast.Expr {
- dir := ast.SEND | ast.RECV
- if p.tok == scanner.Ident {
- p.expect_keyword("chan")
- if p.tok == '<' {
- p.expect_special("<-")
- dir = ast.SEND
- }
- } else {
- p.expect_special("<-")
- p.expect_keyword("chan")
- dir = ast.RECV
- }
-
- elt := p.parse_type()
- return &ast.ChanType{Dir: dir, Value: elt}
-}
-
-// ArrayOrSliceType = ArrayType | SliceType .
-// ArrayType = "[" int_lit "]" Type .
-// SliceType = "[" "]" Type .
-func (p *gc_parser) parse_array_or_slice_type() ast.Expr {
- p.expect('[')
- if p.tok == ']' {
- // SliceType
- p.next() // skip ']'
- return &ast.ArrayType{Len: nil, Elt: p.parse_type()}
- }
-
- // ArrayType
- lit := p.expect(scanner.Int)
- p.expect(']')
- return &ast.ArrayType{
- Len: &ast.BasicLit{Kind: token.INT, Value: lit},
- Elt: p.parse_type(),
- }
-}
-
-// Type =
-// BasicType | TypeName | ArrayType | SliceType | StructType |
-// PointerType | FuncType | InterfaceType | MapType | ChanType |
-// "(" Type ")" .
-// BasicType = ident .
-// TypeName = ExportedName .
-// SliceType = "[" "]" Type .
-// PointerType = "*" Type .
-// FuncType = "func" Signature .
-func (p *gc_parser) parse_type() ast.Expr {
- switch p.tok {
- case scanner.Ident:
- switch p.lit {
- case "struct":
- return p.parse_struct_type()
- case "func":
- p.next()
- return p.parse_signature()
- case "interface":
- return p.parse_interface_type()
- case "map":
- return p.parse_map_type()
- case "chan":
- return p.parse_chan_type()
- default:
- lit := p.lit
- p.next()
- return ast.NewIdent(lit)
- }
- case '@':
- return p.parse_exported_name()
- case '[':
- return p.parse_array_or_slice_type()
- case '*':
- p.next()
- return &ast.StarExpr{X: p.parse_type()}
- case '<':
- return p.parse_chan_type()
- case '(':
- p.next()
- typ := p.parse_type()
- p.expect(')')
- return typ
- }
- p.errorf("unexpected token: %s", scanner.TokenString(p.tok))
- return nil
-}
-
-//-------------------------------------------------------------------------------
-// gc_parser.declarations
-//-------------------------------------------------------------------------------
-
-// ImportDecl = "import" identifier string_lit .
-func (p *gc_parser) parse_import_decl() {
- p.expect_keyword("import")
- alias := p.expect(scanner.Ident)
- path := p.parse_package()
- fullName := "!" + path.Name + "!" + alias
- p.path_to_name[path.Name] = fullName
- p.pfc.add_package_to_scope(fullName, path.Name)
-}
-
-// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
-// Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit .
-// bool_lit = "true" | "false" .
-// complex_lit = "(" float_lit "+" float_lit ")" .
-// rune_lit = "(" int_lit "+" int_lit ")" .
-// string_lit = `"` { unicode_char } `"` .
-func (p *gc_parser) parse_const_decl() (string, *ast.GenDecl) {
- // TODO: do we really need actual const value? gocode doesn't use this
- p.expect_keyword("const")
- name := p.parse_exported_name()
-
- var typ ast.Expr
- if p.tok != '=' {
- typ = p.parse_type()
- }
-
- p.expect('=')
-
- // skip the value
- switch p.tok {
- case scanner.Ident:
- // must be bool, true or false
- p.next()
- case '-', '+', scanner.Int:
- // number
- p.parse_number()
- case '(':
- // complex_lit or rune_lit
- p.next() // skip '('
- if p.tok == scanner.Char {
- p.next()
- } else {
- p.parse_number()
- }
- p.expect('+')
- p.parse_number()
- p.expect(')')
- case scanner.Char:
- p.next()
- case scanner.String:
- p.next()
- default:
- p.error("expected literal")
- }
-
- return name.X.(*ast.Ident).Name, &ast.GenDecl{
- Tok: token.CONST,
- Specs: []ast.Spec{
- &ast.ValueSpec{
- Names: []*ast.Ident{name.Sel},
- Type: typ,
- Values: []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}},
- },
- },
- }
-}
-
-// TypeDecl = "type" ExportedName Type .
-func (p *gc_parser) parse_type_decl() (string, *ast.GenDecl) {
- p.expect_keyword("type")
- name := p.parse_exported_name()
- typ := p.parse_type()
- return name.X.(*ast.Ident).Name, &ast.GenDecl{
- Tok: token.TYPE,
- Specs: []ast.Spec{
- &ast.TypeSpec{
- Name: name.Sel,
- Type: typ,
- },
- },
- }
-}
-
-// VarDecl = "var" ExportedName Type .
-func (p *gc_parser) parse_var_decl() (string, *ast.GenDecl) {
- p.expect_keyword("var")
- name := p.parse_exported_name()
- typ := p.parse_type()
- return name.X.(*ast.Ident).Name, &ast.GenDecl{
- Tok: token.VAR,
- Specs: []ast.Spec{
- &ast.ValueSpec{
- Names: []*ast.Ident{name.Sel},
- Type: typ,
- },
- },
- }
-}
-
-// FuncBody = "{" ... "}" .
-func (p *gc_parser) parse_func_body() {
- p.expect('{')
- for i := 1; i > 0; p.next() {
- switch p.tok {
- case '{':
- i++
- case '}':
- i--
- }
- }
-}
-
-// FuncDecl = "func" ExportedName Signature [ FuncBody ] .
-func (p *gc_parser) parse_func_decl() (string, *ast.FuncDecl) {
- // "func" was already consumed by lookahead
- name := p.parse_exported_name()
- typ := p.parse_signature()
- if p.tok == '{' {
- p.parse_func_body()
- }
- return name.X.(*ast.Ident).Name, &ast.FuncDecl{
- Name: name.Sel,
- Type: typ,
- }
-}
-
-func strip_method_receiver(recv *ast.FieldList) string {
- var sel *ast.SelectorExpr
-
- // find selector expression
- typ := recv.List[0].Type
- switch t := typ.(type) {
- case *ast.StarExpr:
- sel = t.X.(*ast.SelectorExpr)
- case *ast.SelectorExpr:
- sel = t
- }
-
- // extract package path
- pkg := sel.X.(*ast.Ident).Name
-
- // write back stripped type
- switch t := typ.(type) {
- case *ast.StarExpr:
- t.X = sel.Sel
- case *ast.SelectorExpr:
- recv.List[0].Type = sel.Sel
- }
-
- return pkg
-}
-
-// MethodDecl = "func" Receiver Name Signature .
-// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" [ FuncBody ] .
-func (p *gc_parser) parse_method_decl() (string, *ast.FuncDecl) {
- recv := p.parse_parameters()
- pkg := strip_method_receiver(recv)
- name, _ := p.parse_name()
- typ := p.parse_signature()
- if p.tok == '{' {
- p.parse_func_body()
- }
- return pkg, &ast.FuncDecl{
- Recv: recv,
- Name: ast.NewIdent(name),
- Type: typ,
- }
-}
-
-// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
-func (p *gc_parser) parse_decl() (pkg string, decl ast.Decl) {
- switch p.lit {
- case "import":
- p.parse_import_decl()
- case "const":
- pkg, decl = p.parse_const_decl()
- case "type":
- pkg, decl = p.parse_type_decl()
- case "var":
- pkg, decl = p.parse_var_decl()
- case "func":
- p.next()
- if p.tok == '(' {
- pkg, decl = p.parse_method_decl()
- } else {
- pkg, decl = p.parse_func_decl()
- }
- }
- p.expect('\n')
- return
-}
-
-// Export = PackageClause { Decl } "$$" .
-// PackageClause = "package" identifier [ "safe" ] "\n" .
-func (p *gc_parser) parse_export(callback func(string, ast.Decl)) {
- p.expect_keyword("package")
- p.pfc.defalias = p.expect(scanner.Ident)
- if p.tok != '\n' {
- p.expect_keyword("safe")
- }
- p.expect('\n')
-
- for p.tok != '$' && p.tok != scanner.EOF {
- pkg, decl := p.parse_decl()
- if decl != nil {
- callback(pkg, decl)
- }
- }
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/pre_go17.go b/src/disposa.blue/margo/golang/internal/gocode/pre_go17.go
deleted file mode 100644
index d961a0c5..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/pre_go17.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !go1.7,!go1.8
-
-package gocode
-
-func init() {
- knownPackageIdents["context"] = "golang.org/x/net/context"
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/ripper.go b/src/disposa.blue/margo/golang/internal/gocode/ripper.go
deleted file mode 100644
index 05310572..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/ripper.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package gocode
-
-import (
- "go/scanner"
- "go/token"
-)
-
-// All the code in this file serves single purpose:
-// It separates a function with the cursor inside and the rest of the code. I'm
-// doing that, because sometimes parser is not able to recover itself from an
-// error and the autocompletion results become less complete.
-
-type tok_pos_pair struct {
- tok token.Token
- pos token.Pos
-}
-
-type tok_collection struct {
- tokens []tok_pos_pair
- fset *token.FileSet
-}
-
-func (this *tok_collection) next(s *scanner.Scanner) bool {
- pos, tok, _ := s.Scan()
- if tok == token.EOF {
- return false
- }
-
- this.tokens = append(this.tokens, tok_pos_pair{tok, pos})
- return true
-}
-
-func (this *tok_collection) find_decl_beg(pos int) int {
- lowest := 0
- lowpos := -1
- lowi := -1
- cur := 0
- for i := pos; i >= 0; i-- {
- t := this.tokens[i]
- switch t.tok {
- case token.RBRACE:
- cur++
- case token.LBRACE:
- cur--
- }
-
- if cur < lowest {
- lowest = cur
- lowpos = this.fset.Position(t.pos).Offset
- lowi = i
- }
- }
-
- cur = lowest
- for i := lowi - 1; i >= 0; i-- {
- t := this.tokens[i]
- switch t.tok {
- case token.RBRACE:
- cur++
- case token.LBRACE:
- cur--
- }
- if t.tok == token.SEMICOLON && cur == lowest {
- lowpos = this.fset.Position(t.pos).Offset
- break
- }
- }
-
- return lowpos
-}
-
-func (this *tok_collection) find_decl_end(pos int) int {
- highest := 0
- highpos := -1
- cur := 0
-
- if this.tokens[pos].tok == token.LBRACE {
- pos++
- }
-
- for i := pos; i < len(this.tokens); i++ {
- t := this.tokens[i]
- switch t.tok {
- case token.RBRACE:
- cur++
- case token.LBRACE:
- cur--
- }
-
- if cur > highest {
- highest = cur
- highpos = this.fset.Position(t.pos).Offset
- }
- }
-
- return highpos
-}
-
-func (this *tok_collection) find_outermost_scope(cursor int) (int, int) {
- pos := 0
-
- for i, t := range this.tokens {
- if cursor <= this.fset.Position(t.pos).Offset {
- break
- }
- pos = i
- }
-
- return this.find_decl_beg(pos), this.find_decl_end(pos)
-}
-
-// return new cursor position, file without ripped part and the ripped part itself
-// variants:
-// new-cursor, file-without-ripped-part, ripped-part
-// old-cursor, file, nil
-func (this *tok_collection) rip_off_decl(file []byte, cursor int) (int, []byte, []byte) {
- this.fset = token.NewFileSet()
- var s scanner.Scanner
- s.Init(this.fset.AddFile("", this.fset.Base(), len(file)), file, nil, scanner.ScanComments)
- for this.next(&s) {
- }
-
- beg, end := this.find_outermost_scope(cursor)
- if beg == -1 || end == -1 {
- return cursor, file, nil
- }
-
- ripped := make([]byte, end+1-beg)
- copy(ripped, file[beg:end+1])
-
- newfile := make([]byte, len(file)-len(ripped))
- copy(newfile, file[:beg])
- copy(newfile[beg:], file[end+1:])
-
- return cursor - beg, newfile, ripped
-}
-
-func rip_off_decl(file []byte, cursor int) (int, []byte, []byte) {
- var tc tok_collection
- return tc.rip_off_decl(file, cursor)
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/rpc.go b/src/disposa.blue/margo/golang/internal/gocode/rpc.go
deleted file mode 100644
index 7144d723..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/rpc.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// WARNING! Autogenerated by goremote, don't touch.
-
-package gocode
-
-import (
- "net/rpc"
-)
-
-type RPC struct {
-}
-
-// wrapper for: server_auto_complete
-
-type Args_auto_complete struct {
- Arg0 []byte
- Arg1 string
- Arg2 int
- Arg3 go_build_context
-}
-type Reply_auto_complete struct {
- Arg0 []candidate
- Arg1 int
-}
-
-func (r *RPC) RPC_auto_complete(args *Args_auto_complete, reply *Reply_auto_complete) error {
- reply.Arg0, reply.Arg1 = server_auto_complete(args.Arg0, args.Arg1, args.Arg2, args.Arg3)
- return nil
-}
-func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 go_build_context) (c []candidate, d int) {
- var args Args_auto_complete
- var reply Reply_auto_complete
- args.Arg0 = Arg0
- args.Arg1 = Arg1
- args.Arg2 = Arg2
- args.Arg3 = Arg3
- err := cli.Call("RPC.RPC_auto_complete", &args, &reply)
- if err != nil {
- panic(err)
- }
- return reply.Arg0, reply.Arg1
-}
-
-// wrapper for: server_close
-
-type Args_close struct {
- Arg0 int
-}
-type Reply_close struct {
- Arg0 int
-}
-
-func (r *RPC) RPC_close(args *Args_close, reply *Reply_close) error {
- reply.Arg0 = server_close(args.Arg0)
- return nil
-}
-func client_close(cli *rpc.Client, Arg0 int) int {
- var args Args_close
- var reply Reply_close
- args.Arg0 = Arg0
- err := cli.Call("RPC.RPC_close", &args, &reply)
- if err != nil {
- panic(err)
- }
- return reply.Arg0
-}
-
-// wrapper for: server_status
-
-type Args_status struct {
- Arg0 int
-}
-type Reply_status struct {
- Arg0 string
-}
-
-func (r *RPC) RPC_status(args *Args_status, reply *Reply_status) error {
- reply.Arg0 = server_status(args.Arg0)
- return nil
-}
-func client_status(cli *rpc.Client, Arg0 int) string {
- var args Args_status
- var reply Reply_status
- args.Arg0 = Arg0
- err := cli.Call("RPC.RPC_status", &args, &reply)
- if err != nil {
- panic(err)
- }
- return reply.Arg0
-}
-
-// wrapper for: server_drop_cache
-
-type Args_drop_cache struct {
- Arg0 int
-}
-type Reply_drop_cache struct {
- Arg0 int
-}
-
-func (r *RPC) RPC_drop_cache(args *Args_drop_cache, reply *Reply_drop_cache) error {
- reply.Arg0 = server_drop_cache(args.Arg0)
- return nil
-}
-func client_drop_cache(cli *rpc.Client, Arg0 int) int {
- var args Args_drop_cache
- var reply Reply_drop_cache
- args.Arg0 = Arg0
- err := cli.Call("RPC.RPC_drop_cache", &args, &reply)
- if err != nil {
- panic(err)
- }
- return reply.Arg0
-}
-
-// wrapper for: server_set
-
-type Args_set struct {
- Arg0, Arg1 string
-}
-type Reply_set struct {
- Arg0 string
-}
-
-func (r *RPC) RPC_set(args *Args_set, reply *Reply_set) error {
- reply.Arg0 = server_set(args.Arg0, args.Arg1)
- return nil
-}
-func client_set(cli *rpc.Client, Arg0, Arg1 string) string {
- var args Args_set
- var reply Reply_set
- args.Arg0 = Arg0
- args.Arg1 = Arg1
- err := cli.Call("RPC.RPC_set", &args, &reply)
- if err != nil {
- panic(err)
- }
- return reply.Arg0
-}
-
-// wrapper for: server_options
-
-type Args_options struct {
- Arg0 int
-}
-type Reply_options struct {
- Arg0 string
-}
-
-func (r *RPC) RPC_options(args *Args_options, reply *Reply_options) error {
- reply.Arg0 = server_options(args.Arg0)
- return nil
-}
-func client_options(cli *rpc.Client, Arg0 int) string {
- var args Args_options
- var reply Reply_options
- args.Arg0 = Arg0
- err := cli.Call("RPC.RPC_options", &args, &reply)
- if err != nil {
- panic(err)
- }
- return reply.Arg0
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/scope.go b/src/disposa.blue/margo/golang/internal/gocode/scope.go
deleted file mode 100644
index 14527148..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/scope.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package gocode
-
-//-------------------------------------------------------------------------
-// scope
-//-------------------------------------------------------------------------
-
-type scope struct {
- // the package name that this scope resides in
- pkgname string
- parent *scope // nil for universe scope
- entities map[string]*decl
-}
-
-func new_named_scope(outer *scope, name string) *scope {
- s := new_scope(outer)
- s.pkgname = name
- return s
-}
-
-func new_scope(outer *scope) *scope {
- s := new(scope)
- if outer != nil {
- s.pkgname = outer.pkgname
- }
- s.parent = outer
- s.entities = make(map[string]*decl)
- return s
-}
-
-// returns: new, prev
-func advance_scope(s *scope) (*scope, *scope) {
- if len(s.entities) == 0 {
- return s, s.parent
- }
- return new_scope(s), s
-}
-
-// adds declaration or returns an existing one
-func (s *scope) add_named_decl(d *decl) *decl {
- return s.add_decl(d.name, d)
-}
-
-func (s *scope) add_decl(name string, d *decl) *decl {
- decl, ok := s.entities[name]
- if !ok {
- s.entities[name] = d
- return d
- }
- return decl
-}
-
-func (s *scope) replace_decl(name string, d *decl) {
- s.entities[name] = d
-}
-
-func (s *scope) merge_decl(d *decl) {
- decl, ok := s.entities[d.name]
- if !ok {
- s.entities[d.name] = d
- } else {
- decl := decl.deep_copy()
- decl.expand_or_replace(d)
- s.entities[d.name] = decl
- }
-}
-
-func (s *scope) lookup(name string) *decl {
- decl, ok := s.entities[name]
- if !ok {
- if s.parent != nil {
- return s.parent.lookup(name)
- } else {
- return nil
- }
- }
- return decl
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/server.go b/src/disposa.blue/margo/golang/internal/gocode/server.go
deleted file mode 100644
index d2e738c6..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/server.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "fmt"
- "go/build"
- "log"
- "net"
- "net/rpc"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "time"
-)
-
-func do_server() int {
- g_config.read()
- if g_config.ForceDebugOutput != "" {
- // forcefully enable debugging and redirect logging into the
- // specified file
- *g_debug = true
- f, err := os.Create(g_config.ForceDebugOutput)
- if err != nil {
- panic(err)
- }
- log.SetOutput(f)
- }
-
- addr := *g_addr
- if *g_sock == "unix" {
- addr = get_socket_filename()
- if file_exists(addr) {
- log.Printf("unix socket: '%s' already exists\n", addr)
- return 1
- }
- }
- g_daemon = new_daemon(*g_sock, addr)
- if *g_sock == "unix" {
- // cleanup unix socket file
- defer os.Remove(addr)
- }
-
- rpc.Register(new(RPC))
-
- g_daemon.loop()
- return 0
-}
-
-//-------------------------------------------------------------------------
-// daemon
-//-------------------------------------------------------------------------
-
-type daemon struct {
- listener net.Listener
- cmd_in chan int
- autocomplete *auto_complete_context
- pkgcache package_cache
- declcache *decl_cache
- context package_lookup_context
-}
-
-func new_daemon(network, address string) *daemon {
- var err error
-
- d := new(daemon)
- d.listener, err = net.Listen(network, address)
- if err != nil {
- panic(err)
- }
-
- d.cmd_in = make(chan int, 1)
- d.pkgcache = new_package_cache()
- d.declcache = new_decl_cache(&d.context)
- d.autocomplete = new_auto_complete_context(d.pkgcache, d.declcache)
- return d
-}
-
-func (this *daemon) drop_cache() {
- this.pkgcache = new_package_cache()
- this.declcache = new_decl_cache(&this.context)
- this.autocomplete = new_auto_complete_context(this.pkgcache, this.declcache)
-}
-
-const (
- daemon_close = iota
-)
-
-func (this *daemon) loop() {
- conn_in := make(chan net.Conn)
- go func() {
- for {
- c, err := this.listener.Accept()
- if err != nil {
- panic(err)
- }
- conn_in <- c
- }
- }()
-
- timeout := time.Duration(g_config.CloseTimeout) * time.Second
- countdown := time.NewTimer(timeout)
-
- for {
- // handle connections or server CMDs (currently one CMD)
- select {
- case c := <-conn_in:
- rpc.ServeConn(c)
- countdown.Reset(timeout)
- runtime.GC()
- case cmd := <-this.cmd_in:
- switch cmd {
- case daemon_close:
- return
- }
- case <-countdown.C:
- return
- }
- }
-}
-
-func (this *daemon) close() {
- this.cmd_in <- daemon_close
-}
-
-var g_daemon *daemon
-
-//-------------------------------------------------------------------------
-// server_* functions
-//
-// Corresponding client_* functions are autogenerated by goremote.
-//-------------------------------------------------------------------------
-
-func server_auto_complete(file []byte, filename string, cursor int, context_packed go_build_context) (c []candidate, d int) {
- context := unpack_build_context(&context_packed)
- defer func() {
- if err := recover(); err != nil {
- print_backtrace(err)
- c = []candidate{
- {"PANIC", "PANIC", decl_invalid, "panic"},
- }
-
- // drop cache
- g_daemon.drop_cache()
- }
- }()
- // TODO: Probably we don't care about comparing all the fields, checking GOROOT and GOPATH
- // should be enough.
- if !reflect.DeepEqual(g_daemon.context.Context, context.Context) {
- g_daemon.context = context
- g_daemon.drop_cache()
- }
- switch g_config.PackageLookupMode {
- case "bzl":
- // when package lookup mode is bzl, we set GOPATH to "" explicitly and
- // BzlProjectRoot becomes valid (or empty)
- var err error
- g_daemon.context.GOPATH = ""
- g_daemon.context.BzlProjectRoot, err = find_bzl_project_root(g_config.LibPath, filename)
- if *g_debug && err != nil {
- log.Printf("Bzl project root not found: %s", err)
- }
- case "gb":
- // when package lookup mode is gb, we set GOPATH to "" explicitly and
- // GBProjectRoot becomes valid (or empty)
- var err error
- g_daemon.context.GOPATH = ""
- g_daemon.context.GBProjectRoot, err = find_gb_project_root(filename)
- if *g_debug && err != nil {
- log.Printf("Gb project root not found: %s", err)
- }
- case "go":
- // get current package path for GO15VENDOREXPERIMENT hack
- g_daemon.context.CurrentPackagePath = ""
- pkg, err := g_daemon.context.ImportDir(filepath.Dir(filename), build.FindOnly)
- if err == nil {
- if *g_debug {
- log.Printf("Go project path: %s", pkg.ImportPath)
- }
- g_daemon.context.CurrentPackagePath = pkg.ImportPath
- } else if *g_debug {
- log.Printf("Go project path not found: %s", err)
- }
- }
- if *g_debug {
- var buf bytes.Buffer
- log.Printf("Got autocompletion request for '%s'\n", filename)
- log.Printf("Cursor at: %d\n", cursor)
- if cursor > len(file) || cursor < 0 {
- log.Println("ERROR! Cursor is outside of the boundaries of the buffer, " +
- "this is most likely a text editor plugin bug. Text editor is responsible " +
- "for passing the correct cursor position to gocode.")
- } else {
- buf.WriteString("-------------------------------------------------------\n")
- buf.Write(file[:cursor])
- buf.WriteString("#")
- buf.Write(file[cursor:])
- log.Print(buf.String())
- log.Println("-------------------------------------------------------")
- }
- }
- candidates, d := g_daemon.autocomplete.apropos(file, filename, cursor)
- if *g_debug {
- log.Printf("Offset: %d\n", d)
- log.Printf("Number of candidates found: %d\n", len(candidates))
- log.Printf("Candidates are:\n")
- for _, c := range candidates {
- abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
- if c.Class == decl_func {
- abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
- }
- log.Printf(" %s\n", abbr)
- }
- log.Println("=======================================================")
- }
- return candidates, d
-}
-
-func server_close(notused int) int {
- g_daemon.close()
- return 0
-}
-
-func server_status(notused int) string {
- return g_daemon.autocomplete.status()
-}
-
-func server_drop_cache(notused int) int {
- // drop cache
- g_daemon.drop_cache()
- return 0
-}
-
-func server_set(key, value string) string {
- if key == "\x00" {
- return g_config.list()
- } else if value == "\x00" {
- return g_config.list_option(key)
- }
- // drop cache on settings changes
- g_daemon.drop_cache()
- return g_config.set_option(key, value)
-}
-
-func server_options(notused int) string {
- return g_config.options()
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/type_alias_build_hack_18.go b/src/disposa.blue/margo/golang/internal/gocode/type_alias_build_hack_18.go
deleted file mode 100644
index 945e6ba7..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/type_alias_build_hack_18.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.9,!go1.8.typealias
-
-package gocode
-
-import (
- "go/ast"
-)
-
-func typeAliasSpec(name string, typ ast.Expr) *ast.TypeSpec {
- return &ast.TypeSpec{
- Name: ast.NewIdent(name),
- Type: typ,
- }
-}
-
-func isAliasTypeSpec(t *ast.TypeSpec) bool {
- return false
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/type_alias_build_hack_19.go b/src/disposa.blue/margo/golang/internal/gocode/type_alias_build_hack_19.go
deleted file mode 100644
index 4fc034d2..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/type_alias_build_hack_19.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.9 go1.8.typealias
-
-package gocode
-
-import (
- "go/ast"
-)
-
-func typeAliasSpec(name string, typ ast.Expr) *ast.TypeSpec {
- return &ast.TypeSpec{
- Name: ast.NewIdent(name),
- Assign: 1,
- Type: typ,
- }
-}
-
-func isAliasTypeSpec(t *ast.TypeSpec) bool {
- return t.Assign != 0
-}
diff --git a/src/disposa.blue/margo/golang/internal/gocode/utils.go b/src/disposa.blue/margo/golang/internal/gocode/utils.go
deleted file mode 100644
index afee81eb..00000000
--- a/src/disposa.blue/margo/golang/internal/gocode/utils.go
+++ /dev/null
@@ -1,296 +0,0 @@
-package gocode
-
-import (
- "bytes"
- "fmt"
- "go/build"
- "io/ioutil"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "sync"
- "unicode/utf8"
-)
-
-// our own readdir, which skips the files it cannot lstat
-func readdir_lstat(name string) ([]os.FileInfo, error) {
- f, err := os.Open(name)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- names, err := f.Readdirnames(-1)
- if err != nil {
- return nil, err
- }
-
- out := make([]os.FileInfo, 0, len(names))
- for _, lname := range names {
- s, err := os.Lstat(filepath.Join(name, lname))
- if err != nil {
- continue
- }
- out = append(out, s)
- }
- return out, nil
-}
-
-// our other readdir function, only opens and reads
-func readdir(dirname string) []os.FileInfo {
- f, err := os.Open(dirname)
- if err != nil {
- return nil
- }
- fi, err := f.Readdir(-1)
- f.Close()
- if err != nil {
- panic(err)
- }
- return fi
-}
-
-// returns truncated 'data' and amount of bytes skipped (for cursor pos adjustment)
-func filter_out_shebang(data []byte) ([]byte, int) {
- if len(data) > 2 && data[0] == '#' && data[1] == '!' {
- newline := bytes.Index(data, []byte("\n"))
- if newline != -1 && len(data) > newline+1 {
- return data[newline+1:], newline + 1
- }
- }
- return data, 0
-}
-
-func file_exists(filename string) bool {
- _, err := os.Stat(filename)
- if err != nil {
- return false
- }
- return true
-}
-
-func is_dir(path string) bool {
- fi, err := os.Stat(path)
- return err == nil && fi.IsDir()
-}
-
-func char_to_byte_offset(s []byte, offset_c int) (offset_b int) {
- for offset_b = 0; offset_c > 0 && offset_b < len(s); offset_b++ {
- if utf8.RuneStart(s[offset_b]) {
- offset_c--
- }
- }
- return offset_b
-}
-
-func xdg_home_dir() string {
- xdghome := os.Getenv("XDG_CONFIG_HOME")
- if xdghome == "" {
- xdghome = filepath.Join(os.Getenv("HOME"), ".config")
- }
- return xdghome
-}
-
-func has_prefix(s, prefix string, ignorecase bool) bool {
- if ignorecase {
- s = strings.ToLower(s)
- prefix = strings.ToLower(prefix)
- }
- return strings.HasPrefix(s, prefix)
-}
-
-func find_bzl_project_root(libpath, path string) (string, error) {
- if libpath == "" {
- return "", fmt.Errorf("could not find project root, libpath is empty")
- }
-
- pathMap := map[string]struct{}{}
- for _, lp := range strings.Split(libpath, ":") {
- lp := strings.TrimSpace(lp)
- pathMap[filepath.Clean(lp)] = struct{}{}
- }
-
- path = filepath.Dir(path)
- if path == "" {
- return "", fmt.Errorf("project root is blank")
- }
-
- start := path
- for path != "/" {
- if _, ok := pathMap[filepath.Clean(path)]; ok {
- return path, nil
- }
- path = filepath.Dir(path)
- }
- return "", fmt.Errorf("could not find project root in %q or its parents", start)
-}
-
-// Code taken directly from `gb`, I hope author doesn't mind.
-func find_gb_project_root(path string) (string, error) {
- path = filepath.Dir(path)
- if path == "" {
- return "", fmt.Errorf("project root is blank")
- }
- start := path
- for path != "/" {
- root := filepath.Join(path, "src")
- if _, err := os.Stat(root); err != nil {
- if os.IsNotExist(err) {
- path = filepath.Dir(path)
- continue
- }
- return "", err
- }
- path, err := filepath.EvalSymlinks(path)
- if err != nil {
- return "", err
- }
- return path, nil
- }
- return "", fmt.Errorf("could not find project root in %q or its parents", start)
-}
-
-// vendorlessImportPath returns the devendorized version of the provided import path.
-// e.g. "foo/bar/vendor/a/b" => "a/b"
-func vendorlessImportPath(ipath string, currentPackagePath string) (string, bool) {
- split := strings.Split(ipath, "vendor/")
- // no vendor in path
- if len(split) == 1 {
- return ipath, true
- }
- // this import path does not belong to the current package
- if currentPackagePath != "" && !strings.Contains(currentPackagePath, split[0]) {
- return "", false
- }
- // Devendorize for use in import statement.
- if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 {
- return ipath[i+len("/vendor/"):], true
- }
- if strings.HasPrefix(ipath, "vendor/") {
- return ipath[len("vendor/"):], true
- }
- return ipath, true
-}
-
-//-------------------------------------------------------------------------
-// print_backtrace
-//
-// a nicer backtrace printer than the default one
-//-------------------------------------------------------------------------
-
-var g_backtrace_mutex sync.Mutex
-
-func print_backtrace(err interface{}) {
- g_backtrace_mutex.Lock()
- defer g_backtrace_mutex.Unlock()
- fmt.Printf("panic: %v\n", err)
- i := 2
- for {
- pc, file, line, ok := runtime.Caller(i)
- if !ok {
- break
- }
- f := runtime.FuncForPC(pc)
- fmt.Printf("%d(%s): %s:%d\n", i-1, f.Name(), file, line)
- i++
- }
- fmt.Println("")
-}
-
-//-------------------------------------------------------------------------
-// File reader goroutine
-//
-// It's a bad idea to block multiple goroutines on file I/O. Creates many
-// threads which fight for HDD. Therefore only single goroutine should read HDD
-// at the same time.
-//-------------------------------------------------------------------------
-
-type file_read_request struct {
- filename string
- out chan file_read_response
-}
-
-type file_read_response struct {
- data []byte
- error error
-}
-
-type file_reader_type struct {
- in chan file_read_request
-}
-
-func new_file_reader() *file_reader_type {
- this := new(file_reader_type)
- this.in = make(chan file_read_request)
- go func() {
- var rsp file_read_response
- for {
- req := <-this.in
- rsp.data, rsp.error = ioutil.ReadFile(req.filename)
- req.out <- rsp
- }
- }()
- return this
-}
-
-func (this *file_reader_type) read_file(filename string) ([]byte, error) {
- req := file_read_request{
- filename,
- make(chan file_read_response),
- }
- this.in <- req
- rsp := <-req.out
- return rsp.data, rsp.error
-}
-
-var file_reader = new_file_reader()
-
-//-------------------------------------------------------------------------
-// copy of the build.Context without func fields
-//-------------------------------------------------------------------------
-
-type go_build_context struct {
- GOARCH string
- GOOS string
- GOROOT string
- GOPATH string
- CgoEnabled bool
- UseAllFiles bool
- Compiler string
- BuildTags []string
- ReleaseTags []string
- InstallSuffix string
-}
-
-func pack_build_context(ctx *build.Context) go_build_context {
- return go_build_context{
- GOARCH: ctx.GOARCH,
- GOOS: ctx.GOOS,
- GOROOT: ctx.GOROOT,
- GOPATH: ctx.GOPATH,
- CgoEnabled: ctx.CgoEnabled,
- UseAllFiles: ctx.UseAllFiles,
- Compiler: ctx.Compiler,
- BuildTags: ctx.BuildTags,
- ReleaseTags: ctx.ReleaseTags,
- InstallSuffix: ctx.InstallSuffix,
- }
-}
-
-func unpack_build_context(ctx *go_build_context) package_lookup_context {
- return package_lookup_context{
- Context: build.Context{
- GOARCH: ctx.GOARCH,
- GOOS: ctx.GOOS,
- GOROOT: ctx.GOROOT,
- GOPATH: ctx.GOPATH,
- CgoEnabled: ctx.CgoEnabled,
- UseAllFiles: ctx.UseAllFiles,
- Compiler: ctx.Compiler,
- BuildTags: ctx.BuildTags,
- ReleaseTags: ctx.ReleaseTags,
- InstallSuffix: ctx.InstallSuffix,
- },
- }
-}
diff --git a/src/disposa.blue/margo/golang/lint.go b/src/disposa.blue/margo/golang/lint.go
deleted file mode 100644
index 498fc42d..00000000
--- a/src/disposa.blue/margo/golang/lint.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package golang
-
-import (
- "bytes"
- "disposa.blue/margo/mg"
- "os"
- "os/exec"
- "regexp"
- "sync"
-)
-
-type LintArgs struct {
- Writer *mg.IssueWriter
- Env mg.EnvMap
- Dir string
-}
-
-type LintFunc func(LintArgs) error
-
-type LinterOpts struct {
- Log *mg.Logger
- Actions []mg.Action
- Patterns []*regexp.Regexp
- Lint LintFunc
- Label string
-}
-
-type linterSupport struct {
- nCh chan struct{}
- mu sync.RWMutex
- dir string
- issues mg.IssueSet
-}
-
-func (ls *linterSupport) Reduce(lo LinterOpts, mx *mg.Ctx) *mg.State {
- ls.mu.RLock()
- defer ls.mu.RUnlock()
-
- if mx.ActionIs(mg.Started{}) {
- ls.start(lo, mx.Store)
- }
- dir := mx.View.Dir()
- if mx.ActionIs(lo.Actions...) && IsPkgDir(dir) {
- ls.notify()
- }
-
- if ls.dir == dir {
- return mx.State.AddIssues(ls.issues...)
- }
- return mx.State
-}
-
-func (ls *linterSupport) Command(la LintArgs, name string, args ...string) *exec.Cmd {
- cmd := exec.Command(name, args...)
- cmd.Env = la.Env.Environ()
- cmd.Stdout = la.Writer
- cmd.Stderr = la.Writer
- cmd.Dir = la.Dir
- return cmd
-}
-
-func (ls *linterSupport) notify() {
- select {
- case ls.nCh <- struct{}{}:
- default:
- }
-}
-
-func (ls *linterSupport) start(lo LinterOpts, sto *mg.Store) {
- ls.nCh = make(chan struct{}, 1)
- go ls.loop(lo, sto)
-}
-
-func (ls *linterSupport) loop(lo LinterOpts, sto *mg.Store) {
- for range ls.nCh {
- st := sto.State()
- dir := st.View.Dir()
- if IsPkgDir(dir) {
- ls.lint(lo, sto.Dispatch, st, dir)
- }
- }
-}
-
-func (ls *linterSupport) lint(lo LinterOpts, dispatch mg.Dispatcher, st *mg.State, dir string) {
- defer dispatch(mg.Render)
-
- buf := bytes.NewBuffer(nil)
- w := &mg.IssueWriter{
- Writer: buf,
- Patterns: lo.Patterns,
- Base: mg.Issue{Tag: mg.IssueError, Label: lo.Label},
- Dir: dir,
- }
- if len(w.Patterns) == 0 {
- w.Patterns = CommonPatterns
- }
- err := lo.Lint(LintArgs{
- Writer: w,
- Env: st.Env,
- Dir: dir,
- })
- w.Flush()
- issues := w.Issues()
- if len(issues) == 0 && err != nil {
- out := bytes.TrimSpace(buf.Bytes())
- lo.Log.Printf("golang.linterSupport: '%s' in '%s' failed: %s\n%s\n", lo.Label, dir, err, out)
- }
-
- ls.mu.Lock()
- ls.dir = dir
- ls.issues = issues
- ls.mu.Unlock()
-}
-
-type Linter struct {
- linterSupport
-
- Name string
- Args []string
- Label string
- TempDir []string
-}
-
-func (lt *Linter) Reduce(mx *mg.Ctx) *mg.State {
- return lt.linterSupport.Reduce(LinterOpts{
- Log: mx.Log,
- Actions: []mg.Action{mg.ViewSaved{}},
- Patterns: CommonPatterns,
- Lint: lt.lint,
- Label: lt.Label,
- }, mx)
-}
-
-func (lt *Linter) lint(la LintArgs) error {
- if len(lt.TempDir) != 0 {
- tmpDir, err := mg.MkTempDir(lt.Label)
- if err != nil {
- return err
- }
- defer os.RemoveAll(tmpDir)
- for _, k := range lt.TempDir {
- la.Env = la.Env.Add(k, tmpDir)
- }
- }
- return lt.Command(la, lt.Name, lt.Args...).Run()
-}
-
-func GoInstall(args ...string) *Linter {
- return &Linter{
- Name: "go",
- Args: append([]string{"install"}, args...),
- Label: "Go/Install",
- }
-}
-
-func GoInstallDiscardBinaries(args ...string) *Linter {
- lt := GoInstall(args...)
- lt.TempDir = append(lt.TempDir, "GOBIN")
- return lt
-}
-
-func GoVet(args ...string) *Linter {
- return &Linter{
- Name: "go",
- Args: append([]string{"vet"}, args...),
- Label: "Go/Vet",
- }
-}
-
-func GoTest(args ...string) *Linter {
- return &Linter{
- Name: "go",
- Args: append([]string{"test"}, args...),
- Label: "Go/Test",
- }
-}
diff --git a/src/disposa.blue/margo/golang/parse.go b/src/disposa.blue/margo/golang/parse.go
deleted file mode 100644
index 29257cbf..00000000
--- a/src/disposa.blue/margo/golang/parse.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package golang
-
-import (
- "disposa.blue/margo/mg"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "io/ioutil"
-)
-
-const (
- ParseFileMode = parser.ParseComments | parser.DeclarationErrors | parser.AllErrors
-)
-
-var (
- NilFset = token.NewFileSet()
- NilAstFile, _ = parser.ParseFile(NilFset, "", `package _`, 0)
- NilTokenFile = NilFset.File(NilAstFile.Pos())
-)
-
-type ParsedFile struct {
- Fset *token.FileSet
- AstFile *ast.File
- TokenFile *token.File
- Error error
- ErrorList scanner.ErrorList
-}
-
-func ParseFile(kvs mg.KVStore, fn string, src []byte) *ParsedFile {
- mode := ParseFileMode
- if len(src) == 0 {
- var err error
- if fn != "" {
- src, err = ioutil.ReadFile(fn)
- }
- if len(src) == 0 {
- return &ParsedFile{
- Fset: NilFset,
- AstFile: NilAstFile,
- TokenFile: NilTokenFile,
- Error: err,
- }
- }
- }
-
- type key struct{ hash string }
- k := key{mg.SrcHash(src)}
- if kvs != nil {
- if pf, ok := kvs.Get(k).(*ParsedFile); ok {
- return pf
- }
- }
-
- pf := &ParsedFile{Fset: token.NewFileSet()}
- pf.AstFile, pf.Error = parser.ParseFile(pf.Fset, fn, src, mode)
- pf.TokenFile = pf.Fset.File(pf.AstFile.Pos())
- pf.ErrorList, _ = pf.Error.(scanner.ErrorList)
- if pf.AstFile == nil {
- pf.AstFile = NilAstFile
- }
- if pf.TokenFile == nil {
- pf.TokenFile = NilTokenFile
- }
-
- if kvs != nil {
- kvs.Put(k, pf)
- }
-
- return pf
-}
diff --git a/src/disposa.blue/margo/golang/snippets.go b/src/disposa.blue/margo/golang/snippets.go
deleted file mode 100644
index ba8b5354..00000000
--- a/src/disposa.blue/margo/golang/snippets.go
+++ /dev/null
@@ -1,355 +0,0 @@
-package golang
-
-import (
- "disposa.blue/margo/mg"
- "go/ast"
- "unicode"
- "unicode/utf8"
-)
-
-var (
- Snippets = SnippetFuncs{
- PackageNameSnippet,
- MainFuncSnippet,
- InitFuncSnippet,
- FuncSnippet,
- MethodSnippet,
- GenDeclSnippet,
- MapSnippet,
- TypeSnippet,
- }
-)
-
-type SnippetFuncs []func(*CompletionCtx) []mg.Completion
-
-func (sf SnippetFuncs) Reduce(mx *mg.Ctx) *mg.State {
- if !mx.LangIs("go") || !mx.ActionIs(mg.QueryCompletions{}) {
- return mx.State
- }
-
- src, _ := mx.View.ReadAll()
- pos := mx.View.Pos
- for {
- r, n := utf8.DecodeLastRune(src[:pos])
- if !IsLetter(r) {
- break
- }
- pos -= n
- }
- cx := NewCompletionCtx(mx, src, pos)
- if cx.Scope.Any(StringScope, ImportPathScope, CommentScope) {
- return mx.State
- }
-
- var cl []mg.Completion
- for _, f := range sf {
- cl = append(cl, f(cx)...)
- }
- for i, _ := range cl {
- sf.fixCompletion(&cl[i])
- }
- return mx.State.AddCompletions(cl...)
-}
-
-func (sf SnippetFuncs) fixCompletion(c *mg.Completion) {
- c.Src = DedentCompletion(c.Src)
- if c.Tag == "" {
- c.Tag = mg.SnippetTag
- }
-}
-
-func PackageNameSnippet(cx *CompletionCtx) []mg.Completion {
- if cx.PkgName != "" || !cx.Scope.Is(PackageScope) {
- return nil
- }
-
- name := "main"
- bx := BuildContext(cx.Ctx)
- pkg, _ := bx.ImportDir(cx.View.Dir(), 0)
- if pkg != nil && pkg.Name != "" {
- name = pkg.Name
- }
-
- return []mg.Completion{{
- Query: `package ` + name,
- Src: `
- package ` + name + `
-
- $0
- `,
- }}
-}
-
-func MainFuncSnippet(cx *CompletionCtx) []mg.Completion {
- if !cx.Scope.Is(FileScope) || cx.PkgName != "main" {
- return nil
- }
-
- for _, x := range cx.AstFile.Decls {
- x, ok := x.(*ast.FuncDecl)
- if ok && x.Name != nil && x.Name.String() == "main" {
- return nil
- }
- }
-
- return []mg.Completion{{
- Query: `func main`,
- Title: `main() {...}`,
- Src: `
- func main() {
- $0
- }
- `,
- }}
-}
-
-func InitFuncSnippet(cx *CompletionCtx) []mg.Completion {
- if !cx.Scope.Is(FileScope) {
- return nil
- }
-
- for _, x := range cx.AstFile.Decls {
- x, ok := x.(*ast.FuncDecl)
- if ok && x.Name != nil && x.Name.String() == "init" {
- return nil
- }
- }
-
- return []mg.Completion{{
- Query: `func init`,
- Title: `init() {...}`,
- Src: `
- func init() {
- $0
- }
- `,
- }}
-}
-
-func FuncSnippet(cx *CompletionCtx) []mg.Completion {
- if cx.Scope.Is(FileScope) {
- comp := mg.Completion{
- Query: `func`,
- Title: `name() {...}`,
- Src: `
- func ${1:name}($2)$3 {
- $0
- }
- `,
- }
- if !cx.IsTestFile {
- return []mg.Completion{comp}
- }
- return []mg.Completion{
- {
- Query: `func Test`,
- Title: `Test() {...}`,
- Src: `
- func Test${1:name}(t *testing.T) {
- $0
- }
- `,
- },
- {
- Query: `func Benchmark`,
- Title: `Benchmark() {...}`,
- Src: `
- func Benchmark${1:name}(b *testing.B) {
- $0
- }
- `,
- },
- {
- Query: `func Example`,
- Title: `Example() {...}`,
- Src: `
- func Example${1:name}() {
- $0
-
- // Output:
- }
- `,
- },
- }
- }
-
- if cx.Scope.Any(BlockScope, VarScope) {
- return []mg.Completion{{
- Query: `func`,
- Title: `func() {...}`,
- Src: `
- func($1)$2 {
- $3
- }$0
- `,
- }}
- }
-
- return nil
-}
-
-func receiverName(typeName string) string {
- name := make([]rune, 0, 4)
- for _, r := range typeName {
- if len(name) == 0 || unicode.IsUpper(r) {
- name = append(name, unicode.ToLower(r))
- }
- }
- return string(name)
-}
-
-func MethodSnippet(cx *CompletionCtx) []mg.Completion {
- if cx.IsTestFile || !cx.Scope.Is(FileScope) {
- return nil
- }
-
- type field struct {
- nm string
- typ string
- }
- fields := map[string]field{}
- types := []string{}
-
- for _, x := range cx.AstFile.Decls {
- switch x := x.(type) {
- case *ast.FuncDecl:
- if x.Recv == nil || len(x.Recv.List) == 0 {
- continue
- }
-
- r := x.Recv.List[0]
- if len(r.Names) == 0 {
- continue
- }
-
- name := ""
- if id := r.Names[0]; id != nil {
- name = id.String()
- }
-
- switch x := r.Type.(type) {
- case *ast.Ident:
- typ := x.String()
- fields[typ] = field{nm: name, typ: typ}
- case *ast.StarExpr:
- if id, ok := x.X.(*ast.Ident); ok {
- typ := id.String()
- fields[typ] = field{nm: name, typ: "*" + typ}
- }
- }
- case *ast.GenDecl:
- for _, spec := range x.Specs {
- spec, ok := spec.(*ast.TypeSpec)
- if ok && spec.Name != nil {
- types = append(types, spec.Name.String())
- }
- }
- }
- }
-
- cl := make([]mg.Completion, 0, len(types))
- for _, typ := range types {
- if f, ok := fields[typ]; ok {
- cl = append(cl, mg.Completion{
- Query: `func method ` + f.typ,
- Title: `(` + f.typ + `) method() {...}`,
- Src: `
- func (` + f.nm + ` ` + f.typ + `) ${1:name}($2)$3 {
- $0
- }
- `,
- })
- } else {
- nm := receiverName(typ)
- cl = append(cl, mg.Completion{
- Query: `func method ` + typ,
- Title: `(` + typ + `) method() {...}`,
- Src: `
- func (${1:` + nm + `} ${2:*` + typ + `}) ${3:name}($4)$5 {
- $0
- }
- `,
- })
- }
- }
-
- return cl
-}
-func (sf SnippetFuncs) name() {
-
-}
-
-func GenDeclSnippet(cx *CompletionCtx) []mg.Completion {
- if !cx.Scope.Is(FileScope) {
- return nil
- }
- return []mg.Completion{
- {
- Query: `import`,
- Title: `(...)`,
- Src: `
- import (
- "$0"
- )
- `,
- },
- {
- Query: `var`,
- Title: `(...)`,
- Src: `
- var (
- ${1:name} = ${2:value}
- )
- `,
- },
- {
- Query: `const`,
- Title: `(...)`,
- Src: `
- const (
- ${1:name} = ${2:value}
- )
- `,
- },
- }
-}
-
-func MapSnippet(cx *CompletionCtx) []mg.Completion {
- if !cx.Scope.Any(VarScope, BlockScope) {
- return nil
- }
- return []mg.Completion{
- {
- Query: `map`,
- Title: `map[T]T`,
- Src: `map[${1:T}]${2:T}`,
- },
- {
- Query: `map`,
- Title: `map[T]T{...}`,
- Src: `map[${1:T}]${2:T}{$0}`,
- },
- }
-}
-
-func TypeSnippet(cx *CompletionCtx) []mg.Completion {
- if !cx.Scope.Any(FileScope, BlockScope) {
- return nil
- }
- return []mg.Completion{
- {
- Query: `type struct`,
- Title: `struct {}`,
- Src: `
- type ${1:T} struct {
- ${2:V}
- }
- `,
- },
- {
- Query: `type`,
- Title: `type T`,
- Src: `type ${1:T} ${2:V}`,
- },
- }
-}
diff --git a/src/disposa.blue/margo/golang/syntaxcheck.go b/src/disposa.blue/margo/golang/syntaxcheck.go
deleted file mode 100644
index 137a4e48..00000000
--- a/src/disposa.blue/margo/golang/syntaxcheck.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package golang
-
-import (
- "disposa.blue/margo/mg"
- "go/scanner"
-)
-
-type SyntaxCheck struct{}
-
-func (sc *SyntaxCheck) Reduce(mx *mg.Ctx) *mg.State {
- st := mx.State
- if !st.View.LangIs("go") {
- return st
- }
-
- v := st.View
- src, err := v.ReadAll()
- if err != nil {
- return st.Errorf("cannot read: %s: %s", v.Filename(), err)
- }
-
- type key struct{ hash string }
- k := key{mg.SrcHash(src)}
- if issues, ok := mx.Store.Get(k).(mg.IssueSet); ok {
- return st.AddIssues(issues...)
- }
-
- if !mx.ActionIs(mg.ViewActivated{}, mg.ViewModified{}, mg.ViewSaved{}, mg.QueryIssues{}) {
- return st
- }
-
- pf := ParseFile(mx.Store, v.Filename(), src)
- issues := sc.errsToIssues(v, pf.ErrorList)
- mx.Store.Put(k, issues)
-
- return st.AddIssues(issues...)
-}
-
-func (_ *SyntaxCheck) errsToIssues(v *mg.View, el scanner.ErrorList) mg.IssueSet {
- issues := make(mg.IssueSet, len(el))
- for i, e := range el {
- issues[i] = mg.Issue{
- Path: v.Path,
- Name: v.Name,
- Row: e.Pos.Line - 1,
- Col: e.Pos.Column - 1,
- Message: e.Msg,
- Tag: mg.IssueError,
- Label: "Go/SyntaxCheck",
- }
- }
- return issues
-}
diff --git a/src/disposa.blue/margo/mg/action.go b/src/disposa.blue/margo/mg/action.go
deleted file mode 100644
index 7747c3e5..00000000
--- a/src/disposa.blue/margo/mg/action.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package mg
-
-var (
- actionCreators = map[string]actionCreator{
- "QueryCompletions": func() Action { return QueryCompletions{} },
- "QueryIssues": func() Action { return QueryIssues{} },
- "QueryTooltips": func() Action { return QueryTooltips{} },
- "Restart": func() Action { return Restart{} },
- "Shutdown": func() Action { return Shutdown{} },
- "ViewActivated": func() Action { return ViewActivated{} },
- "ViewClosed": func() Action { return ViewClosed{} },
- "ViewFmt": func() Action { return ViewFmt{} },
- "ViewLoaded": func() Action { return ViewLoaded{} },
- "ViewModified": func() Action { return ViewModified{} },
- "ViewPosChanged": func() Action { return ViewPosChanged{} },
- "ViewPreSave": func() Action { return ViewPreSave{} },
- "ViewSaved": func() Action { return ViewSaved{} },
- }
-)
-
-type actionCreator func() Action
-
-type ActionType struct{}
-
-func (act ActionType) Type() ActionType {
- return act
-}
-
-type Action interface {
- Type() ActionType
-}
-
-var Render Action = nil
-
-// Started is dispatched to indicate the start of IPC communication.
-// It's the first action that is dispatched.
-// Reducers may do lazy initialization during this action.
-type Started struct{ ActionType }
-
-type QueryCompletions struct{ ActionType }
-
-type QueryIssues struct{ ActionType }
-
-type QueryTooltips struct{ ActionType }
-
-type Restart struct{ ActionType }
-
-type Shutdown struct{ ActionType }
-
-type ViewActivated struct{ ActionType }
-
-type ViewModified struct{ ActionType }
-
-type ViewPosChanged struct{ ActionType }
-
-type ViewFmt struct{ ActionType }
-
-type ViewPreSave struct{ ActionType }
-
-type ViewSaved struct{ ActionType }
-
-type ViewLoaded struct{ ActionType }
-
-type ViewClosed struct{ ActionType }
diff --git a/src/disposa.blue/margo/mg/agent.go b/src/disposa.blue/margo/mg/agent.go
deleted file mode 100644
index 2120dc40..00000000
--- a/src/disposa.blue/margo/mg/agent.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package mg
-
-import (
- "bufio"
- "fmt"
- "github.com/ugorji/go/codec"
- "io"
- "log"
- "os"
- "sort"
- "strings"
- "sync"
-)
-
-var (
- // DefaultCodec is the name of the default codec used for IPC communication
- DefaultCodec = "json"
-
- // codecHandles is the map of all valid codec handles
- codecHandles = func() map[string]codec.Handle {
- m := map[string]codec.Handle{
- "cbor": &codec.CborHandle{},
- "json": &codec.JsonHandle{
- Indent: 2,
- TermWhitespace: true,
- },
- "msgpack": &codec.MsgpackHandle{},
- }
- m[""] = m[DefaultCodec]
- return m
- }()
-
- // CodecNames is the list of names of all valid codec handles
- CodecNames = func() []string {
- l := make([]string, 0, len(codecHandles))
- for k, _ := range codecHandles {
- if k != "" {
- l = append(l, k)
- }
- }
- sort.Strings(l)
- return l
- }()
-
- // CodecNamesStr is the list of names of all valid codec handles in the form `a, b or c`
- CodecNamesStr = func() string {
- i := len(CodecNames) - 1
- return strings.Join(CodecNames[:i], ", ") + " or " + CodecNames[i]
-
- }()
-)
-
-type AgentConfig struct {
- // Codec is the name of the codec to use for IPC
- // Valid values are json, cbor or msgpack
- // Default: json
- Codec string
-
- Stdin io.ReadCloser
- Stdout io.WriteCloser
- Stderr io.WriteCloser
-}
-
-type agentReq struct {
- Cookie string
- Action struct {
- Name string
- }
- Props clientProps
-}
-
-func newAgentReq() *agentReq {
- return &agentReq{Props: makeClientProps()}
-}
-
-func (rq *agentReq) finalize(ag *Agent) {
- rq.Props.finalize(ag)
-}
-
-type agentRes struct {
- Cookie string
- Error string
- State *State
-}
-
-func (rs agentRes) finalize() interface{} {
- v := struct {
- agentRes
- State struct {
- State
- Config interface{}
- ClientActions []clientAction
- }
- }{}
- v.agentRes = rs
- v.State.State = *rs.State
- v.State.ClientActions = rs.State.clientActions
-
- if v.Error == "" {
- v.Error = strings.Join([]string(v.State.Errors), "\n")
- }
-
- if v.State.View.changed == 0 {
- v.State.View = nil
- }
-
- if ec := rs.State.Config; ec != nil {
- v.State.Config = ec.EditorConfig()
- }
-
- return v
-}
-
-type Agent struct {
- Log *Logger
- Store *Store
-
- mu sync.Mutex
-
- stdin io.ReadCloser
- stdout io.WriteCloser
- stderr io.WriteCloser
-
- handle codec.Handle
- enc *codec.Encoder
- encWr *bufio.Writer
- dec *codec.Decoder
-}
-
-func (ag *Agent) Run() error {
- defer ag.shutdownIPC()
- return ag.communicate()
-}
-
-func (ag *Agent) communicate() error {
- ag.Log.Println("started")
- ag.Store.dispatch(Started{})
- ag.Store.ready()
-
- for {
- rq := newAgentReq()
- if err := ag.dec.Decode(rq); err != nil {
- if err == io.EOF {
- return nil
- }
- return fmt.Errorf("ipc.decode: %s", err)
- }
- rq.finalize(ag)
- // TODO: put this on a channel in the future.
- // at the moment we lock the store and block new requests to maintain request/response order
- // but decoding time could become a problem if we start sending large requests from the client
- // we currently only have 1 client (GoSublime) that we also control so it's ok for now...
- ag.Store.syncRq(ag, rq)
- }
- return nil
-}
-
-func (ag *Agent) createAction(name string) Action {
- if f := actionCreators[name]; f != nil {
- return f()
- }
- return nil
-}
-
-func (ag *Agent) listener(st *State) {
- err := ag.send(agentRes{State: st})
- if err != nil {
- ag.Log.Println("agent.send failed. shutting down ipc:", err)
- go ag.shutdownIPC()
- }
-}
-
-func (ag *Agent) send(res agentRes) error {
- ag.mu.Lock()
- defer ag.mu.Unlock()
-
- defer ag.encWr.Flush()
- return ag.enc.Encode(res.finalize())
-}
-
-func (ag *Agent) shutdownIPC() {
- defer ag.stdin.Close()
- defer ag.stdout.Close()
-}
-
-func NewAgent(cfg AgentConfig) (*Agent, error) {
- ag := &Agent{
- stdin: cfg.Stdin,
- stdout: cfg.Stdout,
- stderr: cfg.Stderr,
- handle: codecHandles[cfg.Codec],
- }
- if ag.stdin == nil {
- ag.stdin = os.Stdin
- }
- if ag.stdout == nil {
- ag.stdout = os.Stdout
- }
- if ag.stderr == nil {
- ag.stderr = os.Stderr
- }
- ag.stdin = &LockedReadCloser{ReadCloser: ag.stdin}
- ag.stdout = &LockedWriteCloser{WriteCloser: ag.stdout}
- ag.stderr = &LockedWriteCloser{WriteCloser: ag.stderr}
- ag.Log = &Logger{
- Logger: log.New(ag.stderr, "", log.Lshortfile),
- Dbg: log.New(ag.stderr, "DBG: ", log.Lshortfile),
- }
- ag.Store = newStore(ag, ag.listener).
- Before(defaultReducers.before...).
- Use(defaultReducers.use...).
- After(defaultReducers.after...)
-
- if e := os.Getenv("MARGO_SUBLIME_INSTALL_FAILED"); e != "" {
- ag.Store.Use(Reduce(func(mx *Ctx) *State {
- return mx.AddStatus(e)
- }))
- }
-
- if ag.handle == nil {
- return ag, fmt.Errorf("Invalid codec '%s'. Expected %s", cfg.Codec, CodecNamesStr)
- }
- ag.encWr = bufio.NewWriter(ag.stdout)
- ag.enc = codec.NewEncoder(ag.encWr, ag.handle)
- ag.dec = codec.NewDecoder(bufio.NewReader(ag.stdin), ag.handle)
-
- return ag, nil
-}
-
-func (ag *Agent) Args() Args {
- return Args{
- Store: ag.Store,
- Log: ag.Log,
- }
-}
diff --git a/src/disposa.blue/margo/mg/agent_test.go b/src/disposa.blue/margo/mg/agent_test.go
deleted file mode 100644
index 8feba5ee..00000000
--- a/src/disposa.blue/margo/mg/agent_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package mg
-
-import (
- "os"
- "strings"
- "testing"
-)
-
-// TestDefaults tries to verify some assumptions that are, or will be, made throughout the code-base
-// the following should hold true regardless of what configuration is exposed in the future
-// * the default codec should be json
-// * logs should go to os.Stderr by default
-// * IPC communication should be done on os.Stdin and os.Stdout by default
-func TestDefaults(t *testing.T) {
- ag, err := NewAgent(AgentConfig{})
- if err != nil {
- t.Errorf("agent creation failed: %s", err)
- return
- }
-
- stdin := ag.stdin
- if w, ok := stdin.(*LockedReadCloser); ok {
- stdin = w.ReadCloser
- }
- stdout := ag.stdout
- if w, ok := stdout.(*LockedWriteCloser); ok {
- stdout = w.WriteCloser
- }
- stderr := ag.stderr
- if w, ok := stderr.(*LockedWriteCloser); ok {
- stderr = w.WriteCloser
- }
-
- cases := []struct {
- name string
- expect interface{}
- got interface{}
- }{
- {`DefaultCodec == json`, true, DefaultCodec == "json"},
- {`codecHandles[DefaultCodec] exists`, true, codecHandles[DefaultCodec] != nil},
- {`codecHandles[""] == codecHandles[DefaultCodec]`, true, codecHandles[""] == codecHandles[DefaultCodec]},
- {`default Agent.stdin`, os.Stdin, stdin},
- {`default Agent.stdout`, os.Stdout, stdout},
- {`default Agent.stderr`, os.Stderr, stderr},
- }
-
- for _, c := range cases {
- if c.expect != c.got {
- t.Errorf("%s? expected '%v', got '%v'", c.name, c.expect, c.got)
- }
- }
-}
-
-func TestFirstAction(t *testing.T) {
- nrwc := NopReadWriteCloser{
- Reader: strings.NewReader("{}\n"),
- }
- ag, err := NewAgent(AgentConfig{
- Stdin: nrwc,
- Stdout: nrwc,
- Stderr: nrwc,
- })
- if err != nil {
- t.Errorf("agent creation failed: %s", err)
- return
- }
-
- actions := make(chan Action, 1)
- ag.Store.Use(Reduce(func(mx *Ctx) *State {
- select {
- case actions <- mx.Action:
- default:
- }
- return mx.State
- }))
-
- // there is a small chance that some other package might dispatch an action
- // before we're ready e.g. in init()
- type impossibru struct{ ActionType }
- ag.Store.Dispatch(impossibru{})
-
- go ag.Run()
- act := <-actions
- switch act.(type) {
- case Started:
- default:
- t.Errorf("Expected first action to be `%T`, but it was %T\n", Started{}, act)
- }
-}
diff --git a/src/disposa.blue/margo/mg/client-actions.go b/src/disposa.blue/margo/mg/client-actions.go
deleted file mode 100644
index a0e495c5..00000000
--- a/src/disposa.blue/margo/mg/client-actions.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package mg
-
-var (
- clientRestart = clientActionType{Name: "restart"}
- clientShutdown = clientActionType{Name: "shutdown"}
-)
-
-type clientAction interface {
- Type() clientActionType
-}
-
-type clientActionType struct {
- Name string
- Data interface{}
-}
-
-func (t clientActionType) Type() clientActionType {
- return t
-}
diff --git a/src/disposa.blue/margo/mg/common.go b/src/disposa.blue/margo/mg/common.go
deleted file mode 100644
index a467bbe8..00000000
--- a/src/disposa.blue/margo/mg/common.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package mg
-
-import (
- "io"
- "path/filepath"
- "strings"
- "sync"
-)
-
-type StrSet []string
-
-func (s StrSet) Add(l ...string) StrSet {
- res := make(StrSet, 0, len(s)+len(l))
- for _, lst := range [][]string{[]string(s), l} {
- for _, p := range lst {
- if !res.Has(p) {
- res = append(res, p)
- }
- }
- }
- return res
-}
-
-func (s StrSet) Has(p string) bool {
- for _, q := range s {
- if p == q {
- return true
- }
- }
- return false
-}
-
-type EnvMap map[string]string
-
-func (e EnvMap) Add(k, v string) EnvMap {
- m := make(EnvMap, len(e)+1)
- for k, v := range e {
- m[k] = v
- }
- m[k] = v
- return m
-}
-
-func (e EnvMap) Environ() []string {
- l := make([]string, 0, len(e))
- for k, v := range e {
- l = append(l, k+"="+v)
- }
- return l
-}
-
-func (e EnvMap) Get(k, def string) string {
- if v := e[k]; v != "" {
- return v
- }
- return def
-}
-
-func (e EnvMap) List(k string) []string {
- return strings.Split(e[k], string(filepath.ListSeparator))
-}
-
-type LockedWriteCloser struct {
- io.WriteCloser
- mu sync.Mutex
-}
-
-func (w *LockedWriteCloser) Write(p []byte) (int, error) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- return w.WriteCloser.Write(p)
-}
-
-func (w *LockedWriteCloser) Close() error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- return w.WriteCloser.Close()
-}
-
-type LockedReadCloser struct {
- io.ReadCloser
- mu sync.Mutex
-}
-
-func (r *LockedReadCloser) Read(p []byte) (int, error) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- return r.ReadCloser.Read(p)
-}
-
-func (r *LockedReadCloser) Close() error {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- return r.ReadCloser.Close()
-}
-
-type NopReadWriteCloser struct {
- io.Reader
- io.Writer
- io.Closer
-}
-
-func (n NopReadWriteCloser) Read(p []byte) (int, error) {
- if n.Reader != nil {
- return n.Reader.Read(p)
- }
- return 0, io.EOF
-}
-
-func (n NopReadWriteCloser) Write(p []byte) (int, error) {
- if n.Writer != nil {
- return n.Writer.Write(p)
- }
- return len(p), nil
-}
-
-func (n NopReadWriteCloser) Close() error {
- if n.Closer != nil {
- return n.Closer.Close()
- }
- return nil
-}
-
-func IsParentDir(parentDir, childPath string) bool {
- p, err := filepath.Rel(parentDir, childPath)
- return err == nil && p != "." && !strings.HasPrefix(p, ".."+string(filepath.Separator))
-}
diff --git a/src/disposa.blue/margo/mg/db.go b/src/disposa.blue/margo/mg/db.go
deleted file mode 100644
index 6399efef..00000000
--- a/src/disposa.blue/margo/mg/db.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package mg
-
-type KVStore interface {
- Put(k interface{}, v interface{})
- Get(k interface{}) interface{}
- Del(k interface{})
-}
diff --git a/src/disposa.blue/margo/mg/issue.go b/src/disposa.blue/margo/mg/issue.go
deleted file mode 100644
index 07ea1528..00000000
--- a/src/disposa.blue/margo/mg/issue.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package mg
-
-import (
- "bytes"
- "fmt"
- "io"
- "path/filepath"
- "regexp"
- "strconv"
- "sync"
-)
-
-var (
- CommonPatterns = []*regexp.Regexp{
- regexp.MustCompile(`^\s*(?P.+?\.\w+):(?P\d+:)(?P\d+:?)?(?P.+)$`),
- regexp.MustCompile(`^\s*(?P.+?\.\w+)\((?P\d+)(?:,(?P\d+))?\):(?P.+)$`),
- }
-)
-
-type IssueTag string
-
-const (
- IssueError = IssueTag("error")
- IssueWarning = IssueTag("warning")
-)
-
-type Issue struct {
- Path string
- Name string
- Row int
- Col int
- End int
- Tag IssueTag
- Label string
- Message string
-}
-
-func (isu Issue) Equal(p Issue) bool {
- return isu.SameFile(p) && isu.Row == p.Row && isu.Message == p.Message
-}
-
-func (isu Issue) SameFile(p Issue) bool {
- if isu.Path != "" {
- return isu.Path == p.Path
- }
- return isu.Name == p.Name
-}
-
-func (isu Issue) InView(v *View) bool {
- if isu.Path != "" {
- return v.Path == isu.Path
- }
- return isu.Name == v.Name
-}
-
-func (isu Issue) Valid() bool {
- return (isu.Name != "" || isu.Path != "") && isu.Message != ""
-}
-
-type IssueSet []Issue
-
-func (s IssueSet) Equal(issues IssueSet) bool {
- if len(s) != len(issues) {
- return false
- }
- for _, p := range s {
- if !issues.Has(p) {
- return false
- }
- }
- return true
-}
-
-func (s IssueSet) Add(l ...Issue) IssueSet {
- res := make(IssueSet, 0, len(s)+len(l))
- for _, lst := range []IssueSet{s, IssueSet(l)} {
- for _, p := range lst {
- if !res.Has(p) {
- res = append(res, p)
- }
- }
- }
- return res
-}
-
-func (s IssueSet) Remove(l ...Issue) IssueSet {
- res := make(IssueSet, 0, len(s)+len(l))
- q := IssueSet(l)
- for _, p := range s {
- if !q.Has(p) {
- res = append(res, p)
- }
- }
- return res
-}
-
-func (s IssueSet) Has(p Issue) bool {
- for _, q := range s {
- if p.Equal(q) {
- return true
- }
- }
- return false
-}
-
-func (is IssueSet) AllInView(v *View) IssueSet {
- issues := make(IssueSet, 0, len(is))
- for _, i := range is {
- if i.InView(v) {
- issues = append(issues, i)
- }
- }
- return issues
-}
-
-type issueSupport struct{}
-
-func (_ issueSupport) Reduce(mx *Ctx) *State {
- if len(mx.Issues) == 0 {
- return mx.State
- }
-
- status := make([]string, 0, 3)
- status = append(status, "placeholder")
- inview := 0
- for _, isu := range mx.Issues {
- if !isu.InView(mx.View) {
- continue
- }
- inview++
- if len(status) > 1 || isu.Message == "" || isu.Row != mx.View.Row {
- continue
- }
- if isu.Label != "" {
- status = append(status, isu.Label)
- }
- status = append(status, isu.Message)
- }
- status[0] = fmt.Sprintf("Issues (%d/%d)", inview, len(mx.Issues))
- return mx.AddStatus(status...)
-}
-
-type IssueWriter struct {
- Writer io.Writer
- Patterns []*regexp.Regexp
- Base Issue
- Dir string
-
- buf []byte
- mu sync.Mutex
- issues IssueSet
- isu *Issue
- pfx []byte
-}
-
-func (w *IssueWriter) Write(p []byte) (n int, err error) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.buf = append(w.buf, p...)
- w.scan(false)
-
- if w.Writer != nil {
- return w.Writer.Write(p)
- }
- return len(p), nil
-}
-
-func (w *IssueWriter) Flush() error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.flush()
- return nil
-}
-
-func (w *IssueWriter) Issues() IssueSet {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.scan(true)
- issues := make(IssueSet, len(w.issues))
- copy(issues, w.issues)
- return issues
-}
-
-func (w *IssueWriter) scan(scanTail bool) {
- lines := bytes.Split(w.buf, []byte{'\n'})
- var tail []byte
- if !scanTail {
- n := len(lines) - 1
- tail, lines = lines[n], lines[:n]
- }
-
- for _, ln := range lines {
- w.scanLine(bytes.TrimRight(ln, "\r"))
- }
-
- w.buf = append(w.buf[:0], tail...)
-}
-
-func (w *IssueWriter) scanLine(ln []byte) {
- pfx := ln[:len(ln)-len(bytes.TrimLeft(ln, " \t"))]
- ind := bytes.TrimPrefix(pfx, w.pfx)
- if n := len(ind); n > 0 && w.isu != nil {
- w.isu.Message += "\n" + string(ln[len(pfx)-n:])
- return
- }
- w.flush()
-
- w.pfx = pfx
- ln = ln[len(pfx):]
- w.isu = w.match(ln)
-}
-
-func (w *IssueWriter) flush() {
- if w.isu == nil {
- return
- }
- isu := *w.isu
- w.isu = nil
- if isu.Valid() && !w.issues.Has(isu) {
- w.issues = append(w.issues, isu)
- }
-}
-
-func (w *IssueWriter) match(s []byte) *Issue {
- for _, p := range w.Patterns {
- if isu := w.matchOne(p, s); isu != nil {
- return isu
- }
- }
- return nil
-}
-
-func (w *IssueWriter) matchOne(p *regexp.Regexp, s []byte) *Issue {
- submatch := p.FindSubmatch(s)
- if submatch == nil {
- return nil
- }
-
- str := func(s []byte) string {
- return string(bytes.Trim(s, ": \t\r\n"))
- }
- num := func(s []byte) int {
- if n, _ := strconv.Atoi(str(s)); n > 0 {
- return n - 1
- }
- return 0
- }
-
- isu := w.Base
- for i, k := range p.SubexpNames() {
- v := submatch[i]
- switch k {
- case "path":
- isu.Path = str(v)
- if isu.Path != "" && w.Dir != "" && !filepath.IsAbs(isu.Path) {
- isu.Path = filepath.Join(w.Dir, isu.Path)
- }
- case "line":
- isu.Row = num(v)
- case "column":
- isu.Col = num(v)
- case "end":
- isu.End = num(v)
- case "label":
- isu.Label = str(v)
- case "error", "warning":
- isu.Tag = IssueTag(k)
- isu.Message = str(v)
- case "message":
- isu.Message = str(v)
- case "tag":
- tag := IssueTag(str(v))
- if tag == IssueWarning || tag == IssueError {
- isu.Tag = tag
- }
- }
- }
- return &isu
-}
diff --git a/src/disposa.blue/margo/mg/issue_test.go b/src/disposa.blue/margo/mg/issue_test.go
deleted file mode 100644
index 5e703fb8..00000000
--- a/src/disposa.blue/margo/mg/issue_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package mg
-
-import (
- "fmt"
- "testing"
-)
-
-func TestIssueWriter(t *testing.T) {
- base := Issue{Label: "lbl", Tag: IssueWarning}
- w := &IssueWriter{
- Dir: "/abc",
- Base: base,
- Patterns: CommonPatterns,
- }
- fmt.Fprintln(w, "abc.go:555:666: hello world")
- fmt.Fprintln(w, "no match")
- fmt.Fprint(w, "abc.go:555:")
- fmt.Fprint(w, "666: hello\n")
- fmt.Fprintln(w, " world")
- fmt.Fprintln(w, "no match")
- w.Flush()
-
- expect := IssueSet{
- Issue{Path: "/abc/abc.go", Row: 555 - 1, Col: 666 - 1, Tag: base.Tag, Label: base.Label, Message: "hello world"},
- Issue{Path: "/abc/abc.go", Row: 555 - 1, Col: 666 - 1, Tag: base.Tag, Label: base.Label, Message: "hello\n world"},
- }
- issues := w.Issues()
- if !expect.Equal(issues) {
- t.Errorf("IssueWriter parsing failed. Expected %#v, got %#v", expect, issues)
- }
-}
diff --git a/src/disposa.blue/margo/mg/log.go b/src/disposa.blue/margo/mg/log.go
deleted file mode 100644
index dd9e7b23..00000000
--- a/src/disposa.blue/margo/mg/log.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package mg
-
-import (
- "log"
-)
-
-type Logger struct {
- *log.Logger
- Dbg *log.Logger
-}
diff --git a/src/disposa.blue/margo/mg/reducers.go b/src/disposa.blue/margo/mg/reducers.go
deleted file mode 100644
index aad63333..00000000
--- a/src/disposa.blue/margo/mg/reducers.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package mg
-
-import (
- "go/build"
- "os/exec"
- "path/filepath"
- "strings"
-)
-
-var (
- defaultReducers = struct {
- before, use, after []Reducer
- }{
- before: []Reducer{
- restartSupport{},
- },
- after: []Reducer{
- issueSupport{},
- },
- }
-)
-
-type restartSupport struct{}
-
-func (r restartSupport) Reduce(mx *Ctx) *State {
- switch mx.Action.(type) {
- case ViewSaved:
- return r.viewSaved(mx)
- case Restart:
- mx.Log.Printf("%T action dispatched\n", mx.Action)
- return mx.addClientActions(clientRestart)
- case Shutdown:
- mx.Log.Printf("%T action dispatched\n", mx.Action)
- return mx.addClientActions(clientShutdown)
- default:
- }
- return mx.State
-}
-
-func (r restartSupport) viewSaved(mx *Ctx) *State {
- go r.prepRestart(mx)
- return mx.State
-}
-
-func (_ restartSupport) prepRestart(mx *Ctx) {
- dir := filepath.ToSlash(mx.View.Dir())
- if !filepath.IsAbs(dir) {
- return
- }
-
- // if we use build..ImportPath, it will be wrong if we work on the code outside the GS GOPATH
- imp := ""
- if i := strings.LastIndex(dir, "/src/"); i >= 0 {
- imp = dir[i+5:]
- }
- if imp != "margo" && !strings.HasPrefix(imp+"/", "disposa.blue/margo/") {
- return
- }
-
- pkg, _ := build.Default.ImportDir(dir, 0)
- if pkg == nil || pkg.Name == "" {
- return
- }
-
- defer mx.Begin(Task{Title: "prepping margo restart"}).Done()
-
- cmd := exec.Command("go", "test")
- cmd.Dir = mx.View.Dir()
- cmd.Env = mx.Env.Environ()
- out, err := cmd.CombinedOutput()
- msg := "telling margo to restart after " + mx.View.Filename() + " was saved"
- if err == nil {
- mx.Log.Println(msg)
- mx.Store.Dispatch(Restart{})
- } else {
- mx.Log.Printf("not %s: go test failed: %s\n%s\n", msg, err, out)
- }
-}
diff --git a/src/disposa.blue/margo/mg/state.go b/src/disposa.blue/margo/mg/state.go
deleted file mode 100644
index c42dc12d..00000000
--- a/src/disposa.blue/margo/mg/state.go
+++ /dev/null
@@ -1,347 +0,0 @@
-package mg
-
-import (
- "context"
- "disposa.blue/margo/misc/pprof/pprofdo"
- "fmt"
- "github.com/ugorji/go/codec"
- "go/build"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strings"
- "time"
-)
-
-var (
- ErrNoSettings = fmt.Errorf("no editor settings")
-
- _ context.Context = (*Ctx)(nil)
-)
-
-type Ctx struct {
- *State
- Action Action
-
- Store *Store
-
- Log *Logger
-
- Parent *Ctx
- Values map[interface{}]interface{}
- DoneC <-chan struct{}
-
- handle codec.Handle
-}
-
-func (_ *Ctx) Deadline() (time.Time, bool) {
- return time.Time{}, false
-}
-
-func (mx *Ctx) Done() <-chan struct{} {
- return mx.DoneC
-}
-
-func (_ *Ctx) Err() error {
- return nil
-}
-
-func (mx *Ctx) Value(k interface{}) interface{} {
- if v, ok := mx.Values[k]; ok {
- return v
- }
- if mx.Parent != nil {
- return mx.Parent.Value(k)
- }
- return nil
-}
-
-func newCtx(ag *Agent, st *State, act Action, sto *Store) (mx *Ctx, done chan struct{}) {
- if st == nil {
- panic("newCtx: state must not be nil")
- }
- if st == nil {
- panic("newCtx: store must not be nil")
- }
- done = make(chan struct{})
- return &Ctx{
- State: st,
- Action: act,
-
- Store: sto,
-
- Log: ag.Log,
-
- DoneC: done,
-
- handle: ag.handle,
- }, done
-}
-
-func (mx *Ctx) ActionIs(actions ...Action) bool {
- typ := reflect.TypeOf(mx.Action)
- for _, act := range actions {
- if reflect.TypeOf(act) == typ {
- return true
- }
- }
- return false
-}
-
-func (mx *Ctx) LangIs(names ...string) bool {
- return mx.View.LangIs(names...)
-}
-
-func (mx *Ctx) Copy(updaters ...func(*Ctx)) *Ctx {
- x := *mx
- x.Parent = mx
- if len(mx.Values) != 0 {
- x.Values = make(map[interface{}]interface{}, len(mx.Values))
- for k, v := range mx.Values {
- x.Values[k] = v
- }
- }
- for _, f := range updaters {
- f(&x)
- }
- return &x
-}
-
-func (mx *Ctx) Begin(t Task) *TaskTicket {
- return mx.Store.Begin(t)
-}
-
-type Reducer interface {
- Reduce(*Ctx) *State
-}
-
-type ReducerList []Reducer
-
-func (rl ReducerList) ReduceCtx(mx *Ctx) *Ctx {
- for _, r := range rl {
- var st *State
- pprofdo.Do(mx, rl.labels(r), func(_ context.Context) {
- st = r.Reduce(mx)
- })
- mx = mx.Copy(func(mx *Ctx) {
- mx.State = st
- })
- }
- return mx
-}
-
-func (rl ReducerList) labels(r Reducer) []string {
- lbl := ""
- if rf, ok := r.(ReduceFunc); ok {
- lbl = rf.Label
- } else {
- lbl = reflect.TypeOf(r).String()
- }
- return []string{"margo.reduce", lbl}
-}
-
-func (rl ReducerList) Reduce(mx *Ctx) *State {
- return rl.ReduceCtx(mx).State
-}
-
-func (rl ReducerList) Add(reducers ...Reducer) ReducerList {
- return append(rl[:len(rl):len(rl)], reducers...)
-}
-
-type ReduceFunc struct {
- Func func(*Ctx) *State
- Label string
-}
-
-func (rf ReduceFunc) Reduce(mx *Ctx) *State {
- return rf.Func(mx)
-}
-
-func Reduce(f func(*Ctx) *State) ReduceFunc {
- _, fn, line, _ := runtime.Caller(1)
- for _, gp := range strings.Split(build.Default.GOPATH, string(filepath.ListSeparator)) {
- s := strings.TrimPrefix(fn, filepath.Clean(gp)+string(filepath.Separator))
- if s != fn {
- fn = filepath.ToSlash(s)
- break
- }
- }
- return ReduceFunc{
- Func: f,
- Label: fmt.Sprintf("%s:%d", fn, line),
- }
-}
-
-type EditorProps struct {
- Name string
- Version string
-
- handle codec.Handle
- settings codec.Raw
-}
-
-func (ep *EditorProps) Settings(v interface{}) error {
- if ep.handle == nil || len(ep.settings) == 0 {
- return ErrNoSettings
- }
- return codec.NewDecoderBytes(ep.settings, ep.handle).Decode(v)
-}
-
-type EditorConfig interface {
- EditorConfig() interface{}
- EnabledForLangs(langs ...string) EditorConfig
-}
-
-type EphemeralState struct {
- Config EditorConfig
- Status StrSet
- Errors StrSet
- Completions []Completion
- Tooltips []Tooltip
- Issues IssueSet
-}
-
-type State struct {
- EphemeralState
- View *View
- Env EnvMap
- Editor EditorProps
-
- clientActions []clientAction
-}
-
-func NewState() *State {
- return &State{
- View: newView(),
- }
-}
-
-func (st *State) Copy(updaters ...func(*State)) *State {
- x := *st
- for _, f := range updaters {
- f(&x)
- }
- return &x
-}
-
-func (st *State) AddStatusf(format string, a ...interface{}) *State {
- return st.AddStatus(fmt.Sprintf(format, a...))
-}
-
-func (st *State) AddStatus(l ...string) *State {
- if len(l) == 0 {
- return st
- }
- return st.Copy(func(st *State) {
- st.Status = st.Status.Add(l...)
- })
-}
-
-func (st *State) Errorf(format string, a ...interface{}) *State {
- return st.AddError(fmt.Errorf(format, a...))
-}
-
-func (st *State) AddError(l ...error) *State {
- if len(l) == 0 {
- return st
- }
- return st.Copy(func(st *State) {
- for _, e := range l {
- if e != nil {
- st.Errors = st.Errors.Add(e.Error())
- }
- }
- })
-}
-
-func (st *State) SetConfig(c EditorConfig) *State {
- return st.Copy(func(st *State) {
- st.Config = c
- })
-}
-
-func (st *State) SetSrc(src []byte) *State {
- return st.Copy(func(st *State) {
- st.View = st.View.SetSrc(src)
- })
-}
-
-func (st *State) AddCompletions(l ...Completion) *State {
- return st.Copy(func(st *State) {
- st.Completions = append(st.Completions[:len(st.Completions):len(st.Completions)], l...)
- })
-}
-
-func (st *State) AddTooltips(l ...Tooltip) *State {
- return st.Copy(func(st *State) {
- st.Tooltips = append(st.Tooltips[:len(st.Tooltips):len(st.Tooltips)], l...)
- })
-}
-
-func (st *State) AddIssues(l ...Issue) *State {
- if len(l) == 0 {
- return st
- }
- return st.Copy(func(st *State) {
- st.Issues = st.Issues.Add(l...)
- })
-}
-
-func (st *State) addClientActions(l ...clientAction) *State {
- return st.Copy(func(st *State) {
- el := st.clientActions
- st.clientActions = append(el[:len(el):len(el)], l...)
- })
-}
-
-type clientProps struct {
- Editor struct {
- EditorProps
- Settings codec.Raw
- }
- Env EnvMap
- View *View
-}
-
-func (cp *clientProps) finalize(ag *Agent) {
- ce := &cp.Editor
- ep := &cp.Editor.EditorProps
- ep.handle = ag.handle
- ep.settings = ce.Settings
-}
-
-func makeClientProps() clientProps {
- return clientProps{
- Env: EnvMap{},
- View: &View{},
- }
-}
-
-func (c *clientProps) updateCtx(mx *Ctx) *Ctx {
- return mx.Copy(func(mx *Ctx) {
- mx.State = mx.State.Copy(func(st *State) {
- st.Editor = c.Editor.EditorProps
- if c.Env != nil {
- st.Env = c.Env
- }
- if c.View != nil {
- st.View = c.View
- // TODO: convert View.Pos to bytes
- // at moment gocode is most affected,
- // but to fix it here means we have to read the file off-disk
- // so I'd rather not do that until we have some caching in place
- }
- if st.View != nil {
- osGopath := os.Getenv("GOPATH")
- fn := st.View.Filename()
- for _, dir := range strings.Split(osGopath, string(filepath.ListSeparator)) {
- if IsParentDir(dir, fn) {
- st.Env = st.Env.Add("GOPATH", osGopath)
- break
- }
- }
- }
- })
- })
-}
diff --git a/src/disposa.blue/margo/mg/store.go b/src/disposa.blue/margo/mg/store.go
deleted file mode 100644
index e6740557..00000000
--- a/src/disposa.blue/margo/mg/store.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package mg
-
-import (
- "fmt"
- "sync"
-)
-
-var _ Dispatcher = (&Store{}).Dispatch
-
-type Dispatcher func(Action)
-
-type Listener func(*State)
-
-type storeReducers struct {
- before ReducerList
- use ReducerList
- after ReducerList
-}
-
-func (sr storeReducers) Reduce(mx *Ctx) *State {
- mx = sr.before.ReduceCtx(mx)
- mx = sr.use.ReduceCtx(mx)
- mx = sr.after.ReduceCtx(mx)
- return mx.State
-}
-
-func (sr storeReducers) Copy(updaters ...func(*storeReducers)) storeReducers {
- for _, f := range updaters {
- f(&sr)
- }
- return sr
-}
-
-type Store struct {
- mu sync.Mutex
- readyCh chan struct{}
- state *State
- listeners []*struct{ Listener }
- listener Listener
- reducers struct {
- sync.Mutex
- storeReducers
- }
- cfg EditorConfig
- ag *Agent
- tasks *taskTracker
- cache struct {
- sync.RWMutex
- vName string
- vHash string
- m map[interface{}]interface{}
- }
-}
-
-func (sto *Store) ready() {
- close(sto.readyCh)
-}
-
-func (sto *Store) Dispatch(act Action) {
- go func() {
- <-sto.readyCh
- sto.dispatch(act)
- }()
-}
-
-func (sto *Store) dispatch(act Action) {
- sto.mu.Lock()
- defer sto.mu.Unlock()
-
- mx, done := newCtx(sto.ag, sto.prepState(sto.state), act, sto)
- defer close(done)
- st := sto.reducers.Reduce(mx)
- sto.updateState(st, true)
-}
-
-func (sto *Store) syncRq(ag *Agent, rq *agentReq) {
- sto.mu.Lock()
- defer sto.mu.Unlock()
-
- name := rq.Action.Name
- mx, done := newCtx(sto.ag, sto.state, ag.createAction(name), sto)
- defer close(done)
-
- rs := agentRes{Cookie: rq.Cookie}
- rs.State = mx.State
- defer func() { ag.send(rs) }()
-
- if mx.Action == nil {
- rs.Error = fmt.Sprintf("unknown client action: %s", name)
- return
- }
-
- // TODO: add support for unpacking Action.Data
-
- mx = rq.Props.updateCtx(mx)
- sto.initCache(mx.View)
- mx.State = sto.prepState(mx.State)
- st := sto.reducers.Reduce(mx)
- rs.State = sto.updateState(st, false)
-}
-
-func (sto *Store) updateState(st *State, callListener bool) *State {
- if callListener && sto.listener != nil {
- sto.listener(st)
- }
- for _, p := range sto.listeners {
- p.Listener(st)
- }
- sto.state = st
- return st
-}
-
-func (sto *Store) State() *State {
- sto.mu.Lock()
- defer sto.mu.Unlock()
-
- return sto.state
-}
-
-func (sto *Store) prepState(st *State) *State {
- st = st.Copy()
- st.EphemeralState = EphemeralState{}
- if sto.cfg != nil {
- st.Config = sto.cfg
- }
- return st
-}
-
-func newStore(ag *Agent, l Listener) *Store {
- sto := &Store{
- readyCh: make(chan struct{}),
- listener: l,
- state: NewState(),
- ag: ag,
- }
- sto.cache.m = map[interface{}]interface{}{}
- sto.tasks = newTaskTracker(sto.Dispatch)
- sto.After(sto.tasks)
- return sto
-}
-
-func (sto *Store) Subscribe(l Listener) (unsubscribe func()) {
- sto.mu.Lock()
- defer sto.mu.Unlock()
-
- p := &struct{ Listener }{l}
- sto.listeners = append(sto.listeners[:len(sto.listeners):len(sto.listeners)], p)
-
- return func() {
- sto.mu.Lock()
- defer sto.mu.Unlock()
-
- listeners := make([]*struct{ Listener }, 0, len(sto.listeners)-1)
- for _, q := range sto.listeners {
- if p != q {
- listeners = append(listeners, q)
- }
- }
- sto.listeners = listeners
- }
-}
-
-func (sto *Store) updateReducers(updaters ...func(*storeReducers)) *Store {
- sto.reducers.Lock()
- defer sto.reducers.Unlock()
-
- sto.reducers.storeReducers = sto.reducers.Copy(updaters...)
- return sto
-}
-
-func (sto *Store) Before(reducers ...Reducer) *Store {
- return sto.updateReducers(func(sr *storeReducers) {
- sr.before = sr.before.Add(reducers...)
- })
-}
-
-func (sto *Store) Use(reducers ...Reducer) *Store {
- return sto.updateReducers(func(sr *storeReducers) {
- sr.use = sr.use.Add(reducers...)
- })
-}
-
-func (sto *Store) After(reducers ...Reducer) *Store {
- return sto.updateReducers(func(sr *storeReducers) {
- sr.after = sr.after.Add(reducers...)
- })
-}
-
-func (sto *Store) EditorConfig(cfg EditorConfig) *Store {
- sto.mu.Lock()
- defer sto.mu.Unlock()
-
- sto.cfg = cfg
- return sto
-}
-
-func (sto *Store) Begin(t Task) *TaskTicket {
- return sto.tasks.Begin(t)
-}
-
-func (sto *Store) initCache(v *View) {
- cc := &sto.cache
- cc.Lock()
- defer cc.Unlock()
-
- if cc.vHash == v.Hash && cc.vName == v.Name {
- return
- }
-
- cc.m = map[interface{}]interface{}{}
- cc.vHash = v.Hash
- cc.vName = v.Name
-}
-
-func (sto *Store) Put(k interface{}, v interface{}) {
- sto.cache.Lock()
- defer sto.cache.Unlock()
-
- sto.cache.m[k] = v
-}
-
-func (sto *Store) Get(k interface{}) interface{} {
- sto.cache.RLock()
- defer sto.cache.RUnlock()
-
- return sto.cache.m[k]
-}
-
-func (sto *Store) Del(k interface{}) {
- sto.cache.Lock()
- defer sto.cache.Unlock()
-
- delete(sto.cache.m, k)
-}
diff --git a/src/disposa.blue/margo/mg/tasks.go b/src/disposa.blue/margo/mg/tasks.go
deleted file mode 100644
index 6d0c2f82..00000000
--- a/src/disposa.blue/margo/mg/tasks.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package mg
-
-import (
- "bytes"
- "fmt"
- "sync"
- "time"
-)
-
-type taskTick struct{ ActionType }
-
-type Task struct {
- Title string
- Cancel func()
- CancelID string
-}
-
-type TaskTicket struct {
- ID string
- Title string
- Start time.Time
- CancelID string
-
- tracker *taskTracker
- cancel func()
-}
-
-func (ti *TaskTicket) Done() {
- if ti.tracker != nil {
- ti.tracker.done(ti.ID)
- }
-}
-
-func (ti *TaskTicket) Cancel() {
- if ti.cancel != nil {
- ti.cancel()
- }
-}
-
-func (ti *TaskTicket) Cancellable() bool {
- return ti.cancel != nil
-}
-
-type taskTracker struct {
- mu sync.Mutex
- id uint64
- tickets []*TaskTicket
- timer *time.Timer
- dispatch Dispatcher
- buf bytes.Buffer
-}
-
-func newTaskTracker(dispatch Dispatcher) *taskTracker {
- return &taskTracker{
- timer: time.NewTimer(1 * time.Second),
- dispatch: dispatch,
- }
-}
-
-func (tr *taskTracker) Reduce(mx *Ctx) *State {
- tr.mu.Lock()
- defer tr.mu.Unlock()
-
- switch mx.Action.(type) {
- case Started:
- tr.start()
- case taskTick:
- if len(tr.tickets) != 0 {
- tr.resetTimer()
- }
- }
- if s := tr.status(); s != "" {
- return mx.AddStatus(s)
- }
- return mx.State
-}
-
-func (tr *taskTracker) status() string {
- tr.buf.Reset()
- now := time.Now()
- tr.buf.WriteString("Tasks")
- initLen := tr.buf.Len()
- title := ""
- for _, t := range tr.tickets {
- age := now.Sub(t.Start) / time.Second
- switch age {
- case 0:
- case 1:
- tr.buf.WriteString(" ◔")
- case 2:
- tr.buf.WriteString(" ◑")
- case 3:
- tr.buf.WriteString(" ◕")
- default:
- tr.buf.WriteString(" ●")
- }
- if title == "" && t.Title != "" && age >= 1 && age <= 3 {
- title = t.Title
- }
- }
- if tr.buf.Len() == initLen {
- return ""
- }
- if title != "" {
- tr.buf.WriteByte(' ')
- tr.buf.WriteString(title)
- }
- return tr.buf.String()
-}
-
-func (tr *taskTracker) titles() (stale []string, fresh []string) {
- now := time.Now()
- for _, t := range tr.tickets {
- dur := now.Sub(t.Start)
- switch {
- case dur >= 5*time.Second:
- stale = append(stale, t.Title)
- case dur >= 1*time.Second:
- fresh = append(fresh, t.Title)
- }
- }
- for _, t := range tr.tickets {
- dur := now.Sub(t.Start)
- switch {
- case dur >= 5*time.Second:
- stale = append(stale, t.Title)
- case dur >= 1*time.Second:
- fresh = append(fresh, t.Title)
- }
- }
- return stale, fresh
-}
-
-func (tr *taskTracker) start() {
- go func() {
- for range tr.timer.C {
- tr.dispatch(taskTick{})
- }
- }()
-}
-
-func (tr *taskTracker) resetTimer() {
- defer tr.timer.Reset(1 * time.Second)
-}
-
-func (tr *taskTracker) done(id string) {
- tr.mu.Lock()
- defer tr.mu.Unlock()
- defer tr.resetTimer()
-
- l := make([]*TaskTicket, 0, len(tr.tickets)-1)
- for _, t := range tr.tickets {
- if t.ID != id {
- l = append(l, t)
- }
- }
- tr.tickets = l
-}
-
-func (tr *taskTracker) Begin(o Task) *TaskTicket {
- tr.mu.Lock()
- defer tr.mu.Unlock()
- defer tr.resetTimer()
-
- if cid := o.CancelID; cid != "" {
- for _, t := range tr.tickets {
- if t.CancelID == cid {
- t.Cancel()
- }
- }
- }
-
- tr.id++
- t := &TaskTicket{
- ID: fmt.Sprintf("@%d", tr.id),
- CancelID: o.CancelID,
- Title: o.Title,
- Start: time.Now(),
- cancel: o.Cancel,
- tracker: tr,
- }
- tr.tickets = append(tr.tickets, t)
- return t
-}
diff --git a/src/disposa.blue/margo/mg/tooltips.go b/src/disposa.blue/margo/mg/tooltips.go
deleted file mode 100644
index d5344f54..00000000
--- a/src/disposa.blue/margo/mg/tooltips.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package mg
-
-type Tooltip struct{}
diff --git a/src/disposa.blue/margo/mg/view.go b/src/disposa.blue/margo/mg/view.go
deleted file mode 100644
index dd7c059b..00000000
--- a/src/disposa.blue/margo/mg/view.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package mg
-
-import (
- "bytes"
- "encoding/base64"
- "golang.org/x/crypto/blake2b"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "unicode/utf8"
-)
-
-type View struct {
- Path string
- Wd string
- Name string
- Hash string
- Src []byte
- Pos int
- Row int
- Col int
- Dirty bool
- Ext string
- Lang string
-
- changed int
-}
-
-func newView() *View {
- return &View{}
-}
-
-func (v *View) Copy(updaters ...func(*View)) *View {
- x := *v
- for _, f := range updaters {
- f(&x)
- }
- return &x
-}
-
-func (v *View) LangIs(names ...string) bool {
- for _, s := range names {
- if s == v.Lang {
- return true
- }
- if v.Ext != "" && v.Ext[1:] == s {
- return true
- }
- }
- return false
-}
-
-func (v *View) Dir() string {
- if v.Path != "" {
- return filepath.Dir(v.Path)
- }
- return v.Wd
-}
-
-func (v *View) Filename() string {
- if v.Path != "" {
- return v.Path
- }
- return filepath.Join(v.Wd, v.Name)
-}
-
-func (v *View) ReadAll() ([]byte, error) {
- if v.Dirty || len(v.Src) != 0 {
- return v.Src, nil
- }
-
- r, err := v.Open()
- if err != nil {
- return nil, err
- }
- defer r.Close()
-
- return ioutil.ReadAll(r)
-}
-
-func (v *View) Valid() bool {
- return v.Name != ""
-}
-
-func (v *View) Open() (io.ReadCloser, error) {
- if v.Dirty || len(v.Src) != 0 {
- return ioutil.NopCloser(bytes.NewReader(v.Src)), nil
- }
-
- if v.Path == "" {
- return nil, os.ErrNotExist
- }
-
- return os.Open(v.Path)
-}
-
-func (v *View) SetSrc(s []byte) *View {
- return v.Copy(func(v *View) {
- v.Pos = 0
- v.Row = 0
- v.Col = 0
- v.Src = s
- v.Hash = SrcHash(s)
- v.Dirty = true
- v.changed++
- })
-}
-
-func SrcHash(s []byte) string {
- hash := blake2b.Sum512(s)
- return "hash:blake2b/Sum512;base64url," + base64.URLEncoding.EncodeToString(hash[:])
-}
-
-func BytePos(src []byte, charPos int) int {
- for i, c := range src {
- if !utf8.RuneStart(c) {
- continue
- }
- charPos--
- if charPos < 0 {
- return i
- }
- }
- return len(src)
-}
diff --git a/src/disposa.blue/margo/sublime/ext.go b/src/disposa.blue/margo/sublime/ext.go
deleted file mode 100644
index 848ddd23..00000000
--- a/src/disposa.blue/margo/sublime/ext.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package sublime
-
-import (
- "disposa.blue/margo/mg"
-)
-
-func Margo(mo mg.Args) {
-}
diff --git a/src/disposa.blue/margo/sublime/sublime.go b/src/disposa.blue/margo/sublime/sublime.go
deleted file mode 100644
index 46afae4b..00000000
--- a/src/disposa.blue/margo/sublime/sublime.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package sublime
-
-import (
- "disposa.blue/margo/mgcli"
- "fmt"
- "github.com/urfave/cli"
- "go/build"
- "os"
- "os/exec"
- "strings"
-)
-
-var (
- Command = cli.Command{
- Name: "sublime",
- Aliases: []string{"subl"},
- Usage: "",
- Description: "",
- SkipFlagParsing: true,
- SkipArgReorder: true,
- Action: mgcli.Action(mainAction),
- }
-)
-
-type cmdHelper struct {
- name string
- args []string
- outToErr bool
- env []string
-}
-
-func (c cmdHelper) run() error {
- cmd := exec.Command(c.name, c.args...)
- cmd.Stdin = os.Stdin
- if c.outToErr {
- cmd.Stdout = os.Stderr
- } else {
- cmd.Stdout = os.Stdout
- }
- cmd.Stderr = os.Stderr
- cmd.Env = c.env
-
- fmt.Fprintf(os.Stderr, "run%q\n", append([]string{c.name}, c.args...))
- return cmd.Run()
-}
-
-func mainAction(c *cli.Context) error {
- args := c.Args()
- tags := "margo"
- if extensionPkgExists() {
- tags = "margo margo_extension"
- }
- var env []string
- if err := goInstallAgent(os.Getenv("MARGO_SUBLIME_GOPATH"), tags); err != nil {
- env = append(env, "MARGO_SUBLIME_INSTALL_FAILED=margo install failed. check console for errors")
- fmt.Fprintln(os.Stderr, "cannot install margo.sublime:", err)
- }
- name := "margo.sublime"
- if exe, err := exec.LookPath(name); err == nil {
- name = exe
- }
- return cmdHelper{name: name, args: args, env: env}.run()
-}
-
-func goInstallAgent(gp string, tags string) error {
- var env []string
- if gp != "" {
- env = make([]string, 0, len(os.Environ())+1)
- // I don't remember the rules about duplicate env vars...
- for _, s := range os.Environ() {
- if !strings.HasPrefix(s, "GOPATH=") {
- env = append(env, s)
- }
- }
- env = append(env, "GOPATH="+gp)
- }
-
- cmdpath := "disposa.blue/margo/cmd/margo.sublime"
- if s := os.Getenv("MARGO_SUBLIME_CMDPATH"); s != "" {
- cmdpath = s
- }
-
- args := []string{"install", "-v", "-tags=" + tags}
- if os.Getenv("MARGO_INSTALL_FLAGS_RACE") == "1" {
- args = append(args, "-race")
- }
- for _, tag := range build.Default.ReleaseTags {
- if tag == "go1.10" {
- args = append(args, "-i")
- break
- }
- }
- args = append(args, cmdpath)
- return cmdHelper{
- name: "go",
- args: args,
- outToErr: true,
- env: env,
- }.run()
-}
-
-func extensionPkgExists() bool {
- _, err := build.Import("margo", "", 0)
- return err == nil
-}
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/README.md b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/README.md
deleted file mode 100644
index 50d65e55..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/README.md
+++ /dev/null
@@ -1,206 +0,0 @@
-# Codec
-
-High Performance, Feature-Rich Idiomatic Go codec/encoding library for
-binc, msgpack, cbor, json.
-
-Supported Serialization formats are:
-
- - msgpack: https://github.com/msgpack/msgpack
- - binc: http://github.com/ugorji/binc
- - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- - json: http://json.org http://tools.ietf.org/html/rfc7159
- - simple:
-
-To install:
-
- go get github.com/ugorji/go/codec
-
-This package will carefully use 'unsafe' for performance reasons in specific places.
-You can build without unsafe use by passing the safe or appengine tag
-i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
-go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from
-go 1.7+ . This is because supporting unsafe requires knowledge of implementation details.
-
-Online documentation: http://godoc.org/github.com/ugorji/go/codec
-Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
-
-The idiomatic Go support is as seen in other encoding packages in
-the standard library (ie json, xml, gob, etc).
-
-Rich Feature Set includes:
-
- - Simple but extremely powerful and feature-rich API
- - Support for go1.4 and above, while selectively using newer APIs for later releases
- - Excellent code coverage ( > 90% )
- - Very High Performance.
- Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- - Careful selected use of 'unsafe' for targeted performance gains.
- 100% mode exists where 'unsafe' is not used at all.
- - Lock-free (sans mutex) concurrency for scaling to 100's of cores
- - Coerce types where appropriate
- e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
- - Corner Cases:
- Overflows, nil maps/slices, nil values in streams are handled correctly
- - Standard field renaming via tags
- - Support for omitting empty fields during an encoding
- - Encoding from any value and decoding into pointer to any value
- (struct, slice, map, primitives, pointers, interface{}, etc)
- - Extensions to support efficient encoding/decoding of any named types
- - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- - Support IsZero() bool to determine if a value is a zero value.
- Analogous to time.Time.IsZero() bool.
- - Decoding without a schema (into a interface{}).
- Includes Options to configure what specific map or slice type to use
- when decoding an encoded list or map into a nil interface{}
- - Mapping a non-interface type to an interface, so we can decode appropriately
- into any interface type with a correctly configured non-interface value.
- - Encode a struct as an array, and decode struct from an array in the data stream
- - Option to encode struct keys as numbers (instead of strings)
- (to support structured streams with fields encoded as numeric codes)
- - Comprehensive support for anonymous fields
- - Fast (no-reflection) encoding/decoding of common maps and slices
- - Code-generation for faster performance.
- - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- - Support indefinite-length formats to enable true streaming
- (for formats which support it e.g. json, cbor)
- - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
- This mostly applies to maps, where iteration order is non-deterministic.
- - NIL in data stream decoded as zero value
- - Never silently skip data when decoding.
- User decides whether to return an error or silently skip data when keys or indexes
- in the data stream do not map to fields in the struct.
- - Encode/Decode from/to chan types (for iterative streaming support)
- - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- - Provides a RPC Server and Client Codec for net/rpc communication protocol.
- - Handle unique idiosyncrasies of codecs e.g.
- - For messagepack, configure how ambiguities in handling raw bytes are resolved
- - For messagepack, provide rpc server/client codec to support
- msgpack-rpc protocol defined at:
- https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
-
-## Extension Support
-
-Users can register a function to handle the encoding or decoding of
-their custom types.
-
-There are no restrictions on what the custom type can be. Some examples:
-
- type BisSet []int
- type BitSet64 uint64
- type UUID string
- type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
- type GifImage struct { ... }
-
-As an illustration, MyStructWithUnexportedFields would normally be
-encoded as an empty map because it has no exported fields, while UUID
-would be encoded as a string. However, with extension support, you can
-encode any of these however you like.
-
-## Custom Encoding and Decoding
-
-This package maintains symmetry in the encoding and decoding halfs.
-We determine how to encode or decode by walking this decision tree
-
- - is type a codec.Selfer?
- - is there an extension registered for the type?
- - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- - is format text-based, and type an encoding.TextMarshaler?
- - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
-
-This symmetry is important to reduce chances of issues happening because the
-encoding and decoding sides are out of sync e.g. decoded via very specific
-encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
-
-Consequently, if a type only defines one-half of the symmetry
-(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
-then that type doesn't satisfy the check and we will continue walking down the
-decision tree.
-
-## RPC
-
-RPC Client and Server Codecs are implemented, so the codecs can be used
-with the standard net/rpc package.
-
-## Usage
-
-Typical usage model:
-
- // create and configure Handle
- var (
- bh codec.BincHandle
- mh codec.MsgpackHandle
- ch codec.CborHandle
- )
-
- mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
-
- // configure extensions
- // e.g. for msgpack, define functions and enable Time support for tag 1
- // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
-
- // create and use decoder/encoder
- var (
- r io.Reader
- w io.Writer
- b []byte
- h = &bh // or mh to use msgpack
- )
-
- dec = codec.NewDecoder(r, h)
- dec = codec.NewDecoderBytes(b, h)
- err = dec.Decode(&v)
-
- enc = codec.NewEncoder(w, h)
- enc = codec.NewEncoderBytes(&b, h)
- err = enc.Encode(v)
-
- //RPC Server
- go func() {
- for {
- conn, err := listener.Accept()
- rpcCodec := codec.GoRpc.ServerCodec(conn, h)
- //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
- rpc.ServeCodec(rpcCodec)
- }
- }()
-
- //RPC Communication (client side)
- conn, err = net.Dial("tcp", "localhost:5555")
- rpcCodec := codec.GoRpc.ClientCodec(conn, h)
- //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
- client := rpc.NewClientWithCodec(rpcCodec)
-
-## Running Tests
-
-To run tests, use the following:
-
- go test
-
-To run the full suite of tests, use the following:
-
- go test -tags alltests -run Suite
-
-You can run the tag 'safe' to run tests or build in safe mode. e.g.
-
- go test -tags safe -run Json
- go test -tags "alltests safe" -run Suite
-
-## Running Benchmarks
-
-Please see http://github.com/ugorji/go-codec-bench .
-
-## Caveats
-
-Struct fields matching the following are ignored during encoding and decoding
-
- - struct tag value set to -
- - func, complex numbers, unsafe pointers
- - unexported and not embedded
- - unexported and embedded and not struct kind
- - unexported and embedded pointers (from go1.10)
-
-Every other field in a struct will be encoded/decoded.
-
-Embedded fields are encoded as if they exist in the top-level struct,
-with some caveats. See Encode documentation.
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/cbor.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/cbor.go
deleted file mode 100644
index 7633c04a..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/cbor.go
+++ /dev/null
@@ -1,756 +0,0 @@
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-package codec
-
-import (
- "math"
- "reflect"
- "time"
-)
-
-const (
- cborMajorUint byte = iota
- cborMajorNegInt
- cborMajorBytes
- cborMajorText
- cborMajorArray
- cborMajorMap
- cborMajorTag
- cborMajorOther
-)
-
-const (
- cborBdFalse byte = 0xf4 + iota
- cborBdTrue
- cborBdNil
- cborBdUndefined
- cborBdExt
- cborBdFloat16
- cborBdFloat32
- cborBdFloat64
-)
-
-const (
- cborBdIndefiniteBytes byte = 0x5f
- cborBdIndefiniteString = 0x7f
- cborBdIndefiniteArray = 0x9f
- cborBdIndefiniteMap = 0xbf
- cborBdBreak = 0xff
-)
-
-// These define some in-stream descriptors for
-// manual encoding e.g. when doing explicit indefinite-length
-const (
- CborStreamBytes byte = 0x5f
- CborStreamString = 0x7f
- CborStreamArray = 0x9f
- CborStreamMap = 0xbf
- CborStreamBreak = 0xff
-)
-
-const (
- cborBaseUint byte = 0x00
- cborBaseNegInt = 0x20
- cborBaseBytes = 0x40
- cborBaseString = 0x60
- cborBaseArray = 0x80
- cborBaseMap = 0xa0
- cborBaseTag = 0xc0
- cborBaseSimple = 0xe0
-)
-
-func cbordesc(bd byte) string {
- switch bd {
- case cborBdNil:
- return "nil"
- case cborBdFalse:
- return "false"
- case cborBdTrue:
- return "true"
- case cborBdFloat16, cborBdFloat32, cborBdFloat64:
- return "float"
- case cborBdIndefiniteBytes:
- return "bytes*"
- case cborBdIndefiniteString:
- return "string*"
- case cborBdIndefiniteArray:
- return "array*"
- case cborBdIndefiniteMap:
- return "map*"
- default:
- switch {
- case bd >= cborBaseUint && bd < cborBaseNegInt:
- return "(u)int"
- case bd >= cborBaseNegInt && bd < cborBaseBytes:
- return "int"
- case bd >= cborBaseBytes && bd < cborBaseString:
- return "bytes"
- case bd >= cborBaseString && bd < cborBaseArray:
- return "string"
- case bd >= cborBaseArray && bd < cborBaseMap:
- return "array"
- case bd >= cborBaseMap && bd < cborBaseTag:
- return "map"
- case bd >= cborBaseTag && bd < cborBaseSimple:
- return "ext"
- default:
- return "unknown"
- }
- }
-}
-
-// -------------------
-
-type cborEncDriver struct {
- noBuiltInTypes
- encDriverNoopContainerWriter
- // encNoSeparator
- e *Encoder
- w encWriter
- h *CborHandle
- x [8]byte
- _ [3]uint64 // padding
-}
-
-func (e *cborEncDriver) EncodeNil() {
- e.w.writen1(cborBdNil)
-}
-
-func (e *cborEncDriver) EncodeBool(b bool) {
- if b {
- e.w.writen1(cborBdTrue)
- } else {
- e.w.writen1(cborBdFalse)
- }
-}
-
-func (e *cborEncDriver) EncodeFloat32(f float32) {
- e.w.writen1(cborBdFloat32)
- bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
-}
-
-func (e *cborEncDriver) EncodeFloat64(f float64) {
- e.w.writen1(cborBdFloat64)
- bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
-}
-
-func (e *cborEncDriver) encUint(v uint64, bd byte) {
- if v <= 0x17 {
- e.w.writen1(byte(v) + bd)
- } else if v <= math.MaxUint8 {
- e.w.writen2(bd+0x18, uint8(v))
- } else if v <= math.MaxUint16 {
- e.w.writen1(bd + 0x19)
- bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
- } else if v <= math.MaxUint32 {
- e.w.writen1(bd + 0x1a)
- bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
- } else { // if v <= math.MaxUint64 {
- e.w.writen1(bd + 0x1b)
- bigenHelper{e.x[:8], e.w}.writeUint64(v)
- }
-}
-
-func (e *cborEncDriver) EncodeInt(v int64) {
- if v < 0 {
- e.encUint(uint64(-1-v), cborBaseNegInt)
- } else {
- e.encUint(uint64(v), cborBaseUint)
- }
-}
-
-func (e *cborEncDriver) EncodeUint(v uint64) {
- e.encUint(v, cborBaseUint)
-}
-
-func (e *cborEncDriver) encLen(bd byte, length int) {
- e.encUint(uint64(length), bd)
-}
-
-func (e *cborEncDriver) EncodeTime(t time.Time) {
- if t.IsZero() {
- e.EncodeNil()
- } else if e.h.TimeRFC3339 {
- e.encUint(0, cborBaseTag)
- e.EncodeString(cUTF8, t.Format(time.RFC3339Nano))
- } else {
- e.encUint(1, cborBaseTag)
- t = t.UTC().Round(time.Microsecond)
- sec, nsec := t.Unix(), uint64(t.Nanosecond())
- if nsec == 0 {
- e.EncodeInt(sec)
- } else {
- e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
- }
- }
-}
-
-func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
- e.encUint(uint64(xtag), cborBaseTag)
- if v := ext.ConvertExt(rv); v == nil {
- e.EncodeNil()
- } else {
- en.encode(v)
- }
-}
-
-func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
- e.encUint(uint64(re.Tag), cborBaseTag)
- if false && re.Data != nil {
- en.encode(re.Data)
- } else if re.Value != nil {
- en.encode(re.Value)
- } else {
- e.EncodeNil()
- }
-}
-
-func (e *cborEncDriver) WriteArrayStart(length int) {
- if e.h.IndefiniteLength {
- e.w.writen1(cborBdIndefiniteArray)
- } else {
- e.encLen(cborBaseArray, length)
- }
-}
-
-func (e *cborEncDriver) WriteMapStart(length int) {
- if e.h.IndefiniteLength {
- e.w.writen1(cborBdIndefiniteMap)
- } else {
- e.encLen(cborBaseMap, length)
- }
-}
-
-func (e *cborEncDriver) WriteMapEnd() {
- if e.h.IndefiniteLength {
- e.w.writen1(cborBdBreak)
- }
-}
-
-func (e *cborEncDriver) WriteArrayEnd() {
- if e.h.IndefiniteLength {
- e.w.writen1(cborBdBreak)
- }
-}
-
-func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
- e.encStringBytesS(cborBaseString, v)
-}
-
-func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
- if v == nil {
- e.EncodeNil()
- } else if c == cRAW {
- e.encStringBytesS(cborBaseBytes, stringView(v))
- } else {
- e.encStringBytesS(cborBaseString, stringView(v))
- }
-}
-
-func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
- if e.h.IndefiniteLength {
- if bb == cborBaseBytes {
- e.w.writen1(cborBdIndefiniteBytes)
- } else {
- e.w.writen1(cborBdIndefiniteString)
- }
- blen := len(v) / 4
- if blen == 0 {
- blen = 64
- } else if blen > 1024 {
- blen = 1024
- }
- for i := 0; i < len(v); {
- var v2 string
- i2 := i + blen
- if i2 < len(v) {
- v2 = v[i:i2]
- } else {
- v2 = v[i:]
- }
- e.encLen(bb, len(v2))
- e.w.writestr(v2)
- i = i2
- }
- e.w.writen1(cborBdBreak)
- } else {
- e.encLen(bb, len(v))
- e.w.writestr(v)
- }
-}
-
-// ----------------------
-
-type cborDecDriver struct {
- d *Decoder
- h *CborHandle
- r decReader
- // b [scratchByteArrayLen]byte
- br bool // bytes reader
- bdRead bool
- bd byte
- noBuiltInTypes
- // decNoSeparator
- decDriverNoopContainerReader
- _ [3]uint64 // padding
-}
-
-func (d *cborDecDriver) readNextBd() {
- d.bd = d.r.readn1()
- d.bdRead = true
-}
-
-func (d *cborDecDriver) uncacheRead() {
- if d.bdRead {
- d.r.unreadn1()
- d.bdRead = false
- }
-}
-
-func (d *cborDecDriver) ContainerType() (vt valueType) {
- if !d.bdRead {
- d.readNextBd()
- }
- if d.bd == cborBdNil {
- return valueTypeNil
- } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
- return valueTypeBytes
- } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
- return valueTypeString
- } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
- return valueTypeArray
- } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
- return valueTypeMap
- }
- // else {
- // d.d.errorf("isContainerType: unsupported parameter: %v", vt)
- // }
- return valueTypeUnset
-}
-
-func (d *cborDecDriver) TryDecodeAsNil() bool {
- if !d.bdRead {
- d.readNextBd()
- }
- // treat Nil and Undefined as nil values
- if d.bd == cborBdNil || d.bd == cborBdUndefined {
- d.bdRead = false
- return true
- }
- return false
-}
-
-func (d *cborDecDriver) CheckBreak() bool {
- if !d.bdRead {
- d.readNextBd()
- }
- if d.bd == cborBdBreak {
- d.bdRead = false
- return true
- }
- return false
-}
-
-func (d *cborDecDriver) decUint() (ui uint64) {
- v := d.bd & 0x1f
- if v <= 0x17 {
- ui = uint64(v)
- } else {
- if v == 0x18 {
- ui = uint64(d.r.readn1())
- } else if v == 0x19 {
- ui = uint64(bigen.Uint16(d.r.readx(2)))
- } else if v == 0x1a {
- ui = uint64(bigen.Uint32(d.r.readx(4)))
- } else if v == 0x1b {
- ui = uint64(bigen.Uint64(d.r.readx(8)))
- } else {
- d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
- return
- }
- }
- return
-}
-
-func (d *cborDecDriver) decCheckInteger() (neg bool) {
- if !d.bdRead {
- d.readNextBd()
- }
- major := d.bd >> 5
- if major == cborMajorUint {
- } else if major == cborMajorNegInt {
- neg = true
- } else {
- d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
- return
- }
- return
-}
-
-func (d *cborDecDriver) DecodeInt64() (i int64) {
- neg := d.decCheckInteger()
- ui := d.decUint()
- // check if this number can be converted to an int without overflow
- if neg {
- i = -(chkOvf.SignedIntV(ui + 1))
- } else {
- i = chkOvf.SignedIntV(ui)
- }
- d.bdRead = false
- return
-}
-
-func (d *cborDecDriver) DecodeUint64() (ui uint64) {
- if d.decCheckInteger() {
- d.d.errorf("assigning negative signed value to unsigned type")
- return
- }
- ui = d.decUint()
- d.bdRead = false
- return
-}
-
-func (d *cborDecDriver) DecodeFloat64() (f float64) {
- if !d.bdRead {
- d.readNextBd()
- }
- if bd := d.bd; bd == cborBdFloat16 {
- f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
- } else if bd == cborBdFloat32 {
- f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
- } else if bd == cborBdFloat64 {
- f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
- } else if bd >= cborBaseUint && bd < cborBaseBytes {
- f = float64(d.DecodeInt64())
- } else {
- d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd))
- return
- }
- d.bdRead = false
- return
-}
-
-// bool can be decoded from bool only (single byte).
-func (d *cborDecDriver) DecodeBool() (b bool) {
- if !d.bdRead {
- d.readNextBd()
- }
- if bd := d.bd; bd == cborBdTrue {
- b = true
- } else if bd == cborBdFalse {
- } else {
- d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
- return
- }
- d.bdRead = false
- return
-}
-
-func (d *cborDecDriver) ReadMapStart() (length int) {
- if !d.bdRead {
- d.readNextBd()
- }
- d.bdRead = false
- if d.bd == cborBdIndefiniteMap {
- return -1
- }
- return d.decLen()
-}
-
-func (d *cborDecDriver) ReadArrayStart() (length int) {
- if !d.bdRead {
- d.readNextBd()
- }
- d.bdRead = false
- if d.bd == cborBdIndefiniteArray {
- return -1
- }
- return d.decLen()
-}
-
-func (d *cborDecDriver) decLen() int {
- return int(d.decUint())
-}
-
-func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
- d.bdRead = false
- for {
- if d.CheckBreak() {
- break
- }
- if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
- d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+
- " got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd))
- return nil
- }
- n := d.decLen()
- oldLen := len(bs)
- newLen := oldLen + n
- if newLen > cap(bs) {
- bs2 := make([]byte, newLen, 2*cap(bs)+n)
- copy(bs2, bs)
- bs = bs2
- } else {
- bs = bs[:newLen]
- }
- d.r.readb(bs[oldLen:newLen])
- // bs = append(bs, d.r.readn()...)
- d.bdRead = false
- }
- d.bdRead = false
- return bs
-}
-
-func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
- if !d.bdRead {
- d.readNextBd()
- }
- if d.bd == cborBdNil || d.bd == cborBdUndefined {
- d.bdRead = false
- return nil
- }
- if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
- d.bdRead = false
- if bs == nil {
- if zerocopy {
- return d.decAppendIndefiniteBytes(d.d.b[:0])
- }
- return d.decAppendIndefiniteBytes(zeroByteSlice)
- }
- return d.decAppendIndefiniteBytes(bs[:0])
- }
- // check if an "array" of uint8's (see ContainerType for how to infer if an array)
- if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
- bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
- return
- }
- clen := d.decLen()
- d.bdRead = false
- if zerocopy {
- if d.br {
- return d.r.readx(clen)
- } else if len(bs) == 0 {
- bs = d.d.b[:]
- }
- }
- return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
-}
-
-func (d *cborDecDriver) DecodeString() (s string) {
- return string(d.DecodeBytes(d.d.b[:], true))
-}
-
-func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
- return d.DecodeBytes(d.d.b[:], true)
-}
-
-func (d *cborDecDriver) DecodeTime() (t time.Time) {
- if !d.bdRead {
- d.readNextBd()
- }
- if d.bd == cborBdNil || d.bd == cborBdUndefined {
- d.bdRead = false
- return
- }
- xtag := d.decUint()
- d.bdRead = false
- return d.decodeTime(xtag)
-}
-
-func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
- if !d.bdRead {
- d.readNextBd()
- }
- switch xtag {
- case 0:
- var err error
- if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
- d.d.errorv(err)
- }
- case 1:
- // decode an int64 or a float, and infer time.Time from there.
- // for floats, round to microseconds, as that is what is guaranteed to fit well.
- switch {
- case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
- f1, f2 := math.Modf(d.DecodeFloat64())
- t = time.Unix(int64(f1), int64(f2*1e9))
- case d.bd == cborBdFloat64:
- f1, f2 := math.Modf(d.DecodeFloat64())
- t = time.Unix(int64(f1), int64(f2*1e9))
- case d.bd >= cborBaseUint && d.bd < cborBaseNegInt,
- d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
- t = time.Unix(d.DecodeInt64(), 0)
- default:
- d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
- }
- default:
- d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
- }
- t = t.UTC().Round(time.Microsecond)
- return
-}
-
-func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
- if !d.bdRead {
- d.readNextBd()
- }
- u := d.decUint()
- d.bdRead = false
- realxtag = u
- if ext == nil {
- re := rv.(*RawExt)
- re.Tag = realxtag
- d.d.decode(&re.Value)
- } else if xtag != realxtag {
- d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
- return
- } else {
- var v interface{}
- d.d.decode(&v)
- ext.UpdateExt(rv, v)
- }
- d.bdRead = false
- return
-}
-
-func (d *cborDecDriver) DecodeNaked() {
- if !d.bdRead {
- d.readNextBd()
- }
-
- n := d.d.n
- var decodeFurther bool
-
- switch d.bd {
- case cborBdNil:
- n.v = valueTypeNil
- case cborBdFalse:
- n.v = valueTypeBool
- n.b = false
- case cborBdTrue:
- n.v = valueTypeBool
- n.b = true
- case cborBdFloat16, cborBdFloat32, cborBdFloat64:
- n.v = valueTypeFloat
- n.f = d.DecodeFloat64()
- case cborBdIndefiniteBytes:
- n.v = valueTypeBytes
- n.l = d.DecodeBytes(nil, false)
- case cborBdIndefiniteString:
- n.v = valueTypeString
- n.s = d.DecodeString()
- case cborBdIndefiniteArray:
- n.v = valueTypeArray
- decodeFurther = true
- case cborBdIndefiniteMap:
- n.v = valueTypeMap
- decodeFurther = true
- default:
- switch {
- case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
- if d.h.SignedInteger {
- n.v = valueTypeInt
- n.i = d.DecodeInt64()
- } else {
- n.v = valueTypeUint
- n.u = d.DecodeUint64()
- }
- case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
- n.v = valueTypeInt
- n.i = d.DecodeInt64()
- case d.bd >= cborBaseBytes && d.bd < cborBaseString:
- n.v = valueTypeBytes
- n.l = d.DecodeBytes(nil, false)
- case d.bd >= cborBaseString && d.bd < cborBaseArray:
- n.v = valueTypeString
- n.s = d.DecodeString()
- case d.bd >= cborBaseArray && d.bd < cborBaseMap:
- n.v = valueTypeArray
- decodeFurther = true
- case d.bd >= cborBaseMap && d.bd < cborBaseTag:
- n.v = valueTypeMap
- decodeFurther = true
- case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
- n.v = valueTypeExt
- n.u = d.decUint()
- n.l = nil
- if n.u == 0 || n.u == 1 {
- d.bdRead = false
- n.v = valueTypeTime
- n.t = d.decodeTime(n.u)
- }
- // d.bdRead = false
- // d.d.decode(&re.Value) // handled by decode itself.
- // decodeFurther = true
- default:
- d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
- return
- }
- }
-
- if !decodeFurther {
- d.bdRead = false
- }
- return
-}
-
-// -------------------------
-
-// CborHandle is a Handle for the CBOR encoding format,
-// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
-//
-// CBOR is comprehensively supported, including support for:
-// - indefinite-length arrays/maps/bytes/strings
-// - (extension) tags in range 0..0xffff (0 .. 65535)
-// - half, single and double-precision floats
-// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
-// - nil, true, false, ...
-// - arrays and maps, bytes and text strings
-//
-// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
-// Users can implement them as needed (using SetExt), including spec-documented ones:
-// - timestamp, BigNum, BigFloat, Decimals,
-// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
-type CborHandle struct {
- binaryEncodingType
- noElemSeparators
- BasicHandle
-
- // IndefiniteLength=true, means that we encode using indefinitelength
- IndefiniteLength bool
-
- // TimeRFC3339 says to encode time.Time using RFC3339 format.
- // If unset, we encode time.Time using seconds past epoch.
- TimeRFC3339 bool
-
- // _ [1]uint64 // padding
-}
-
-// Name returns the name of the handle: cbor
-func (h *CborHandle) Name() string { return "cbor" }
-
-// SetInterfaceExt sets an extension
-func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
- return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
-}
-
-func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
- return &cborEncDriver{e: e, w: e.w, h: h}
-}
-
-func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
- return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
-}
-
-func (e *cborEncDriver) reset() {
- e.w = e.e.w
-}
-
-func (d *cborDecDriver) reset() {
- d.r, d.br = d.d.r, d.d.bytes
- d.bd, d.bdRead = 0, false
-}
-
-var _ decDriver = (*cborDecDriver)(nil)
-var _ encDriver = (*cborEncDriver)(nil)
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/decode.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/decode.go
deleted file mode 100644
index 1c0817aa..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/decode.go
+++ /dev/null
@@ -1,2552 +0,0 @@
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-package codec
-
-import (
- "encoding"
- "errors"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "sync"
- "time"
-)
-
-// Some tagging information for error messages.
-const (
- msgBadDesc = "unrecognized descriptor byte"
- msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
-)
-
-const decDefSliceCap = 8
-const decDefChanCap = 64 // should be large, as cap cannot be expanded
-const decScratchByteArrayLen = cacheLineSize - 8
-
-var (
- errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct"
- errstrCannotDecodeIntoNil = "cannot decode into nil"
-
- errmsgExpandSliceOverflow = "expand slice: slice overflow"
- errmsgExpandSliceCannotChange = "expand slice: cannot change"
-
- errDecoderNotInitialized = errors.New("Decoder not initialized")
-
- errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read")
- errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
- errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown")
-)
-
-// decReader abstracts the reading source, allowing implementations that can
-// read from an io.Reader or directly off a byte slice with zero-copying.
-type decReader interface {
- unreadn1()
-
- // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
- // just return a view of the []byte being decoded from.
- // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
- readx(n int) []byte
- readb([]byte)
- readn1() uint8
- numread() int // number of bytes read
- track()
- stopTrack() []byte
-
- // skip will skip any byte that matches, and return the first non-matching byte
- skip(accept *bitset256) (token byte)
- // readTo will read any byte that matches, stopping once no-longer matching.
- readTo(in []byte, accept *bitset256) (out []byte)
- // readUntil will read, only stopping once it matches the 'stop' byte.
- readUntil(in []byte, stop byte) (out []byte)
-}
-
-type decDriver interface {
- // this will check if the next token is a break.
- CheckBreak() bool
- // Note: TryDecodeAsNil should be careful not to share any temporary []byte with
- // the rest of the decDriver. This is because sometimes, we optimize by holding onto
- // a transient []byte, and ensuring the only other call we make to the decDriver
- // during that time is maybe a TryDecodeAsNil() call.
- TryDecodeAsNil() bool
- // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known.
- ContainerType() (vt valueType)
- // IsBuiltinType(rt uintptr) bool
-
- // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
- // For maps and arrays, it will not do the decoding in-band, but will signal
- // the decoder, so that is done later, by setting the decNaked.valueType field.
- //
- // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
- // for extensions, DecodeNaked must read the tag and the []byte if it exists.
- // if the []byte is not read, then kInterfaceNaked will treat it as a Handle
- // that stores the subsequent value in-band, and complete reading the RawExt.
- //
- // extensions should also use readx to decode them, for efficiency.
- // kInterface will extract the detached byte slice if it has to pass it outside its realm.
- DecodeNaked()
-
- // Deprecated: use DecodeInt64 and DecodeUint64 instead
- // DecodeInt(bitsize uint8) (i int64)
- // DecodeUint(bitsize uint8) (ui uint64)
-
- DecodeInt64() (i int64)
- DecodeUint64() (ui uint64)
-
- DecodeFloat64() (f float64)
- DecodeBool() (b bool)
- // DecodeString can also decode symbols.
- // It looks redundant as DecodeBytes is available.
- // However, some codecs (e.g. binc) support symbols and can
- // return a pre-stored string value, meaning that it can bypass
- // the cost of []byte->string conversion.
- DecodeString() (s string)
- DecodeStringAsBytes() (v []byte)
-
- // DecodeBytes may be called directly, without going through reflection.
- // Consequently, it must be designed to handle possible nil.
- DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
- // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
-
- // decodeExt will decode into a *RawExt or into an extension.
- DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
- // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
-
- DecodeTime() (t time.Time)
-
- ReadArrayStart() int
- ReadArrayElem()
- ReadArrayEnd()
- ReadMapStart() int
- ReadMapElemKey()
- ReadMapElemValue()
- ReadMapEnd()
-
- reset()
- uncacheRead()
-}
-
-type decDriverNoopContainerReader struct{}
-
-func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return }
-func (x decDriverNoopContainerReader) ReadArrayElem() {}
-func (x decDriverNoopContainerReader) ReadArrayEnd() {}
-func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return }
-func (x decDriverNoopContainerReader) ReadMapElemKey() {}
-func (x decDriverNoopContainerReader) ReadMapElemValue() {}
-func (x decDriverNoopContainerReader) ReadMapEnd() {}
-func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
-
-// func (x decNoSeparator) uncacheRead() {}
-
-// DecodeOptions captures configuration options during decode.
-type DecodeOptions struct {
- // MapType specifies type to use during schema-less decoding of a map in the stream.
- // If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true,
- // else map[interface{}]interface{}.
- MapType reflect.Type
-
- // SliceType specifies type to use during schema-less decoding of an array in the stream.
- // If nil (unset), we default to []interface{} for all formats.
- SliceType reflect.Type
-
- // MaxInitLen defines the maxinum initial length that we "make" a collection
- // (string, slice, map, chan). If 0 or negative, we default to a sensible value
- // based on the size of an element in the collection.
- //
- // For example, when decoding, a stream may say that it has 2^64 elements.
- // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash.
- // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
- MaxInitLen int
-
- // ReaderBufferSize is the size of the buffer used when reading.
- //
- // if > 0, we use a smart buffer internally for performance purposes.
- ReaderBufferSize int
-
- // If ErrorIfNoField, return an error when decoding a map
- // from a codec stream into a struct, and no matching struct field is found.
- ErrorIfNoField bool
-
- // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
- // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
- // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
- ErrorIfNoArrayExpand bool
-
- // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
- SignedInteger bool
-
- // MapValueReset controls how we decode into a map value.
- //
- // By default, we MAY retrieve the mapping for a key, and then decode into that.
- // However, especially with big maps, that retrieval may be expensive and unnecessary
- // if the stream already contains all that is necessary to recreate the value.
- //
- // If true, we will never retrieve the previous mapping,
- // but rather decode into a new value and set that in the map.
- //
- // If false, we will retrieve the previous mapping if necessary e.g.
- // the previous mapping is a pointer, or is a struct or array with pre-set state,
- // or is an interface.
- MapValueReset bool
-
- // SliceElementReset: on decoding a slice, reset the element to a zero value first.
- //
- // concern: if the slice already contained some garbage, we will decode into that garbage.
- SliceElementReset bool
-
- // InterfaceReset controls how we decode into an interface.
- //
- // By default, when we see a field that is an interface{...},
- // or a map with interface{...} value, we will attempt decoding into the
- // "contained" value.
- //
- // However, this prevents us from reading a string into an interface{}
- // that formerly contained a number.
- //
- // If true, we will decode into a new "blank" value, and set that in the interface.
- // If false, we will decode into whatever is contained in the interface.
- InterfaceReset bool
-
- // InternString controls interning of strings during decoding.
- //
- // Some handles, e.g. json, typically will read map keys as strings.
- // If the set of keys are finite, it may help reduce allocation to
- // look them up from a map (than to allocate them afresh).
- //
- // Note: Handles will be smart when using the intern functionality.
- // Every string should not be interned.
- // An excellent use-case for interning is struct field names,
- // or map keys where key type is string.
- InternString bool
-
- // PreferArrayOverSlice controls whether to decode to an array or a slice.
- //
- // This only impacts decoding into a nil interface{}.
- // Consequently, it has no effect on codecgen.
- //
- // *Note*: This only applies if using go1.5 and above,
- // as it requires reflect.ArrayOf support which was absent before go1.5.
- PreferArrayOverSlice bool
-
- // DeleteOnNilMapValue controls how to decode a nil value in the stream.
- //
- // If true, we will delete the mapping of the key.
- // Else, just set the mapping to the zero value of the type.
- DeleteOnNilMapValue bool
-}
-
-// ------------------------------------
-
-type bufioDecReader struct {
- buf []byte
- r io.Reader
-
- c int // cursor
- n int // num read
- err error
-
- tr []byte
- trb bool
- b [4]byte
-}
-
-func (z *bufioDecReader) reset(r io.Reader) {
- z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false
- if z.tr != nil {
- z.tr = z.tr[:0]
- }
-}
-
-func (z *bufioDecReader) Read(p []byte) (n int, err error) {
- if z.err != nil {
- return 0, z.err
- }
- p0 := p
- n = copy(p, z.buf[z.c:])
- z.c += n
- if z.c == len(z.buf) {
- z.c = 0
- }
- z.n += n
- if len(p) == n {
- if z.c == 0 {
- z.buf = z.buf[:1]
- z.buf[0] = p[len(p)-1]
- z.c = 1
- }
- if z.trb {
- z.tr = append(z.tr, p0[:n]...)
- }
- return
- }
- p = p[n:]
- var n2 int
- // if we are here, then z.buf is all read
- if len(p) > len(z.buf) {
- n2, err = decReadFull(z.r, p)
- n += n2
- z.n += n2
- z.err = err
- // don't return EOF if some bytes were read. keep for next time.
- if n > 0 && err == io.EOF {
- err = nil
- }
- // always keep last byte in z.buf
- z.buf = z.buf[:1]
- z.buf[0] = p[len(p)-1]
- z.c = 1
- if z.trb {
- z.tr = append(z.tr, p0[:n]...)
- }
- return
- }
- // z.c is now 0, and len(p) <= len(z.buf)
- for len(p) > 0 && z.err == nil {
- // println("len(p) loop starting ... ")
- z.c = 0
- z.buf = z.buf[0:cap(z.buf)]
- n2, err = z.r.Read(z.buf)
- if n2 > 0 {
- if err == io.EOF {
- err = nil
- }
- z.buf = z.buf[:n2]
- n2 = copy(p, z.buf)
- z.c = n2
- n += n2
- z.n += n2
- p = p[n2:]
- }
- z.err = err
- // println("... len(p) loop done")
- }
- if z.c == 0 {
- z.buf = z.buf[:1]
- z.buf[0] = p[len(p)-1]
- z.c = 1
- }
- if z.trb {
- z.tr = append(z.tr, p0[:n]...)
- }
- return
-}
-
-func (z *bufioDecReader) ReadByte() (b byte, err error) {
- z.b[0] = 0
- _, err = z.Read(z.b[:1])
- b = z.b[0]
- return
-}
-
-func (z *bufioDecReader) UnreadByte() (err error) {
- if z.err != nil {
- return z.err
- }
- if z.c > 0 {
- z.c--
- z.n--
- if z.trb {
- z.tr = z.tr[:len(z.tr)-1]
- }
- return
- }
- return errDecUnreadByteNothingToRead
-}
-
-func (z *bufioDecReader) numread() int {
- return z.n
-}
-
-func (z *bufioDecReader) readx(n int) (bs []byte) {
- if n <= 0 || z.err != nil {
- return
- }
- if z.c+n <= len(z.buf) {
- bs = z.buf[z.c : z.c+n]
- z.n += n
- z.c += n
- if z.trb {
- z.tr = append(z.tr, bs...)
- }
- return
- }
- bs = make([]byte, n)
- _, err := z.Read(bs)
- if err != nil {
- panic(err)
- }
- return
-}
-
-func (z *bufioDecReader) readb(bs []byte) {
- _, err := z.Read(bs)
- if err != nil {
- panic(err)
- }
-}
-
-// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) {
-// b, err := z.ReadByte()
-// if err != nil {
-// if err == io.EOF {
-// eof = true
-// } else {
-// panic(err)
-// }
-// }
-// return
-// }
-
-func (z *bufioDecReader) readn1() (b uint8) {
- b, err := z.ReadByte()
- if err != nil {
- panic(err)
- }
- return
-}
-
-func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) {
- // flag: 1 (skip), 2 (readTo), 4 (readUntil)
- if flag == 4 {
- for i := z.c; i < len(z.buf); i++ {
- if z.buf[i] == stop {
- token = z.buf[i]
- z.n = z.n + (i - z.c) - 1
- i++
- out = z.buf[z.c:i]
- if z.trb {
- z.tr = append(z.tr, z.buf[z.c:i]...)
- }
- z.c = i
- return
- }
- }
- } else {
- for i := z.c; i < len(z.buf); i++ {
- if !accept.isset(z.buf[i]) {
- token = z.buf[i]
- z.n = z.n + (i - z.c) - 1
- if flag == 1 {
- i++
- } else {
- out = z.buf[z.c:i]
- }
- if z.trb {
- z.tr = append(z.tr, z.buf[z.c:i]...)
- }
- z.c = i
- return
- }
- }
- }
- z.n += len(z.buf) - z.c
- if flag != 1 {
- out = append(in, z.buf[z.c:]...)
- }
- if z.trb {
- z.tr = append(z.tr, z.buf[z.c:]...)
- }
- var n2 int
- if z.err != nil {
- return
- }
- for {
- z.c = 0
- z.buf = z.buf[0:cap(z.buf)]
- n2, z.err = z.r.Read(z.buf)
- if n2 > 0 && z.err != nil {
- z.err = nil
- }
- z.buf = z.buf[:n2]
- if flag == 4 {
- for i := 0; i < n2; i++ {
- if z.buf[i] == stop {
- token = z.buf[i]
- z.n += i - 1
- i++
- out = append(out, z.buf[z.c:i]...)
- if z.trb {
- z.tr = append(z.tr, z.buf[z.c:i]...)
- }
- z.c = i
- return
- }
- }
- } else {
- for i := 0; i < n2; i++ {
- if !accept.isset(z.buf[i]) {
- token = z.buf[i]
- z.n += i - 1
- if flag == 1 {
- i++
- }
- if flag != 1 {
- out = append(out, z.buf[z.c:i]...)
- }
- if z.trb {
- z.tr = append(z.tr, z.buf[z.c:i]...)
- }
- z.c = i
- return
- }
- }
- }
- if flag != 1 {
- out = append(out, z.buf[:n2]...)
- }
- z.n += n2
- if z.err != nil {
- return
- }
- if z.trb {
- z.tr = append(z.tr, z.buf[:n2]...)
- }
- }
-}
-
-func (z *bufioDecReader) skip(accept *bitset256) (token byte) {
- token, _ = z.search(nil, accept, 0, 1)
- return
-}
-
-func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) {
- _, out = z.search(in, accept, 0, 2)
- return
-}
-
-func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) {
- _, out = z.search(in, nil, stop, 4)
- return
-}
-
-func (z *bufioDecReader) unreadn1() {
- err := z.UnreadByte()
- if err != nil {
- panic(err)
- }
-}
-
-func (z *bufioDecReader) track() {
- if z.tr != nil {
- z.tr = z.tr[:0]
- }
- z.trb = true
-}
-
-func (z *bufioDecReader) stopTrack() (bs []byte) {
- z.trb = false
- return z.tr
-}
-
-// ioDecReader is a decReader that reads off an io.Reader.
-//
-// It also has a fallback implementation of ByteScanner if needed.
-type ioDecReader struct {
- r io.Reader // the reader passed in
-
- rr io.Reader
- br io.ByteScanner
-
- l byte // last byte
- ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread
- trb bool // tracking bytes turned on
- _ bool
- b [4]byte // tiny buffer for reading single bytes
-
- x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc
- n int // num read
- tr []byte // tracking bytes read
-}
-
-func (z *ioDecReader) reset(r io.Reader) {
- z.r = r
- z.rr = r
- z.l, z.ls, z.n, z.trb = 0, 0, 0, false
- if z.tr != nil {
- z.tr = z.tr[:0]
- }
- var ok bool
- if z.br, ok = r.(io.ByteScanner); !ok {
- z.br = z
- z.rr = z
- }
-}
-
-func (z *ioDecReader) Read(p []byte) (n int, err error) {
- if len(p) == 0 {
- return
- }
- var firstByte bool
- if z.ls == 1 {
- z.ls = 2
- p[0] = z.l
- if len(p) == 1 {
- n = 1
- return
- }
- firstByte = true
- p = p[1:]
- }
- n, err = z.r.Read(p)
- if n > 0 {
- if err == io.EOF && n == len(p) {
- err = nil // read was successful, so postpone EOF (till next time)
- }
- z.l = p[n-1]
- z.ls = 2
- }
- if firstByte {
- n++
- }
- return
-}
-
-func (z *ioDecReader) ReadByte() (c byte, err error) {
- n, err := z.Read(z.b[:1])
- if n == 1 {
- c = z.b[0]
- if err == io.EOF {
- err = nil // read was successful, so postpone EOF (till next time)
- }
- }
- return
-}
-
-func (z *ioDecReader) UnreadByte() (err error) {
- switch z.ls {
- case 2:
- z.ls = 1
- case 0:
- err = errDecUnreadByteNothingToRead
- case 1:
- err = errDecUnreadByteLastByteNotRead
- default:
- err = errDecUnreadByteUnknown
- }
- return
-}
-
-func (z *ioDecReader) numread() int {
- return z.n
-}
-
-func (z *ioDecReader) readx(n int) (bs []byte) {
- if n <= 0 {
- return
- }
- if n < len(z.x) {
- bs = z.x[:n]
- } else {
- bs = make([]byte, n)
- }
- if _, err := decReadFull(z.rr, bs); err != nil {
- panic(err)
- }
- z.n += len(bs)
- if z.trb {
- z.tr = append(z.tr, bs...)
- }
- return
-}
-
-func (z *ioDecReader) readb(bs []byte) {
- // if len(bs) == 0 {
- // return
- // }
- if _, err := decReadFull(z.rr, bs); err != nil {
- panic(err)
- }
- z.n += len(bs)
- if z.trb {
- z.tr = append(z.tr, bs...)
- }
-}
-
-func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
- b, err := z.br.ReadByte()
- if err == nil {
- z.n++
- if z.trb {
- z.tr = append(z.tr, b)
- }
- } else if err == io.EOF {
- eof = true
- } else {
- panic(err)
- }
- return
-}
-
-func (z *ioDecReader) readn1() (b uint8) {
- var err error
- if b, err = z.br.ReadByte(); err == nil {
- z.n++
- if z.trb {
- z.tr = append(z.tr, b)
- }
- return
- }
- panic(err)
-}
-
-func (z *ioDecReader) skip(accept *bitset256) (token byte) {
- for {
- var eof bool
- token, eof = z.readn1eof()
- if eof {
- return
- }
- if accept.isset(token) {
- continue
- }
- return
- }
-}
-
-func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) {
- out = in
- for {
- token, eof := z.readn1eof()
- if eof {
- return
- }
- if accept.isset(token) {
- out = append(out, token)
- } else {
- z.unreadn1()
- return
- }
- }
-}
-
-func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) {
- out = in
- for {
- token, eof := z.readn1eof()
- if eof {
- panic(io.EOF)
- }
- out = append(out, token)
- if token == stop {
- return
- }
- }
-}
-
-func (z *ioDecReader) unreadn1() {
- err := z.br.UnreadByte()
- if err != nil {
- panic(err)
- }
- z.n--
- if z.trb {
- if l := len(z.tr) - 1; l >= 0 {
- z.tr = z.tr[:l]
- }
- }
-}
-
-func (z *ioDecReader) track() {
- if z.tr != nil {
- z.tr = z.tr[:0]
- }
- z.trb = true
-}
-
-func (z *ioDecReader) stopTrack() (bs []byte) {
- z.trb = false
- return z.tr
-}
-
-// ------------------------------------
-
-var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read")
-
-// bytesDecReader is a decReader that reads off a byte slice with zero copying
-type bytesDecReader struct {
- b []byte // data
- c int // cursor
- a int // available
- t int // track start
-}
-
-func (z *bytesDecReader) reset(in []byte) {
- z.b = in
- z.a = len(in)
- z.c = 0
- z.t = 0
-}
-
-func (z *bytesDecReader) numread() int {
- return z.c
-}
-
-func (z *bytesDecReader) unreadn1() {
- if z.c == 0 || len(z.b) == 0 {
- panic(errBytesDecReaderCannotUnread)
- }
- z.c--
- z.a++
- return
-}
-
-func (z *bytesDecReader) readx(n int) (bs []byte) {
- // slicing from a non-constant start position is more expensive,
- // as more computation is required to decipher the pointer start position.
- // However, we do it only once, and it's better than reslicing both z.b and return value.
-
- if n <= 0 {
- } else if z.a == 0 {
- panic(io.EOF)
- } else if n > z.a {
- panic(io.ErrUnexpectedEOF)
- } else {
- c0 := z.c
- z.c = c0 + n
- z.a = z.a - n
- bs = z.b[c0:z.c]
- }
- return
-}
-
-func (z *bytesDecReader) readb(bs []byte) {
- copy(bs, z.readx(len(bs)))
-}
-
-func (z *bytesDecReader) readn1() (v uint8) {
- if z.a == 0 {
- panic(io.EOF)
- }
- v = z.b[z.c]
- z.c++
- z.a--
- return
-}
-
-// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
-// if z.a == 0 {
-// eof = true
-// return
-// }
-// v = z.b[z.c]
-// z.c++
-// z.a--
-// return
-// }
-
-func (z *bytesDecReader) skip(accept *bitset256) (token byte) {
- if z.a == 0 {
- return
- }
- blen := len(z.b)
- for i := z.c; i < blen; i++ {
- if !accept.isset(z.b[i]) {
- token = z.b[i]
- i++
- z.a -= (i - z.c)
- z.c = i
- return
- }
- }
- z.a, z.c = 0, blen
- return
-}
-
-func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) {
- if z.a == 0 {
- return
- }
- blen := len(z.b)
- for i := z.c; i < blen; i++ {
- if !accept.isset(z.b[i]) {
- out = z.b[z.c:i]
- z.a -= (i - z.c)
- z.c = i
- return
- }
- }
- out = z.b[z.c:]
- z.a, z.c = 0, blen
- return
-}
-
-func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) {
- if z.a == 0 {
- panic(io.EOF)
- }
- blen := len(z.b)
- for i := z.c; i < blen; i++ {
- if z.b[i] == stop {
- i++
- out = z.b[z.c:i]
- z.a -= (i - z.c)
- z.c = i
- return
- }
- }
- z.a, z.c = 0, blen
- panic(io.EOF)
-}
-
-func (z *bytesDecReader) track() {
- z.t = z.c
-}
-
-func (z *bytesDecReader) stopTrack() (bs []byte) {
- return z.b[z.t:z.c]
-}
-
-// ----------------------------------------
-
-// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) {
-// d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv))
-// }
-
-func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
- d.d.DecodeExt(rv2i(rv), 0, nil)
-}
-
-func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) {
- d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn)
-}
-
-func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
- rv2i(rv).(Selfer).CodecDecodeSelf(d)
-}
-
-func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
- bm := rv2i(rv).(encoding.BinaryUnmarshaler)
- xbs := d.d.DecodeBytes(nil, true)
- if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
- panic(fnerr)
- }
-}
-
-func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
- tm := rv2i(rv).(encoding.TextUnmarshaler)
- fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
- if fnerr != nil {
- panic(fnerr)
- }
-}
-
-func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
- tm := rv2i(rv).(jsonUnmarshaler)
- // bs := d.d.DecodeBytes(d.b[:], true, true)
- // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
- fnerr := tm.UnmarshalJSON(d.nextValueBytes())
- if fnerr != nil {
- panic(fnerr)
- }
-}
-
-func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
- d.errorf("no decoding function defined for kind %v", rv.Kind())
-}
-
-// var kIntfCtr uint64
-
-func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
- // nil interface:
- // use some hieristics to decode it appropriately
- // based on the detected next value in the stream.
- n := d.naked()
- d.d.DecodeNaked()
- if n.v == valueTypeNil {
- return
- }
- // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
- if f.ti.numMeth > 0 {
- d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
- return
- }
- // var useRvn bool
- switch n.v {
- case valueTypeMap:
- // if json, default to a map type with string keys
- mtid := d.mtid
- if mtid == 0 {
- if d.jsms {
- mtid = mapStrIntfTypId
- } else {
- mtid = mapIntfIntfTypId
- }
- }
- if mtid == mapIntfIntfTypId {
- n.initContainers()
- if n.lm < arrayCacheLen {
- n.ma[n.lm] = nil
- rvn = n.rma[n.lm]
- n.lm++
- d.decode(&n.ma[n.lm-1])
- n.lm--
- } else {
- var v2 map[interface{}]interface{}
- d.decode(&v2)
- rvn = reflect.ValueOf(&v2).Elem()
- }
- } else if mtid == mapStrIntfTypId { // for json performance
- n.initContainers()
- if n.ln < arrayCacheLen {
- n.na[n.ln] = nil
- rvn = n.rna[n.ln]
- n.ln++
- d.decode(&n.na[n.ln-1])
- n.ln--
- } else {
- var v2 map[string]interface{}
- d.decode(&v2)
- rvn = reflect.ValueOf(&v2).Elem()
- }
- } else {
- if d.mtr {
- rvn = reflect.New(d.h.MapType)
- d.decode(rv2i(rvn))
- rvn = rvn.Elem()
- } else {
- rvn = reflect.New(d.h.MapType).Elem()
- d.decodeValue(rvn, nil, true)
- }
- }
- case valueTypeArray:
- if d.stid == 0 || d.stid == intfSliceTypId {
- n.initContainers()
- if n.ls < arrayCacheLen {
- n.sa[n.ls] = nil
- rvn = n.rsa[n.ls]
- n.ls++
- d.decode(&n.sa[n.ls-1])
- n.ls--
- } else {
- var v2 []interface{}
- d.decode(&v2)
- rvn = reflect.ValueOf(&v2).Elem()
- }
- if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
- rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem()
- reflect.Copy(rvn2, rvn)
- rvn = rvn2
- }
- } else {
- if d.str {
- rvn = reflect.New(d.h.SliceType)
- d.decode(rv2i(rvn))
- rvn = rvn.Elem()
- } else {
- rvn = reflect.New(d.h.SliceType).Elem()
- d.decodeValue(rvn, nil, true)
- }
- }
- case valueTypeExt:
- var v interface{}
- tag, bytes := n.u, n.l // calling decode below might taint the values
- if bytes == nil {
- n.initContainers()
- if n.li < arrayCacheLen {
- n.ia[n.li] = nil
- n.li++
- d.decode(&n.ia[n.li-1])
- // v = *(&n.ia[l])
- n.li--
- v = n.ia[n.li]
- n.ia[n.li] = nil
- } else {
- d.decode(&v)
- }
- }
- bfn := d.h.getExtForTag(tag)
- if bfn == nil {
- var re RawExt
- re.Tag = tag
- re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
- re.Value = v
- rvn = reflect.ValueOf(&re).Elem()
- } else {
- rvnA := reflect.New(bfn.rt)
- if bytes != nil {
- bfn.ext.ReadExt(rv2i(rvnA), bytes)
- } else {
- bfn.ext.UpdateExt(rv2i(rvnA), v)
- }
- rvn = rvnA.Elem()
- }
- case valueTypeNil:
- // no-op
- case valueTypeInt:
- rvn = n.ri
- case valueTypeUint:
- rvn = n.ru
- case valueTypeFloat:
- rvn = n.rf
- case valueTypeBool:
- rvn = n.rb
- case valueTypeString, valueTypeSymbol:
- rvn = n.rs
- case valueTypeBytes:
- rvn = n.rl
- case valueTypeTime:
- rvn = n.rt
- default:
- panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v)
- }
- return
-}
-
-func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
- // Note:
- // A consequence of how kInterface works, is that
- // if an interface already contains something, we try
- // to decode into what was there before.
- // We do not replace with a generic value (as got from decodeNaked).
-
- // every interface passed here MUST be settable.
- var rvn reflect.Value
- if rv.IsNil() || d.h.InterfaceReset {
- // check if mapping to a type: if so, initialize it and move on
- rvn = d.h.intf2impl(f.ti.rtid)
- if rvn.IsValid() {
- rv.Set(rvn)
- } else {
- rvn = d.kInterfaceNaked(f)
- if rvn.IsValid() {
- rv.Set(rvn)
- } else if d.h.InterfaceReset {
- // reset to zero value based on current type in there.
- rv.Set(reflect.Zero(rv.Elem().Type()))
- }
- return
- }
- } else {
- // now we have a non-nil interface value, meaning it contains a type
- rvn = rv.Elem()
- }
- if d.d.TryDecodeAsNil() {
- rv.Set(reflect.Zero(rvn.Type()))
- return
- }
-
- // Note: interface{} is settable, but underlying type may not be.
- // Consequently, we MAY have to create a decodable value out of the underlying value,
- // decode into it, and reset the interface itself.
- // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type())
-
- rvn2, canDecode := isDecodeable(rvn)
- if canDecode {
- d.decodeValue(rvn2, nil, true)
- return
- }
-
- rvn2 = reflect.New(rvn.Type()).Elem()
- rvn2.Set(rvn)
- d.decodeValue(rvn2, nil, true)
- rv.Set(rvn2)
-}
-
-func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) {
- // use if-else-if, not switch (which compiles to binary-search)
- // since keyType is typically valueTypeString, branch prediction is pretty good.
-
- if keyType == valueTypeString {
- rvkencname = dd.DecodeStringAsBytes()
- } else if keyType == valueTypeInt {
- rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10)
- } else if keyType == valueTypeUint {
- rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10)
- } else if keyType == valueTypeFloat {
- rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64)
- } else {
- rvkencname = dd.DecodeStringAsBytes()
- }
- return rvkencname
-}
-
-func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
- fti := f.ti
- dd := d.d
- elemsep := d.esep
- sfn := structFieldNode{v: rv, update: true}
- ctyp := dd.ContainerType()
- if ctyp == valueTypeMap {
- containerLen := dd.ReadMapStart()
- if containerLen == 0 {
- dd.ReadMapEnd()
- return
- }
- tisfi := fti.sfiSort
- hasLen := containerLen >= 0
-
- var rvkencname []byte
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if elemsep {
- dd.ReadMapElemKey()
- }
- rvkencname = decStructFieldKey(dd, fti.keyType, &d.b)
- if elemsep {
- dd.ReadMapElemValue()
- }
- if k := fti.indexForEncName(rvkencname); k > -1 {
- si := tisfi[k]
- if dd.TryDecodeAsNil() {
- si.setToZeroValue(rv)
- } else {
- d.decodeValue(sfn.field(si), nil, true)
- }
- } else {
- d.structFieldNotFound(-1, stringView(rvkencname))
- }
- // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop
- }
- dd.ReadMapEnd()
- } else if ctyp == valueTypeArray {
- containerLen := dd.ReadArrayStart()
- if containerLen == 0 {
- dd.ReadArrayEnd()
- return
- }
- // Not much gain from doing it two ways for array.
- // Arrays are not used as much for structs.
- hasLen := containerLen >= 0
- for j, si := range fti.sfiSrc {
- if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) {
- break
- }
- if elemsep {
- dd.ReadArrayElem()
- }
- if dd.TryDecodeAsNil() {
- si.setToZeroValue(rv)
- } else {
- d.decodeValue(sfn.field(si), nil, true)
- }
- }
- if containerLen > len(fti.sfiSrc) {
- // read remaining values and throw away
- for j := len(fti.sfiSrc); j < containerLen; j++ {
- if elemsep {
- dd.ReadArrayElem()
- }
- d.structFieldNotFound(j, "")
- }
- }
- dd.ReadArrayEnd()
- } else {
- d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct)
- return
- }
-}
-
-func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
- // A slice can be set from a map or array in stream.
- // This way, the order can be kept (as order is lost with map).
- ti := f.ti
- if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 {
- d.errorf("receive-only channel cannot be decoded")
- }
- dd := d.d
- rtelem0 := ti.elem
- ctyp := dd.ContainerType()
- if ctyp == valueTypeBytes || ctyp == valueTypeString {
- // you can only decode bytes or string in the stream into a slice or array of bytes
- if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) {
- d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt)
- }
- if f.seq == seqTypeChan {
- bs2 := dd.DecodeBytes(nil, true)
- irv := rv2i(rv)
- ch, ok := irv.(chan<- byte)
- if !ok {
- ch = irv.(chan byte)
- }
- for _, b := range bs2 {
- ch <- b
- }
- } else {
- rvbs := rv.Bytes()
- bs2 := dd.DecodeBytes(rvbs, false)
- // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) {
- if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) {
- if rv.CanSet() {
- rv.SetBytes(bs2)
- } else if len(rvbs) > 0 && len(bs2) > 0 {
- copy(rvbs, bs2)
- }
- }
- }
- return
- }
-
- // array := f.seq == seqTypeChan
-
- slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map)
-
- // an array can never return a nil slice. so no need to check f.array here.
- if containerLenS == 0 {
- if rv.CanSet() {
- if f.seq == seqTypeSlice {
- if rv.IsNil() {
- rv.Set(reflect.MakeSlice(ti.rt, 0, 0))
- } else {
- rv.SetLen(0)
- }
- } else if f.seq == seqTypeChan {
- if rv.IsNil() {
- rv.Set(reflect.MakeChan(ti.rt, 0))
- }
- }
- }
- slh.End()
- return
- }
-
- rtelem0Size := int(rtelem0.Size())
- rtElem0Kind := rtelem0.Kind()
- rtelem0Mut := !isImmutableKind(rtElem0Kind)
- rtelem := rtelem0
- rtelemkind := rtelem.Kind()
- for rtelemkind == reflect.Ptr {
- rtelem = rtelem.Elem()
- rtelemkind = rtelem.Kind()
- }
-
- var fn *codecFn
-
- var rvCanset = rv.CanSet()
- var rvChanged bool
- var rv0 = rv
- var rv9 reflect.Value
-
- rvlen := rv.Len()
- rvcap := rv.Cap()
- hasLen := containerLenS > 0
- if hasLen && f.seq == seqTypeSlice {
- if containerLenS > rvcap {
- oldRvlenGtZero := rvlen > 0
- rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size()))
- if rvlen <= rvcap {
- if rvCanset {
- rv.SetLen(rvlen)
- }
- } else if rvCanset {
- rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
- rvcap = rvlen
- rvChanged = true
- } else {
- d.errorf("cannot decode into non-settable slice")
- }
- if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) {
- reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
- }
- } else if containerLenS != rvlen {
- rvlen = containerLenS
- if rvCanset {
- rv.SetLen(rvlen)
- }
- // else {
- // rv = rv.Slice(0, rvlen)
- // rvChanged = true
- // d.errorf("cannot decode into non-settable slice")
- // }
- }
- }
-
- // consider creating new element once, and just decoding into it.
- var rtelem0Zero reflect.Value
- var rtelem0ZeroValid bool
- var decodeAsNil bool
- var j int
- d.cfer()
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() {
- if hasLen {
- rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size)
- } else if f.seq == seqTypeSlice {
- rvlen = decDefSliceCap
- } else {
- rvlen = decDefChanCap
- }
- if rvCanset {
- if f.seq == seqTypeSlice {
- rv = reflect.MakeSlice(ti.rt, rvlen, rvlen)
- rvChanged = true
- } else { // chan
- // xdebugf(">>>>>> haslen = %v, make chan of type '%v' with length: %v", hasLen, ti.rt, rvlen)
- rv = reflect.MakeChan(ti.rt, rvlen)
- rvChanged = true
- }
- } else {
- d.errorf("cannot decode into non-settable slice")
- }
- }
- slh.ElemContainerState(j)
- decodeAsNil = dd.TryDecodeAsNil()
- if f.seq == seqTypeChan {
- if decodeAsNil {
- rv.Send(reflect.Zero(rtelem0))
- continue
- }
- if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) {
- rv9 = reflect.New(rtelem0).Elem()
- }
- if fn == nil {
- fn = d.cf.get(rtelem, true, true)
- }
- d.decodeValue(rv9, fn, true)
- // xdebugf(">>>> rv9 sent on %v during decode: %v, with len=%v, cap=%v", rv.Type(), rv9, rv.Len(), rv.Cap())
- rv.Send(rv9)
- } else {
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= rvlen {
- if f.seq == seqTypeArray {
- d.arrayCannotExpand(rvlen, j+1)
- decodeIntoBlank = true
- } else { // if f.seq == seqTypeSlice
- // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs
- var rvcap2 int
- var rvErrmsg2 string
- rv9, rvcap2, rvChanged, rvErrmsg2 =
- expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap)
- if rvErrmsg2 != "" {
- d.errorf(rvErrmsg2)
- }
- rvlen++
- if rvChanged {
- rv = rv9
- rvcap = rvcap2
- }
- }
- }
- if decodeIntoBlank {
- if !decodeAsNil {
- d.swallow()
- }
- } else {
- rv9 = rv.Index(j)
- if d.h.SliceElementReset || decodeAsNil {
- if !rtelem0ZeroValid {
- rtelem0ZeroValid = true
- rtelem0Zero = reflect.Zero(rtelem0)
- }
- rv9.Set(rtelem0Zero)
- }
- if decodeAsNil {
- continue
- }
-
- if fn == nil {
- fn = d.cf.get(rtelem, true, true)
- }
- d.decodeValue(rv9, fn, true)
- }
- }
- }
- if f.seq == seqTypeSlice {
- if j < rvlen {
- if rv.CanSet() {
- rv.SetLen(j)
- } else if rvCanset {
- rv = rv.Slice(0, j)
- rvChanged = true
- } // else { d.errorf("kSlice: cannot change non-settable slice") }
- rvlen = j
- } else if j == 0 && rv.IsNil() {
- if rvCanset {
- rv = reflect.MakeSlice(ti.rt, 0, 0)
- rvChanged = true
- } // else { d.errorf("kSlice: cannot change non-settable slice") }
- }
- }
- slh.End()
-
- if rvChanged { // infers rvCanset=true, so it can be reset
- rv0.Set(rv)
- }
-}
-
-// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) {
-// // d.decodeValueFn(rv.Slice(0, rv.Len()))
-// f.kSlice(rv.Slice(0, rv.Len()))
-// }
-
-func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
- dd := d.d
- containerLen := dd.ReadMapStart()
- elemsep := d.esep
- ti := f.ti
- if rv.IsNil() {
- rv.Set(makeMapReflect(ti.rt, containerLen))
- }
-
- if containerLen == 0 {
- dd.ReadMapEnd()
- return
- }
-
- ktype, vtype := ti.key, ti.elem
- ktypeId := rt2id(ktype)
- vtypeKind := vtype.Kind()
-
- var keyFn, valFn *codecFn
- var ktypeLo, vtypeLo reflect.Type
-
- for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() {
- }
-
- for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
- }
-
- var mapGet, mapSet bool
- rvvImmut := isImmutableKind(vtypeKind)
- if !d.h.MapValueReset {
- // if pointer, mapGet = true
- // if interface, mapGet = true if !DecodeNakedAlways (else false)
- // if builtin, mapGet = false
- // else mapGet = true
- if vtypeKind == reflect.Ptr {
- mapGet = true
- } else if vtypeKind == reflect.Interface {
- if !d.h.InterfaceReset {
- mapGet = true
- }
- } else if !rvvImmut {
- mapGet = true
- }
- }
-
- var rvk, rvkp, rvv, rvz reflect.Value
- rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk.
- ktypeIsString := ktypeId == stringTypId
- ktypeIsIntf := ktypeId == intfTypId
- hasLen := containerLen > 0
- var kstrbs []byte
- d.cfer()
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if rvkMut || !rvkp.IsValid() {
- rvkp = reflect.New(ktype)
- rvk = rvkp.Elem()
- }
- if elemsep {
- dd.ReadMapElemKey()
- }
- if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block
- // Previously, if a nil key, we just ignored the mapped value and continued.
- // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil}
- // to be an empty map.
- // Instead, we treat a nil key as the zero value of the type.
- rvk.Set(reflect.Zero(ktype))
- } else if ktypeIsString {
- kstrbs = dd.DecodeStringAsBytes()
- rvk.SetString(stringView(kstrbs))
- // NOTE: if doing an insert, you MUST use a real string (not stringview)
- } else {
- if keyFn == nil {
- keyFn = d.cf.get(ktypeLo, true, true)
- }
- d.decodeValue(rvk, keyFn, true)
- }
- // special case if a byte array.
- if ktypeIsIntf {
- if rvk2 := rvk.Elem(); rvk2.IsValid() {
- if rvk2.Type() == uint8SliceTyp {
- rvk = reflect.ValueOf(d.string(rvk2.Bytes()))
- } else {
- rvk = rvk2
- }
- }
- }
-
- if elemsep {
- dd.ReadMapElemValue()
- }
-
- // Brittle, but OK per TryDecodeAsNil() contract.
- // i.e. TryDecodeAsNil never shares slices with other decDriver procedures
- if dd.TryDecodeAsNil() {
- if ktypeIsString {
- rvk.SetString(d.string(kstrbs))
- }
- if d.h.DeleteOnNilMapValue {
- rv.SetMapIndex(rvk, reflect.Value{})
- } else {
- rv.SetMapIndex(rvk, reflect.Zero(vtype))
- }
- continue
- }
-
- mapSet = true // set to false if u do a get, and its a non-nil pointer
- if mapGet {
- // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
- rvv = rv.MapIndex(rvk)
- if !rvv.IsValid() {
- rvv = reflect.New(vtype).Elem()
- } else if vtypeKind == reflect.Ptr {
- if rvv.IsNil() {
- rvv = reflect.New(vtype).Elem()
- } else {
- mapSet = false
- }
- } else if vtypeKind == reflect.Interface {
- // not addressable, and thus not settable.
- // e MUST create a settable/addressable variant
- rvv2 := reflect.New(rvv.Type()).Elem()
- if !rvv.IsNil() {
- rvv2.Set(rvv)
- }
- rvv = rvv2
- }
- // else it is ~mutable, and we can just decode into it directly
- } else if rvvImmut {
- if !rvz.IsValid() {
- rvz = reflect.New(vtype).Elem()
- }
- rvv = rvz
- } else {
- rvv = reflect.New(vtype).Elem()
- }
-
- // We MUST be done with the stringview of the key, before decoding the value
- // so that we don't bastardize the reused byte array.
- if mapSet && ktypeIsString {
- rvk.SetString(d.string(kstrbs))
- }
- if valFn == nil {
- valFn = d.cf.get(vtypeLo, true, true)
- }
- d.decodeValue(rvv, valFn, true)
- // d.decodeValueFn(rvv, valFn)
- if mapSet {
- rv.SetMapIndex(rvk, rvv)
- }
- // if ktypeIsString {
- // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop
- // }
- }
-
- dd.ReadMapEnd()
-}
-
-// decNaked is used to keep track of the primitives decoded.
-// Without it, we would have to decode each primitive and wrap it
-// in an interface{}, causing an allocation.
-// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
-// so we can rest assured that no other decoding happens while these
-// primitives are being decoded.
-//
-// maps and arrays are not handled by this mechanism.
-// However, RawExt is, and we accommodate for extensions that decode
-// RawExt from DecodeNaked, but need to decode the value subsequently.
-// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
-//
-// However, decNaked also keeps some arrays of default maps and slices
-// used in DecodeNaked. This way, we can get a pointer to it
-// without causing a new heap allocation.
-//
-// kInterfaceNaked will ensure that there is no allocation for the common
-// uses.
-
-type decNakedContainers struct {
- // array/stacks for reducing allocation
- // keep arrays at the bottom? Chance is that they are not used much.
- ia [arrayCacheLen]interface{}
- ma [arrayCacheLen]map[interface{}]interface{}
- na [arrayCacheLen]map[string]interface{}
- sa [arrayCacheLen][]interface{}
-
- // ria [arrayCacheLen]reflect.Value // not needed, as we decode directly into &ia[n]
- rma, rna, rsa [arrayCacheLen]reflect.Value // reflect.Value mapping to above
-}
-
-func (n *decNakedContainers) init() {
- for i := 0; i < arrayCacheLen; i++ {
- // n.ria[i] = reflect.ValueOf(&(n.ia[i])).Elem()
- n.rma[i] = reflect.ValueOf(&(n.ma[i])).Elem()
- n.rna[i] = reflect.ValueOf(&(n.na[i])).Elem()
- n.rsa[i] = reflect.ValueOf(&(n.sa[i])).Elem()
- }
-}
-
-type decNaked struct {
- // r RawExt // used for RawExt, uint, []byte.
-
- // primitives below
- u uint64
- i int64
- f float64
- l []byte
- s string
-
- // ---- cpu cache line boundary?
- t time.Time
- b bool
-
- // state
- v valueType
- li, lm, ln, ls int8
- inited bool
-
- *decNakedContainers
-
- ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
-
- // _ [6]uint64 // padding // no padding - rt goes into next cache line
-}
-
-func (n *decNaked) init() {
- if n.inited {
- return
- }
- n.ru = reflect.ValueOf(&n.u).Elem()
- n.ri = reflect.ValueOf(&n.i).Elem()
- n.rf = reflect.ValueOf(&n.f).Elem()
- n.rl = reflect.ValueOf(&n.l).Elem()
- n.rs = reflect.ValueOf(&n.s).Elem()
- n.rt = reflect.ValueOf(&n.t).Elem()
- n.rb = reflect.ValueOf(&n.b).Elem()
-
- n.inited = true
- // n.rr[] = reflect.ValueOf(&n.)
-}
-
-func (n *decNaked) initContainers() {
- if n.decNakedContainers == nil {
- n.decNakedContainers = new(decNakedContainers)
- n.decNakedContainers.init()
- }
-}
-
-func (n *decNaked) reset() {
- if n == nil {
- return
- }
- n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0
-}
-
-type rtid2rv struct {
- rtid uintptr
- rv reflect.Value
-}
-
-// --------------
-
-type decReaderSwitch struct {
- rb bytesDecReader
- // ---- cpu cache line boundary?
- ri *ioDecReader
- mtr, str bool // whether maptype or slicetype are known types
-
- be bool // is binary encoding
- bytes bool // is bytes reader
- js bool // is json handle
- jsms bool // is json handle, and MapKeyAsString
- esep bool // has elem separators
-}
-
-// TODO: Uncomment after mid-stack inlining enabled in go 1.11
-//
-// func (z *decReaderSwitch) unreadn1() {
-// if z.bytes {
-// z.rb.unreadn1()
-// } else {
-// z.ri.unreadn1()
-// }
-// }
-// func (z *decReaderSwitch) readx(n int) []byte {
-// if z.bytes {
-// return z.rb.readx(n)
-// }
-// return z.ri.readx(n)
-// }
-// func (z *decReaderSwitch) readb(s []byte) {
-// if z.bytes {
-// z.rb.readb(s)
-// } else {
-// z.ri.readb(s)
-// }
-// }
-// func (z *decReaderSwitch) readn1() uint8 {
-// if z.bytes {
-// return z.rb.readn1()
-// }
-// return z.ri.readn1()
-// }
-// func (z *decReaderSwitch) numread() int {
-// if z.bytes {
-// return z.rb.numread()
-// }
-// return z.ri.numread()
-// }
-// func (z *decReaderSwitch) track() {
-// if z.bytes {
-// z.rb.track()
-// } else {
-// z.ri.track()
-// }
-// }
-// func (z *decReaderSwitch) stopTrack() []byte {
-// if z.bytes {
-// return z.rb.stopTrack()
-// }
-// return z.ri.stopTrack()
-// }
-// func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
-// if z.bytes {
-// return z.rb.skip(accept)
-// }
-// return z.ri.skip(accept)
-// }
-// func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
-// if z.bytes {
-// return z.rb.readTo(in, accept)
-// }
-// return z.ri.readTo(in, accept)
-// }
-// func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) {
-// if z.bytes {
-// return z.rb.readUntil(in, stop)
-// }
-// return z.ri.readUntil(in, stop)
-// }
-
-// A Decoder reads and decodes an object from an input stream in the codec format.
-type Decoder struct {
- panicHdl
- // hopefully, reduce derefencing cost by laying the decReader inside the Decoder.
- // Try to put things that go together to fit within a cache line (8 words).
-
- d decDriver
- // NOTE: Decoder shouldn't call it's read methods,
- // as the handler MAY need to do some coordination.
- r decReader
- h *BasicHandle
- bi *bufioDecReader
- // cache the mapTypeId and sliceTypeId for faster comparisons
- mtid uintptr
- stid uintptr
-
- // ---- cpu cache line boundary?
- decReaderSwitch
-
- // ---- cpu cache line boundary?
- codecFnPooler
- // cr containerStateRecv
- n *decNaked
- nsp *sync.Pool
- err error
-
- // ---- cpu cache line boundary?
- b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers
- is map[string]string // used for interning strings
-
- // padding - false sharing help // modify 232 if Decoder struct changes.
- // _ [cacheLineSize - 232%cacheLineSize]byte
-}
-
-// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
-//
-// For efficiency, Users are encouraged to pass in a memory buffered reader
-// (eg bufio.Reader, bytes.Buffer).
-func NewDecoder(r io.Reader, h Handle) *Decoder {
- d := newDecoder(h)
- d.Reset(r)
- return d
-}
-
-// NewDecoderBytes returns a Decoder which efficiently decodes directly
-// from a byte slice with zero copying.
-func NewDecoderBytes(in []byte, h Handle) *Decoder {
- d := newDecoder(h)
- d.ResetBytes(in)
- return d
-}
-
-var defaultDecNaked decNaked
-
-func newDecoder(h Handle) *Decoder {
- d := &Decoder{h: h.getBasicHandle(), err: errDecoderNotInitialized}
- d.hh = h
- d.be = h.isBinary()
- // NOTE: do not initialize d.n here. It is lazily initialized in d.naked()
- var jh *JsonHandle
- jh, d.js = h.(*JsonHandle)
- if d.js {
- d.jsms = jh.MapKeyAsString
- }
- d.esep = d.hh.hasElemSeparators()
- if d.h.InternString {
- d.is = make(map[string]string, 32)
- }
- d.d = h.newDecDriver(d)
- // d.cr, _ = d.d.(containerStateRecv)
- return d
-}
-
-func (d *Decoder) resetCommon() {
- d.n.reset()
- d.d.reset()
- d.err = nil
- // reset all things which were cached from the Handle, but could change
- d.mtid, d.stid = 0, 0
- d.mtr, d.str = false, false
- if d.h.MapType != nil {
- d.mtid = rt2id(d.h.MapType)
- d.mtr = fastpathAV.index(d.mtid) != -1
- }
- if d.h.SliceType != nil {
- d.stid = rt2id(d.h.SliceType)
- d.str = fastpathAV.index(d.stid) != -1
- }
-}
-
-// Reset the Decoder with a new Reader to decode from,
-// clearing all state from last run(s).
-func (d *Decoder) Reset(r io.Reader) {
- if r == nil {
- return
- }
- if d.bi == nil {
- d.bi = new(bufioDecReader)
- }
- d.bytes = false
- if d.h.ReaderBufferSize > 0 {
- d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize)
- d.bi.reset(r)
- d.r = d.bi
- } else {
- // d.ri.x = &d.b
- // d.s = d.sa[:0]
- if d.ri == nil {
- d.ri = new(ioDecReader)
- }
- d.ri.reset(r)
- d.r = d.ri
- }
- d.resetCommon()
-}
-
-// ResetBytes resets the Decoder with a new []byte to decode from,
-// clearing all state from last run(s).
-func (d *Decoder) ResetBytes(in []byte) {
- if in == nil {
- return
- }
- d.bytes = true
- d.rb.reset(in)
- d.r = &d.rb
- d.resetCommon()
-}
-
-// naked must be called before each call to .DecodeNaked,
-// as they will use it.
-func (d *Decoder) naked() *decNaked {
- if d.n == nil {
- // consider one of:
- // - get from sync.Pool (if GC is frequent, there's no value here)
- // - new alloc (safest. only init'ed if it a naked decode will be done)
- // - field in Decoder (makes the Decoder struct very big)
- // To support using a decoder where a DecodeNaked is not needed,
- // we prefer #1 or #2.
- // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool
- // d.n.init()
- var v interface{}
- d.nsp, v = pool.decNaked()
- d.n = v.(*decNaked)
- }
- return d.n
-}
-
-// Decode decodes the stream from reader and stores the result in the
-// value pointed to by v. v cannot be a nil pointer. v can also be
-// a reflect.Value of a pointer.
-//
-// Note that a pointer to a nil interface is not a nil pointer.
-// If you do not know what type of stream it is, pass in a pointer to a nil interface.
-// We will decode and store a value in that nil interface.
-//
-// Sample usages:
-// // Decoding into a non-nil typed value
-// var f float32
-// err = codec.NewDecoder(r, handle).Decode(&f)
-//
-// // Decoding into nil interface
-// var v interface{}
-// dec := codec.NewDecoder(r, handle)
-// err = dec.Decode(&v)
-//
-// When decoding into a nil interface{}, we will decode into an appropriate value based
-// on the contents of the stream:
-// - Numbers are decoded as float64, int64 or uint64.
-// - Other values are decoded appropriately depending on the type:
-// bool, string, []byte, time.Time, etc
-// - Extensions are decoded as RawExt (if no ext function registered for the tag)
-// Configurations exist on the Handle to override defaults
-// (e.g. for MapType, SliceType and how to decode raw bytes).
-//
-// When decoding into a non-nil interface{} value, the mode of encoding is based on the
-// type of the value. When a value is seen:
-// - If an extension is registered for it, call that extension function
-// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
-// - Else decode it based on its reflect.Kind
-//
-// There are some special rules when decoding into containers (slice/array/map/struct).
-// Decode will typically use the stream contents to UPDATE the container.
-// - A map can be decoded from a stream map, by updating matching keys.
-// - A slice can be decoded from a stream array,
-// by updating the first n elements, where n is length of the stream.
-// - A slice can be decoded from a stream map, by decoding as if
-// it contains a sequence of key-value pairs.
-// - A struct can be decoded from a stream map, by updating matching fields.
-// - A struct can be decoded from a stream array,
-// by updating fields as they occur in the struct (by index).
-//
-// When decoding a stream map or array with length of 0 into a nil map or slice,
-// we reset the destination map or slice to a zero-length value.
-//
-// However, when decoding a stream nil, we reset the destination container
-// to its "zero" value (e.g. nil for slice/map, etc).
-//
-// Note: we allow nil values in the stream anywhere except for map keys.
-// A nil value in the encoded stream where a map key is expected is treated as an error.
-func (d *Decoder) Decode(v interface{}) (err error) {
- defer d.deferred(&err)
- d.MustDecode(v)
- return
-}
-
-// MustDecode is like Decode, but panics if unable to Decode.
-// This provides insight to the code location that triggered the error.
-func (d *Decoder) MustDecode(v interface{}) {
- // TODO: Top-level: ensure that v is a pointer and not nil.
- if d.err != nil {
- panic(d.err)
- }
- if d.d.TryDecodeAsNil() {
- setZero(v)
- } else {
- d.decode(v)
- }
- d.alwaysAtEnd()
- // xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
-}
-
-func (d *Decoder) deferred(err1 *error) {
- d.alwaysAtEnd()
- if recoverPanicToErr {
- if x := recover(); x != nil {
- panicValToErr(d, x, err1)
- panicValToErr(d, x, &d.err)
- }
- }
-}
-
-func (d *Decoder) alwaysAtEnd() {
- if d.n != nil {
- // if n != nil, then nsp != nil (they are always set together)
- d.nsp.Put(d.n)
- d.n, d.nsp = nil, nil
- }
- d.codecFnPooler.alwaysAtEnd()
-}
-
-// // this is not a smart swallow, as it allocates objects and does unnecessary work.
-// func (d *Decoder) swallowViaHammer() {
-// var blank interface{}
-// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem())
-// }
-
-func (d *Decoder) swallow() {
- // smarter decode that just swallows the content
- dd := d.d
- if dd.TryDecodeAsNil() {
- return
- }
- elemsep := d.esep
- switch dd.ContainerType() {
- case valueTypeMap:
- containerLen := dd.ReadMapStart()
- hasLen := containerLen >= 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break}
- if elemsep {
- dd.ReadMapElemKey()
- }
- d.swallow()
- if elemsep {
- dd.ReadMapElemValue()
- }
- d.swallow()
- }
- dd.ReadMapEnd()
- case valueTypeArray:
- containerLen := dd.ReadArrayStart()
- hasLen := containerLen >= 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if elemsep {
- dd.ReadArrayElem()
- }
- d.swallow()
- }
- dd.ReadArrayEnd()
- case valueTypeBytes:
- dd.DecodeBytes(d.b[:], true)
- case valueTypeString:
- dd.DecodeStringAsBytes()
- default:
- // these are all primitives, which we can get from decodeNaked
- // if RawExt using Value, complete the processing.
- n := d.naked()
- dd.DecodeNaked()
- if n.v == valueTypeExt && n.l == nil {
- n.initContainers()
- if n.li < arrayCacheLen {
- n.ia[n.li] = nil
- n.li++
- d.decode(&n.ia[n.li-1])
- n.ia[n.li-1] = nil
- n.li--
- } else {
- var v2 interface{}
- d.decode(&v2)
- }
- }
- }
-}
-
-func setZero(iv interface{}) {
- if iv == nil || definitelyNil(iv) {
- return
- }
- var canDecode bool
- switch v := iv.(type) {
- case *string:
- *v = ""
- case *bool:
- *v = false
- case *int:
- *v = 0
- case *int8:
- *v = 0
- case *int16:
- *v = 0
- case *int32:
- *v = 0
- case *int64:
- *v = 0
- case *uint:
- *v = 0
- case *uint8:
- *v = 0
- case *uint16:
- *v = 0
- case *uint32:
- *v = 0
- case *uint64:
- *v = 0
- case *float32:
- *v = 0
- case *float64:
- *v = 0
- case *[]uint8:
- *v = nil
- case *Raw:
- *v = nil
- case *time.Time:
- *v = time.Time{}
- case reflect.Value:
- if v, canDecode = isDecodeable(v); canDecode && v.CanSet() {
- v.Set(reflect.Zero(v.Type()))
- } // TODO: else drain if chan, clear if map, set all to nil if slice???
- default:
- if !fastpathDecodeSetZeroTypeSwitch(iv) {
- v := reflect.ValueOf(iv)
- if v, canDecode = isDecodeable(v); canDecode && v.CanSet() {
- v.Set(reflect.Zero(v.Type()))
- } // TODO: else drain if chan, clear if map, set all to nil if slice???
- }
- }
-}
-
-func (d *Decoder) decode(iv interface{}) {
- // check nil and interfaces explicitly,
- // so that type switches just have a run of constant non-interface types.
- if iv == nil {
- d.errorstr(errstrCannotDecodeIntoNil)
- return
- }
- if v, ok := iv.(Selfer); ok {
- v.CodecDecodeSelf(d)
- return
- }
-
- switch v := iv.(type) {
- // case nil:
- // case Selfer:
-
- case reflect.Value:
- v = d.ensureDecodeable(v)
- d.decodeValue(v, nil, true)
-
- case *string:
- *v = d.d.DecodeString()
- case *bool:
- *v = d.d.DecodeBool()
- case *int:
- *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
- case *int8:
- *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
- case *int16:
- *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
- case *int32:
- *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
- case *int64:
- *v = d.d.DecodeInt64()
- case *uint:
- *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
- case *uint8:
- *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
- case *uint16:
- *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
- case *uint32:
- *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
- case *uint64:
- *v = d.d.DecodeUint64()
- case *float32:
- f64 := d.d.DecodeFloat64()
- if chkOvf.Float32(f64) {
- d.errorf("float32 overflow: %v", f64)
- }
- *v = float32(f64)
- case *float64:
- *v = d.d.DecodeFloat64()
- case *[]uint8:
- *v = d.d.DecodeBytes(*v, false)
- case []uint8:
- b := d.d.DecodeBytes(v, false)
- if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) {
- copy(v, b)
- }
- case *time.Time:
- *v = d.d.DecodeTime()
- case *Raw:
- *v = d.rawBytes()
-
- case *interface{}:
- d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true)
- // d.decodeValueNotNil(reflect.ValueOf(iv).Elem())
-
- default:
- if !fastpathDecodeTypeSwitch(iv, d) {
- v := reflect.ValueOf(iv)
- v = d.ensureDecodeable(v)
- d.decodeValue(v, nil, false)
- // d.decodeValueFallback(v)
- }
- }
-}
-
-func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) {
- // If stream is not containing a nil value, then we can deref to the base
- // non-pointer value, and decode into that.
- var rvp reflect.Value
- var rvpValid bool
- if rv.Kind() == reflect.Ptr {
- rvpValid = true
- for {
- if rv.IsNil() {
- rv.Set(reflect.New(rv.Type().Elem()))
- }
- rvp = rv
- rv = rv.Elem()
- if rv.Kind() != reflect.Ptr {
- break
- }
- }
- }
-
- if fn == nil {
- // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
- fn = d.cfer().get(rv.Type(), chkAll, true) // chkAll, chkAll)
- }
- if fn.i.addrD {
- if rvpValid {
- fn.fd(d, &fn.i, rvp)
- } else if rv.CanAddr() {
- fn.fd(d, &fn.i, rv.Addr())
- } else if !fn.i.addrF {
- fn.fd(d, &fn.i, rv)
- } else {
- d.errorf("cannot decode into a non-pointer value")
- }
- } else {
- fn.fd(d, &fn.i, rv)
- }
- // return rv
-}
-
-func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
- // NOTE: rvkencname may be a stringView, so don't pass it to another function.
- if d.h.ErrorIfNoField {
- if index >= 0 {
- d.errorf("no matching struct field found when decoding stream array at index %v", index)
- return
- } else if rvkencname != "" {
- d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
- return
- }
- }
- d.swallow()
-}
-
-func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
- if d.h.ErrorIfNoArrayExpand {
- d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
- }
-}
-
-func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
- switch rv.Kind() {
- case reflect.Array:
- return rv, true
- case reflect.Ptr:
- if !rv.IsNil() {
- return rv.Elem(), true
- }
- case reflect.Slice, reflect.Chan, reflect.Map:
- if !rv.IsNil() {
- return rv, true
- }
- }
- return
-}
-
-func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
- // decode can take any reflect.Value that is a inherently addressable i.e.
- // - array
- // - non-nil chan (we will SEND to it)
- // - non-nil slice (we will set its elements)
- // - non-nil map (we will put into it)
- // - non-nil pointer (we can "update" it)
- rv2, canDecode := isDecodeable(rv)
- if canDecode {
- return
- }
- if !rv.IsValid() {
- d.errorstr(errstrCannotDecodeIntoNil)
- return
- }
- if !rv.CanInterface() {
- d.errorf("cannot decode into a value without an interface: %v", rv)
- return
- }
- rvi := rv2i(rv)
- rvk := rv.Kind()
- d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi)
- return
-}
-
-// Possibly get an interned version of a string
-//
-// This should mostly be used for map keys, where the key type is string.
-// This is because keys of a map/struct are typically reused across many objects.
-func (d *Decoder) string(v []byte) (s string) {
- if d.is == nil {
- return string(v) // don't return stringView, as we need a real string here.
- }
- s, ok := d.is[string(v)] // no allocation here, per go implementation
- if !ok {
- s = string(v) // new allocation here
- d.is[s] = s
- }
- return s
-}
-
-// nextValueBytes returns the next value in the stream as a set of bytes.
-func (d *Decoder) nextValueBytes() (bs []byte) {
- d.d.uncacheRead()
- d.r.track()
- d.swallow()
- bs = d.r.stopTrack()
- return
-}
-
-func (d *Decoder) rawBytes() []byte {
- // ensure that this is not a view into the bytes
- // i.e. make new copy always.
- bs := d.nextValueBytes()
- bs2 := make([]byte, len(bs))
- copy(bs2, bs)
- return bs2
-}
-
-func (d *Decoder) wrapErrstr(v interface{}, err *error) {
- *err = fmt.Errorf("%s decode error [pos %d]: %v", d.hh.Name(), d.r.numread(), v)
-}
-
-// --------------------------------------------------
-
-// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
-// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
-type decSliceHelper struct {
- d *Decoder
- // ct valueType
- array bool
-}
-
-func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
- dd := d.d
- ctyp := dd.ContainerType()
- switch ctyp {
- case valueTypeArray:
- x.array = true
- clen = dd.ReadArrayStart()
- case valueTypeMap:
- clen = dd.ReadMapStart() * 2
- default:
- d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp)
- }
- // x.ct = ctyp
- x.d = d
- return
-}
-
-func (x decSliceHelper) End() {
- if x.array {
- x.d.d.ReadArrayEnd()
- } else {
- x.d.d.ReadMapEnd()
- }
-}
-
-func (x decSliceHelper) ElemContainerState(index int) {
- if x.array {
- x.d.d.ReadArrayElem()
- } else if index%2 == 0 {
- x.d.d.ReadMapElemKey()
- } else {
- x.d.d.ReadMapElemValue()
- }
-}
-
-func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) {
- if clen == 0 {
- return zeroByteSlice
- }
- if len(bs) == clen {
- bsOut = bs
- r.readb(bsOut)
- } else if cap(bs) >= clen {
- bsOut = bs[:clen]
- r.readb(bsOut)
- } else {
- // bsOut = make([]byte, clen)
- len2 := decInferLen(clen, maxInitLen, 1)
- bsOut = make([]byte, len2)
- r.readb(bsOut)
- for len2 < clen {
- len3 := decInferLen(clen-len2, maxInitLen, 1)
- bs3 := bsOut
- bsOut = make([]byte, len2+len3)
- copy(bsOut, bs3)
- r.readb(bsOut[len2:])
- len2 += len3
- }
- }
- return
-}
-
-func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) {
- if xlen := len(in); xlen > 0 {
- if isBytesReader || xlen <= scratchByteArrayLen {
- if cap(dest) >= xlen {
- out = dest[:xlen]
- } else {
- out = make([]byte, xlen)
- }
- copy(out, in)
- return
- }
- }
- return in
-}
-
-// decInferLen will infer a sensible length, given the following:
-// - clen: length wanted.
-// - maxlen: max length to be returned.
-// if <= 0, it is unset, and we infer it based on the unit size
-// - unit: number of bytes for each element of the collection
-func decInferLen(clen, maxlen, unit int) (rvlen int) {
- // handle when maxlen is not set i.e. <= 0
- if clen <= 0 {
- return
- }
- if unit == 0 {
- return clen
- }
- if maxlen <= 0 {
- // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items.
- // maxlen = 256 * 1024 / unit
- // if maxlen < (4 * 1024) {
- // maxlen = 4 * 1024
- // }
- if unit < (256 / 4) {
- maxlen = 256 * 1024 / unit
- } else {
- maxlen = 4 * 1024
- }
- }
- if clen > maxlen {
- rvlen = maxlen
- } else {
- rvlen = clen
- }
- return
-}
-
-func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) (
- s2 reflect.Value, scap2 int, changed bool, err string) {
- l1 := slen + num // new slice length
- if l1 < slen {
- err = errmsgExpandSliceOverflow
- return
- }
- if l1 <= scap {
- if s.CanSet() {
- s.SetLen(l1)
- } else if canChange {
- s2 = s.Slice(0, l1)
- scap2 = scap
- changed = true
- } else {
- err = errmsgExpandSliceCannotChange
- return
- }
- return
- }
- if !canChange {
- err = errmsgExpandSliceCannotChange
- return
- }
- scap2 = growCap(scap, stElemSize, num)
- s2 = reflect.MakeSlice(st, l1, scap2)
- changed = true
- reflect.Copy(s2, s)
- return
-}
-
-func decReadFull(r io.Reader, bs []byte) (n int, err error) {
- var nn int
- for n < len(bs) && err == nil {
- nn, err = r.Read(bs[n:])
- if nn > 0 {
- if err == io.EOF {
- // leave EOF for next time
- err = nil
- }
- n += nn
- }
- }
-
- // do not do this - it serves no purpose
- // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF }
- return
-}
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/encode.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/encode.go
deleted file mode 100644
index ef465294..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/encode.go
+++ /dev/null
@@ -1,1375 +0,0 @@
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-package codec
-
-import (
- "bufio"
- "encoding"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
- "sync"
- "time"
-)
-
-const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
-
-var errEncoderNotInitialized = errors.New("Encoder not initialized")
-
-// encWriter abstracts writing to a byte array or to an io.Writer.
-type encWriter interface {
- writeb([]byte)
- writestr(string)
- writen1(byte)
- writen2(byte, byte)
- atEndOfEncode()
-}
-
-// encDriver abstracts the actual codec (binc vs msgpack, etc)
-type encDriver interface {
- EncodeNil()
- EncodeInt(i int64)
- EncodeUint(i uint64)
- EncodeBool(b bool)
- EncodeFloat32(f float32)
- EncodeFloat64(f float64)
- // encodeExtPreamble(xtag byte, length int)
- EncodeRawExt(re *RawExt, e *Encoder)
- EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
- EncodeString(c charEncoding, v string)
- // EncodeSymbol(v string)
- EncodeStringBytes(c charEncoding, v []byte)
- EncodeTime(time.Time)
- //encBignum(f *big.Int)
- //encStringRunes(c charEncoding, v []rune)
- WriteArrayStart(length int)
- WriteArrayElem()
- WriteArrayEnd()
- WriteMapStart(length int)
- WriteMapElemKey()
- WriteMapElemValue()
- WriteMapEnd()
-
- reset()
- atEndOfEncode()
-}
-
-type ioEncStringWriter interface {
- WriteString(s string) (n int, err error)
-}
-
-type encDriverAsis interface {
- EncodeAsis(v []byte)
-}
-
-type encDriverNoopContainerWriter struct{}
-
-func (encDriverNoopContainerWriter) WriteArrayStart(length int) {}
-func (encDriverNoopContainerWriter) WriteArrayElem() {}
-func (encDriverNoopContainerWriter) WriteArrayEnd() {}
-func (encDriverNoopContainerWriter) WriteMapStart(length int) {}
-func (encDriverNoopContainerWriter) WriteMapElemKey() {}
-func (encDriverNoopContainerWriter) WriteMapElemValue() {}
-func (encDriverNoopContainerWriter) WriteMapEnd() {}
-func (encDriverNoopContainerWriter) atEndOfEncode() {}
-
-type encDriverTrackContainerWriter struct {
- c containerState
-}
-
-func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart }
-func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem }
-func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd }
-func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart }
-func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey }
-func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue }
-func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd }
-func (e *encDriverTrackContainerWriter) atEndOfEncode() {}
-
-// type ioEncWriterWriter interface {
-// WriteByte(c byte) error
-// WriteString(s string) (n int, err error)
-// Write(p []byte) (n int, err error)
-// }
-
-// EncodeOptions captures configuration options during encode.
-type EncodeOptions struct {
- // WriterBufferSize is the size of the buffer used when writing.
- //
- // if > 0, we use a smart buffer internally for performance purposes.
- WriterBufferSize int
-
- // ChanRecvTimeout is the timeout used when selecting from a chan.
- //
- // Configuring this controls how we receive from a chan during the encoding process.
- // - If ==0, we only consume the elements currently available in the chan.
- // - if <0, we consume until the chan is closed.
- // - If >0, we consume until this timeout.
- ChanRecvTimeout time.Duration
-
- // StructToArray specifies to encode a struct as an array, and not as a map
- StructToArray bool
-
- // Canonical representation means that encoding a value will always result in the same
- // sequence of bytes.
- //
- // This only affects maps, as the iteration order for maps is random.
- //
- // The implementation MAY use the natural sort order for the map keys if possible:
- //
- // - If there is a natural sort order (ie for number, bool, string or []byte keys),
- // then the map keys are first sorted in natural order and then written
- // with corresponding map values to the strema.
- // - If there is no natural sort order, then the map keys will first be
- // encoded into []byte, and then sorted,
- // before writing the sorted keys and the corresponding map values to the stream.
- //
- Canonical bool
-
- // CheckCircularRef controls whether we check for circular references
- // and error fast during an encode.
- //
- // If enabled, an error is received if a pointer to a struct
- // references itself either directly or through one of its fields (iteratively).
- //
- // This is opt-in, as there may be a performance hit to checking circular references.
- CheckCircularRef bool
-
- // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
- // when checking if a value is empty.
- //
- // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
- RecursiveEmptyCheck bool
-
- // Raw controls whether we encode Raw values.
- // This is a "dangerous" option and must be explicitly set.
- // If set, we blindly encode Raw values as-is, without checking
- // if they are a correct representation of a value in that format.
- // If unset, we error out.
- Raw bool
-
- // // AsSymbols defines what should be encoded as symbols.
- // //
- // // Encoding as symbols can reduce the encoded size significantly.
- // //
- // // However, during decoding, each string to be encoded as a symbol must
- // // be checked to see if it has been seen before. Consequently, encoding time
- // // will increase if using symbols, because string comparisons has a clear cost.
- // //
- // // Sample values:
- // // AsSymbolNone
- // // AsSymbolAll
- // // AsSymbolMapStringKeys
- // // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
- // AsSymbols AsSymbolFlag
-}
-
-// ---------------------------------------------
-
-// ioEncWriter implements encWriter and can write to an io.Writer implementation
-type ioEncWriter struct {
- w io.Writer
- ww io.Writer
- bw io.ByteWriter
- sw ioEncStringWriter
- fw ioFlusher
- b [8]byte
-}
-
-func (z *ioEncWriter) WriteByte(b byte) (err error) {
- z.b[0] = b
- _, err = z.w.Write(z.b[:1])
- return
-}
-
-func (z *ioEncWriter) WriteString(s string) (n int, err error) {
- return z.w.Write(bytesView(s))
-}
-
-func (z *ioEncWriter) writeb(bs []byte) {
- if _, err := z.ww.Write(bs); err != nil {
- panic(err)
- }
-}
-
-func (z *ioEncWriter) writestr(s string) {
- if _, err := z.sw.WriteString(s); err != nil {
- panic(err)
- }
-}
-
-func (z *ioEncWriter) writen1(b byte) {
- if err := z.bw.WriteByte(b); err != nil {
- panic(err)
- }
-}
-
-func (z *ioEncWriter) writen2(b1, b2 byte) {
- var err error
- if err = z.bw.WriteByte(b1); err == nil {
- if err = z.bw.WriteByte(b2); err == nil {
- return
- }
- }
- panic(err)
-}
-
-// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
-// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
-// if _, err := z.ww.Write(z.b[:5]); err != nil {
-// panic(err)
-// }
-// }
-
-func (z *ioEncWriter) atEndOfEncode() {
- if z.fw != nil {
- if err := z.fw.Flush(); err != nil {
- panic(err)
- }
- }
-}
-
-// ---------------------------------------------
-
-// bytesEncAppender implements encWriter and can write to an byte slice.
-type bytesEncAppender struct {
- b []byte
- out *[]byte
-}
-
-func (z *bytesEncAppender) writeb(s []byte) {
- z.b = append(z.b, s...)
-}
-func (z *bytesEncAppender) writestr(s string) {
- z.b = append(z.b, s...)
-}
-func (z *bytesEncAppender) writen1(b1 byte) {
- z.b = append(z.b, b1)
-}
-func (z *bytesEncAppender) writen2(b1, b2 byte) {
- z.b = append(z.b, b1, b2)
-}
-func (z *bytesEncAppender) atEndOfEncode() {
- *(z.out) = z.b
-}
-func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
- z.b = in[:0]
- z.out = out
-}
-
-// ---------------------------------------------
-
-func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeRawExt(rv2i(rv).(*RawExt), e)
-}
-
-func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e)
-}
-
-func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) {
- rv2i(rv).(Selfer).CodecEncodeSelf(e)
-}
-
-func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) {
- bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary()
- e.marshal(bs, fnerr, false, cRAW)
-}
-
-func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) {
- bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText()
- e.marshal(bs, fnerr, false, cUTF8)
-}
-
-func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) {
- bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON()
- e.marshal(bs, fnerr, true, cUTF8)
-}
-
-func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) {
- e.rawBytes(rv2i(rv).(Raw))
-}
-
-func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeNil()
-}
-
-func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) {
- e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
-}
-
-func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
- ti := f.ti
- ee := e.e
- // array may be non-addressable, so we have to manage with care
- // (don't call rv.Bytes, rv.Slice, etc).
- // E.g. type struct S{B [2]byte};
- // Encode(S{}) will bomb on "panic: slice of unaddressable array".
- if f.seq != seqTypeArray {
- if rv.IsNil() {
- ee.EncodeNil()
- return
- }
- // If in this method, then there was no extension function defined.
- // So it's okay to treat as []byte.
- if ti.rtid == uint8SliceTypId {
- ee.EncodeStringBytes(cRAW, rv.Bytes())
- return
- }
- }
- if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 {
- e.errorf("send-only channel cannot be encoded")
- }
- elemsep := e.esep
- rtelem := ti.elem
- rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8
- var l int
- // if a slice, array or chan of bytes, treat specially
- if rtelemIsByte {
- switch f.seq {
- case seqTypeSlice:
- ee.EncodeStringBytes(cRAW, rv.Bytes())
- case seqTypeArray:
- l = rv.Len()
- if rv.CanAddr() {
- ee.EncodeStringBytes(cRAW, rv.Slice(0, l).Bytes())
- } else {
- var bs []byte
- if l <= cap(e.b) {
- bs = e.b[:l]
- } else {
- bs = make([]byte, l)
- }
- reflect.Copy(reflect.ValueOf(bs), rv)
- ee.EncodeStringBytes(cRAW, bs)
- }
- case seqTypeChan:
- // do not use range, so that the number of elements encoded
- // does not change, and encoding does not hang waiting on someone to close chan.
- // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) }
- // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte.
-
- if rv.IsNil() {
- ee.EncodeNil()
- break
- }
- bs := e.b[:0]
- irv := rv2i(rv)
- ch, ok := irv.(<-chan byte)
- if !ok {
- ch = irv.(chan byte)
- }
-
- L1:
- switch timeout := e.h.ChanRecvTimeout; {
- case timeout == 0: // only consume available
- for {
- select {
- case b := <-ch:
- bs = append(bs, b)
- default:
- break L1
- }
- }
- case timeout > 0: // consume until timeout
- tt := time.NewTimer(timeout)
- for {
- select {
- case b := <-ch:
- bs = append(bs, b)
- case <-tt.C:
- // close(tt.C)
- break L1
- }
- }
- default: // consume until close
- for b := range ch {
- bs = append(bs, b)
- }
- }
-
- ee.EncodeStringBytes(cRAW, bs)
- }
- return
- }
-
- // if chan, consume chan into a slice, and work off that slice.
- var rvcs reflect.Value
- if f.seq == seqTypeChan {
- rvcs = reflect.Zero(reflect.SliceOf(rtelem))
- timeout := e.h.ChanRecvTimeout
- if timeout < 0 { // consume until close
- for {
- recv, recvOk := rv.Recv()
- if !recvOk {
- break
- }
- rvcs = reflect.Append(rvcs, recv)
- }
- } else {
- cases := make([]reflect.SelectCase, 2)
- cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
- if timeout == 0 {
- cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
- } else {
- tt := time.NewTimer(timeout)
- cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
- }
- for {
- chosen, recv, recvOk := reflect.Select(cases)
- if chosen == 1 || !recvOk {
- break
- }
- rvcs = reflect.Append(rvcs, recv)
- }
- }
- rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected
- }
-
- l = rv.Len()
- if ti.mbs {
- if l%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", l)
- return
- }
- ee.WriteMapStart(l / 2)
- } else {
- ee.WriteArrayStart(l)
- }
-
- if l > 0 {
- var fn *codecFn
- for rtelem.Kind() == reflect.Ptr {
- rtelem = rtelem.Elem()
- }
- // if kind is reflect.Interface, do not pre-determine the
- // encoding type, because preEncodeValue may break it down to
- // a concrete type and kInterface will bomb.
- if rtelem.Kind() != reflect.Interface {
- fn = e.cfer().get(rtelem, true, true)
- }
- for j := 0; j < l; j++ {
- if elemsep {
- if ti.mbs {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- } else {
- ee.WriteArrayElem()
- }
- }
- e.encodeValue(rv.Index(j), fn, true)
- }
- }
-
- if ti.mbs {
- ee.WriteMapEnd()
- } else {
- ee.WriteArrayEnd()
- }
-}
-
-func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) {
- fti := f.ti
- elemsep := e.esep
- tisfi := fti.sfiSrc
- toMap := !(fti.toArray || e.h.StructToArray)
- if toMap {
- tisfi = fti.sfiSort
- }
- ee := e.e
-
- sfn := structFieldNode{v: rv, update: false}
- if toMap {
- ee.WriteMapStart(len(tisfi))
- if elemsep {
- for _, si := range tisfi {
- ee.WriteMapElemKey()
- // ee.EncodeString(cUTF8, si.encName)
- encStructFieldKey(ee, fti.keyType, si.encName)
- ee.WriteMapElemValue()
- e.encodeValue(sfn.field(si), nil, true)
- }
- } else {
- for _, si := range tisfi {
- // ee.EncodeString(cUTF8, si.encName)
- encStructFieldKey(ee, fti.keyType, si.encName)
- e.encodeValue(sfn.field(si), nil, true)
- }
- }
- ee.WriteMapEnd()
- } else {
- ee.WriteArrayStart(len(tisfi))
- if elemsep {
- for _, si := range tisfi {
- ee.WriteArrayElem()
- e.encodeValue(sfn.field(si), nil, true)
- }
- } else {
- for _, si := range tisfi {
- e.encodeValue(sfn.field(si), nil, true)
- }
- }
- ee.WriteArrayEnd()
- }
-}
-
-func encStructFieldKey(ee encDriver, keyType valueType, s string) {
- var m must
-
- // use if-else-if, not switch (which compiles to binary-search)
- // since keyType is typically valueTypeString, branch prediction is pretty good.
-
- if keyType == valueTypeString {
- ee.EncodeString(cUTF8, s)
- } else if keyType == valueTypeInt {
- ee.EncodeInt(m.Int(strconv.ParseInt(s, 10, 64)))
- } else if keyType == valueTypeUint {
- ee.EncodeUint(m.Uint(strconv.ParseUint(s, 10, 64)))
- } else if keyType == valueTypeFloat {
- ee.EncodeFloat64(m.Float(strconv.ParseFloat(s, 64)))
- } else {
- ee.EncodeString(cUTF8, s)
- }
-}
-
-func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
- fti := f.ti
- elemsep := e.esep
- tisfi := fti.sfiSrc
- toMap := !(fti.toArray || e.h.StructToArray)
- // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
- if toMap {
- tisfi = fti.sfiSort
- }
- newlen := len(fti.sfiSort)
- ee := e.e
-
- // Use sync.Pool to reduce allocating slices unnecessarily.
- // The cost of sync.Pool is less than the cost of new allocation.
- //
- // Each element of the array pools one of encStructPool(8|16|32|64).
- // It allows the re-use of slices up to 64 in length.
- // A performance cost of encoding structs was collecting
- // which values were empty and should be omitted.
- // We needed slices of reflect.Value and string to collect them.
- // This shared pool reduces the amount of unnecessary creation we do.
- // The cost is that of locking sometimes, but sync.Pool is efficient
- // enough to reduce thread contention.
-
- var spool *sync.Pool
- var poolv interface{}
- var fkvs []stringRv
- // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen)
- if newlen <= 8 {
- spool, poolv = pool.stringRv8()
- fkvs = poolv.(*[8]stringRv)[:newlen]
- } else if newlen <= 16 {
- spool, poolv = pool.stringRv16()
- fkvs = poolv.(*[16]stringRv)[:newlen]
- } else if newlen <= 32 {
- spool, poolv = pool.stringRv32()
- fkvs = poolv.(*[32]stringRv)[:newlen]
- } else if newlen <= 64 {
- spool, poolv = pool.stringRv64()
- fkvs = poolv.(*[64]stringRv)[:newlen]
- } else if newlen <= 128 {
- spool, poolv = pool.stringRv128()
- fkvs = poolv.(*[128]stringRv)[:newlen]
- } else {
- fkvs = make([]stringRv, newlen)
- }
-
- newlen = 0
- var kv stringRv
- recur := e.h.RecursiveEmptyCheck
- sfn := structFieldNode{v: rv, update: false}
- for _, si := range tisfi {
- // kv.r = si.field(rv, false)
- kv.r = sfn.field(si)
- if toMap {
- if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
- continue
- }
- kv.v = si.encName
- } else {
- // use the zero value.
- // if a reference or struct, set to nil (so you do not output too much)
- if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) {
- switch kv.r.Kind() {
- case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
- kv.r = reflect.Value{} //encode as nil
- }
- }
- }
- fkvs[newlen] = kv
- newlen++
- }
-
- if toMap {
- ee.WriteMapStart(newlen)
- if elemsep {
- for j := 0; j < newlen; j++ {
- kv = fkvs[j]
- ee.WriteMapElemKey()
- // ee.EncodeString(cUTF8, kv.v)
- encStructFieldKey(ee, fti.keyType, kv.v)
- ee.WriteMapElemValue()
- e.encodeValue(kv.r, nil, true)
- }
- } else {
- for j := 0; j < newlen; j++ {
- kv = fkvs[j]
- // ee.EncodeString(cUTF8, kv.v)
- encStructFieldKey(ee, fti.keyType, kv.v)
- e.encodeValue(kv.r, nil, true)
- }
- }
- ee.WriteMapEnd()
- } else {
- ee.WriteArrayStart(newlen)
- if elemsep {
- for j := 0; j < newlen; j++ {
- ee.WriteArrayElem()
- e.encodeValue(fkvs[j].r, nil, true)
- }
- } else {
- for j := 0; j < newlen; j++ {
- e.encodeValue(fkvs[j].r, nil, true)
- }
- }
- ee.WriteArrayEnd()
- }
-
- // do not use defer. Instead, use explicit pool return at end of function.
- // defer has a cost we are trying to avoid.
- // If there is a panic and these slices are not returned, it is ok.
- if spool != nil {
- spool.Put(poolv)
- }
-}
-
-func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
- ee := e.e
- if rv.IsNil() {
- ee.EncodeNil()
- return
- }
-
- l := rv.Len()
- ee.WriteMapStart(l)
- elemsep := e.esep
- if l == 0 {
- ee.WriteMapEnd()
- return
- }
- // var asSymbols bool
- // determine the underlying key and val encFn's for the map.
- // This eliminates some work which is done for each loop iteration i.e.
- // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
- //
- // However, if kind is reflect.Interface, do not pre-determine the
- // encoding type, because preEncodeValue may break it down to
- // a concrete type and kInterface will bomb.
- var keyFn, valFn *codecFn
- ti := f.ti
- rtkey0 := ti.key
- rtkey := rtkey0
- rtval0 := ti.elem
- rtval := rtval0
- // rtkeyid := rt2id(rtkey0)
- for rtval.Kind() == reflect.Ptr {
- rtval = rtval.Elem()
- }
- if rtval.Kind() != reflect.Interface {
- valFn = e.cfer().get(rtval, true, true)
- }
- mks := rv.MapKeys()
-
- if e.h.Canonical {
- e.kMapCanonical(rtkey, rv, mks, valFn)
- ee.WriteMapEnd()
- return
- }
-
- var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid
- if !keyTypeIsString {
- for rtkey.Kind() == reflect.Ptr {
- rtkey = rtkey.Elem()
- }
- if rtkey.Kind() != reflect.Interface {
- // rtkeyid = rt2id(rtkey)
- keyFn = e.cfer().get(rtkey, true, true)
- }
- }
-
- // for j, lmks := 0, len(mks); j < lmks; j++ {
- for j := range mks {
- if elemsep {
- ee.WriteMapElemKey()
- }
- if keyTypeIsString {
- ee.EncodeString(cUTF8, mks[j].String())
- } else {
- e.encodeValue(mks[j], keyFn, true)
- }
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mks[j]), valFn, true)
-
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) {
- ee := e.e
- elemsep := e.esep
- // we previously did out-of-band if an extension was registered.
- // This is not necessary, as the natural kind is sufficient for ordering.
-
- switch rtkey.Kind() {
- case reflect.Bool:
- mksv := make([]boolRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Bool()
- }
- sort.Sort(boolRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeBool(mksv[i].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- case reflect.String:
- mksv := make([]stringRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.String()
- }
- sort.Sort(stringRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeString(cUTF8, mksv[i].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
- mksv := make([]uintRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Uint()
- }
- sort.Sort(uintRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeUint(mksv[i].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
- mksv := make([]intRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Int()
- }
- sort.Sort(intRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeInt(mksv[i].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- case reflect.Float32:
- mksv := make([]floatRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Float()
- }
- sort.Sort(floatRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeFloat32(float32(mksv[i].v))
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- case reflect.Float64:
- mksv := make([]floatRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = k.Float()
- }
- sort.Sort(floatRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeFloat64(mksv[i].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- case reflect.Struct:
- if rv.Type() == timeTyp {
- mksv := make([]timeRv, len(mks))
- for i, k := range mks {
- v := &mksv[i]
- v.r = k
- v.v = rv2i(k).(time.Time)
- }
- sort.Sort(timeRvSlice(mksv))
- for i := range mksv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- ee.EncodeTime(mksv[i].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
- }
- break
- }
- fallthrough
- default:
- // out-of-band
- // first encode each key to a []byte first, then sort them, then record
- var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- mksbv := make([]bytesRv, len(mks))
- for i, k := range mks {
- v := &mksbv[i]
- l := len(mksv)
- e2.MustEncode(k)
- v.r = k
- v.v = mksv[l:]
- }
- sort.Sort(bytesRvSlice(mksbv))
- for j := range mksbv {
- if elemsep {
- ee.WriteMapElemKey()
- }
- e.asis(mksbv[j].v)
- if elemsep {
- ee.WriteMapElemValue()
- }
- e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true)
- }
- }
-}
-
-// // --------------------------------------------------
-
-type encWriterSwitch struct {
- wi *ioEncWriter
- // wb bytesEncWriter
- wb bytesEncAppender
- wx bool // if bytes, wx=true
- esep bool // whether it has elem separators
- isas bool // whether e.as != nil
-}
-
-// // TODO: Uncomment after mid-stack inlining enabled in go 1.11
-
-// func (z *encWriterSwitch) writeb(s []byte) {
-// if z.wx {
-// z.wb.writeb(s)
-// } else {
-// z.wi.writeb(s)
-// }
-// }
-// func (z *encWriterSwitch) writestr(s string) {
-// if z.wx {
-// z.wb.writestr(s)
-// } else {
-// z.wi.writestr(s)
-// }
-// }
-// func (z *encWriterSwitch) writen1(b1 byte) {
-// if z.wx {
-// z.wb.writen1(b1)
-// } else {
-// z.wi.writen1(b1)
-// }
-// }
-// func (z *encWriterSwitch) writen2(b1, b2 byte) {
-// if z.wx {
-// z.wb.writen2(b1, b2)
-// } else {
-// z.wi.writen2(b1, b2)
-// }
-// }
-
-// An Encoder writes an object to an output stream in the codec format.
-type Encoder struct {
- panicHdl
- // hopefully, reduce derefencing cost by laying the encWriter inside the Encoder
- e encDriver
- // NOTE: Encoder shouldn't call it's write methods,
- // as the handler MAY need to do some coordination.
- w encWriter
-
- h *BasicHandle
- bw *bufio.Writer
- as encDriverAsis
-
- // ---- cpu cache line boundary?
-
- // ---- cpu cache line boundary?
- encWriterSwitch
- err error
-
- // ---- cpu cache line boundary?
- codecFnPooler
- ci set
- js bool // here, so that no need to piggy back on *codecFner for this
- be bool // here, so that no need to piggy back on *codecFner for this
- _ [6]byte // padding
-
- // ---- writable fields during execution --- *try* to keep in sep cache line
-
- // ---- cpu cache line boundary?
- // b [scratchByteArrayLen]byte
- // _ [cacheLineSize - scratchByteArrayLen]byte // padding
- b [cacheLineSize - 0]byte // used for encoding a chan or (non-addressable) array of bytes
-}
-
-// NewEncoder returns an Encoder for encoding into an io.Writer.
-//
-// For efficiency, Users are encouraged to pass in a memory buffered writer
-// (eg bufio.Writer, bytes.Buffer).
-func NewEncoder(w io.Writer, h Handle) *Encoder {
- e := newEncoder(h)
- e.Reset(w)
- return e
-}
-
-// NewEncoderBytes returns an encoder for encoding directly and efficiently
-// into a byte slice, using zero-copying to temporary slices.
-//
-// It will potentially replace the output byte slice pointed to.
-// After encoding, the out parameter contains the encoded contents.
-func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
- e := newEncoder(h)
- e.ResetBytes(out)
- return e
-}
-
-func newEncoder(h Handle) *Encoder {
- e := &Encoder{h: h.getBasicHandle(), err: errEncoderNotInitialized}
- e.hh = h
- e.esep = h.hasElemSeparators()
- return e
-}
-
-func (e *Encoder) resetCommon() {
- if e.e == nil || e.hh.recreateEncDriver(e.e) {
- e.e = e.hh.newEncDriver(e)
- e.as, e.isas = e.e.(encDriverAsis)
- // e.cr, _ = e.e.(containerStateRecv)
- }
- e.be = e.hh.isBinary()
- _, e.js = e.hh.(*JsonHandle)
- e.e.reset()
- e.err = nil
-}
-
-// Reset resets the Encoder with a new output stream.
-//
-// This accommodates using the state of the Encoder,
-// where it has "cached" information about sub-engines.
-func (e *Encoder) Reset(w io.Writer) {
- if w == nil {
- return
- }
- if e.wi == nil {
- e.wi = new(ioEncWriter)
- }
- var ok bool
- e.wx = false
- e.wi.w = w
- if e.h.WriterBufferSize > 0 {
- e.bw = bufio.NewWriterSize(w, e.h.WriterBufferSize)
- e.wi.bw = e.bw
- e.wi.sw = e.bw
- e.wi.fw = e.bw
- e.wi.ww = e.bw
- } else {
- if e.wi.bw, ok = w.(io.ByteWriter); !ok {
- e.wi.bw = e.wi
- }
- if e.wi.sw, ok = w.(ioEncStringWriter); !ok {
- e.wi.sw = e.wi
- }
- e.wi.fw, _ = w.(ioFlusher)
- e.wi.ww = w
- }
- e.w = e.wi
- e.resetCommon()
-}
-
-// ResetBytes resets the Encoder with a new destination output []byte.
-func (e *Encoder) ResetBytes(out *[]byte) {
- if out == nil {
- return
- }
- var in []byte
- if out != nil {
- in = *out
- }
- if in == nil {
- in = make([]byte, defEncByteBufSize)
- }
- e.wx = true
- e.wb.reset(in, out)
- e.w = &e.wb
- e.resetCommon()
-}
-
-// Encode writes an object into a stream.
-//
-// Encoding can be configured via the struct tag for the fields.
-// The key (in the struct tags) that we look at is configurable.
-//
-// By default, we look up the "codec" key in the struct field's tags,
-// and fall bak to the "json" key if "codec" is absent.
-// That key in struct field's tag value is the key name,
-// followed by an optional comma and options.
-//
-// To set an option on all fields (e.g. omitempty on all fields), you
-// can create a field called _struct, and set flags on it. The options
-// which can be set on _struct are:
-// - omitempty: so all fields are omitted if empty
-// - toarray: so struct is encoded as an array
-// - int: so struct key names are encoded as signed integers (instead of strings)
-// - uint: so struct key names are encoded as unsigned integers (instead of strings)
-// - float: so struct key names are encoded as floats (instead of strings)
-// More details on these below.
-//
-// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
-// - the field's tag is "-", OR
-// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
-//
-// When encoding as a map, the first string in the tag (before the comma)
-// is the map key string to use when encoding.
-// ...
-// This key is typically encoded as a string.
-// However, there are instances where the encoded stream has mapping keys encoded as numbers.
-// For example, some cbor streams have keys as integer codes in the stream, but they should map
-// to fields in a structured object. Consequently, a struct is the natural representation in code.
-// For these, configure the struct to encode/decode the keys as numbers (instead of string).
-// This is done with the int,uint or float option on the _struct field (see above).
-//
-// However, struct values may encode as arrays. This happens when:
-// - StructToArray Encode option is set, OR
-// - the tag on the _struct field sets the "toarray" option
-// Note that omitempty is ignored when encoding struct values as arrays,
-// as an entry must be encoded for each field, to maintain its position.
-//
-// Values with types that implement MapBySlice are encoded as stream maps.
-//
-// The empty values (for omitempty option) are false, 0, any nil pointer
-// or interface value, and any array, slice, map, or string of length zero.
-//
-// Anonymous fields are encoded inline except:
-// - the struct tag specifies a replacement name (first value)
-// - the field is of an interface type
-//
-// Examples:
-//
-// // NOTE: 'json:' can be used as struct tag key, in place 'codec:' below.
-// type MyStruct struct {
-// _struct bool `codec:",omitempty"` //set omitempty for every field
-// Field1 string `codec:"-"` //skip this field
-// Field2 int `codec:"myName"` //Use key "myName" in encode stream
-// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
-// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
-// io.Reader //use key "Reader".
-// MyStruct `codec:"my1" //use key "my1".
-// MyStruct //inline it
-// ...
-// }
-//
-// type MyStruct struct {
-// _struct bool `codec:",toarray"` //encode struct as an array
-// }
-//
-// type MyStruct struct {
-// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys
-// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1)
-// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2)
-// }
-//
-// The mode of encoding is based on the type of the value. When a value is seen:
-// - If a Selfer, call its CodecEncodeSelf method
-// - If an extension is registered for it, call that extension function
-// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method
-// - Else encode it based on its reflect.Kind
-//
-// Note that struct field names and keys in map[string]XXX will be treated as symbols.
-// Some formats support symbols (e.g. binc) and will properly encode the string
-// only once in the stream, and use a tag to refer to it thereafter.
-func (e *Encoder) Encode(v interface{}) (err error) {
- defer e.deferred(&err)
- e.MustEncode(v)
- return
-}
-
-// MustEncode is like Encode, but panics if unable to Encode.
-// This provides insight to the code location that triggered the error.
-func (e *Encoder) MustEncode(v interface{}) {
- if e.err != nil {
- panic(e.err)
- }
- e.encode(v)
- e.e.atEndOfEncode()
- e.w.atEndOfEncode()
- e.alwaysAtEnd()
-}
-
-func (e *Encoder) deferred(err1 *error) {
- e.alwaysAtEnd()
- if recoverPanicToErr {
- if x := recover(); x != nil {
- panicValToErr(e, x, err1)
- panicValToErr(e, x, &e.err)
- }
- }
-}
-
-// func (e *Encoder) alwaysAtEnd() {
-// e.codecFnPooler.alwaysAtEnd()
-// }
-
-func (e *Encoder) encode(iv interface{}) {
- if iv == nil || definitelyNil(iv) {
- e.e.EncodeNil()
- return
- }
- if v, ok := iv.(Selfer); ok {
- v.CodecEncodeSelf(e)
- return
- }
-
- // a switch with only concrete types can be optimized.
- // consequently, we deal with nil and interfaces outside.
-
- switch v := iv.(type) {
- case Raw:
- e.rawBytes(v)
- case reflect.Value:
- e.encodeValue(v, nil, true)
-
- case string:
- e.e.EncodeString(cUTF8, v)
- case bool:
- e.e.EncodeBool(v)
- case int:
- e.e.EncodeInt(int64(v))
- case int8:
- e.e.EncodeInt(int64(v))
- case int16:
- e.e.EncodeInt(int64(v))
- case int32:
- e.e.EncodeInt(int64(v))
- case int64:
- e.e.EncodeInt(v)
- case uint:
- e.e.EncodeUint(uint64(v))
- case uint8:
- e.e.EncodeUint(uint64(v))
- case uint16:
- e.e.EncodeUint(uint64(v))
- case uint32:
- e.e.EncodeUint(uint64(v))
- case uint64:
- e.e.EncodeUint(v)
- case uintptr:
- e.e.EncodeUint(uint64(v))
- case float32:
- e.e.EncodeFloat32(v)
- case float64:
- e.e.EncodeFloat64(v)
- case time.Time:
- e.e.EncodeTime(v)
- case []uint8:
- e.e.EncodeStringBytes(cRAW, v)
-
- case *Raw:
- e.rawBytes(*v)
-
- case *string:
- e.e.EncodeString(cUTF8, *v)
- case *bool:
- e.e.EncodeBool(*v)
- case *int:
- e.e.EncodeInt(int64(*v))
- case *int8:
- e.e.EncodeInt(int64(*v))
- case *int16:
- e.e.EncodeInt(int64(*v))
- case *int32:
- e.e.EncodeInt(int64(*v))
- case *int64:
- e.e.EncodeInt(*v)
- case *uint:
- e.e.EncodeUint(uint64(*v))
- case *uint8:
- e.e.EncodeUint(uint64(*v))
- case *uint16:
- e.e.EncodeUint(uint64(*v))
- case *uint32:
- e.e.EncodeUint(uint64(*v))
- case *uint64:
- e.e.EncodeUint(*v)
- case *uintptr:
- e.e.EncodeUint(uint64(*v))
- case *float32:
- e.e.EncodeFloat32(*v)
- case *float64:
- e.e.EncodeFloat64(*v)
- case *time.Time:
- e.e.EncodeTime(*v)
-
- case *[]uint8:
- e.e.EncodeStringBytes(cRAW, *v)
-
- default:
- if !fastpathEncodeTypeSwitch(iv, e) {
- // checkfastpath=true (not false), as underlying slice/map type may be fast-path
- e.encodeValue(reflect.ValueOf(iv), nil, true)
- }
- }
-}
-
-func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) {
- // if a valid fn is passed, it MUST BE for the dereferenced type of rv
- var sptr uintptr
- var rvp reflect.Value
- var rvpValid bool
-TOP:
- switch rv.Kind() {
- case reflect.Ptr:
- if rv.IsNil() {
- e.e.EncodeNil()
- return
- }
- rvpValid = true
- rvp = rv
- rv = rv.Elem()
- if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
- // TODO: Movable pointers will be an issue here. Future problem.
- sptr = rv.UnsafeAddr()
- break TOP
- }
- goto TOP
- case reflect.Interface:
- if rv.IsNil() {
- e.e.EncodeNil()
- return
- }
- rv = rv.Elem()
- goto TOP
- case reflect.Slice, reflect.Map:
- if rv.IsNil() {
- e.e.EncodeNil()
- return
- }
- case reflect.Invalid, reflect.Func:
- e.e.EncodeNil()
- return
- }
-
- if sptr != 0 && (&e.ci).add(sptr) {
- e.errorf("circular reference found: # %d", sptr)
- }
-
- if fn == nil {
- rt := rv.Type()
- // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
- fn = e.cfer().get(rt, checkFastpath, true)
- }
- if fn.i.addrE {
- if rvpValid {
- fn.fe(e, &fn.i, rvp)
- } else if rv.CanAddr() {
- fn.fe(e, &fn.i, rv.Addr())
- } else {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- fn.fe(e, &fn.i, rv2)
- }
- } else {
- fn.fe(e, &fn.i, rv)
- }
- if sptr != 0 {
- (&e.ci).remove(sptr)
- }
-}
-
-func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
- if fnerr != nil {
- panic(fnerr)
- }
- if bs == nil {
- e.e.EncodeNil()
- } else if asis {
- e.asis(bs)
- } else {
- e.e.EncodeStringBytes(c, bs)
- }
-}
-
-func (e *Encoder) asis(v []byte) {
- if e.isas {
- e.as.EncodeAsis(v)
- } else {
- e.w.writeb(v)
- }
-}
-
-func (e *Encoder) rawBytes(vv Raw) {
- v := []byte(vv)
- if !e.h.Raw {
- e.errorf("Raw values cannot be encoded: %v", v)
- }
- e.asis(v)
-}
-
-func (e *Encoder) wrapErrstr(v interface{}, err *error) {
- *err = fmt.Errorf("%s encode error: %v", e.hh.Name(), v)
-}
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/fast-path.generated.go
deleted file mode 100644
index 87f2562f..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/fast-path.generated.go
+++ /dev/null
@@ -1,34522 +0,0 @@
-// +build !notfastpath
-
-// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-// Code generated from fast-path.go.tmpl - DO NOT EDIT.
-
-package codec
-
-// Fast path functions try to create a fast path encode or decode implementation
-// for common maps and slices.
-//
-// We define the functions and register then in this single file
-// so as not to pollute the encode.go and decode.go, and create a dependency in there.
-// This file can be omitted without causing a build failure.
-//
-// The advantage of fast paths is:
-// - Many calls bypass reflection altogether
-//
-// Currently support
-// - slice of all builtin types,
-// - map of all builtin types to string or interface value
-// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
-// This should provide adequate "typical" implementations.
-//
-// Note that fast track decode functions must handle values for which an address cannot be obtained.
-// For example:
-// m2 := map[string]int{}
-// p2 := []interface{}{m2}
-// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
-//
-
-import (
- "reflect"
- "sort"
-)
-
-const fastpathEnabled = true
-
-type fastpathT struct{}
-
-var fastpathTV fastpathT
-
-type fastpathE struct {
- rtid uintptr
- rt reflect.Type
- encfn func(*Encoder, *codecFnInfo, reflect.Value)
- decfn func(*Decoder, *codecFnInfo, reflect.Value)
-}
-
-type fastpathA [271]fastpathE
-
-func (x *fastpathA) index(rtid uintptr) int {
- // use binary search to grab the index (adapted from sort/search.go)
- h, i, j := 0, 0, 271 // len(x)
- for i < j {
- h = i + (j-i)/2
- if x[h].rtid < rtid {
- i = h + 1
- } else {
- j = h
- }
- }
- if i < 271 && x[i].rtid == rtid {
- return i
- }
- return -1
-}
-
-type fastpathAslice []fastpathE
-
-func (x fastpathAslice) Len() int { return len(x) }
-func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
-func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-var fastpathAV fastpathA
-
-// due to possible initialization loop error, make fastpath in an init()
-func init() {
- i := 0
- fn := func(v interface{},
- fe func(*Encoder, *codecFnInfo, reflect.Value),
- fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
- xrt := reflect.TypeOf(v)
- xptr := rt2id(xrt)
- fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
- i++
- return
- }
-
- fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR)
- fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR)
- fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R)
- fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R)
- fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR)
- fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R)
- fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R)
- fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R)
- fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR)
- fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR)
- fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R)
- fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R)
- fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R)
- fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R)
- fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR)
-
- fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR)
- fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR)
- fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR)
- fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R)
- fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R)
- fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R)
- fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R)
- fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR)
- fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR)
- fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R)
- fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R)
- fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R)
- fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R)
- fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R)
- fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R)
- fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR)
- fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR)
- fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR)
- fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR)
- fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R)
- fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R)
- fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R)
- fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R)
- fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR)
- fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR)
- fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R)
- fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R)
- fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R)
- fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R)
- fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R)
- fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R)
- fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR)
- fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR)
- fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR)
- fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR)
- fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R)
- fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R)
- fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R)
- fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R)
- fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR)
- fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR)
- fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R)
- fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R)
- fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R)
- fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R)
- fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R)
- fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R)
- fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR)
- fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR)
- fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR)
- fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR)
- fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R)
- fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R)
- fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R)
- fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R)
- fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR)
- fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR)
- fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R)
- fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R)
- fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R)
- fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R)
- fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R)
- fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R)
- fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR)
- fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR)
- fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR)
- fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR)
- fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R)
- fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R)
- fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R)
- fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R)
- fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR)
- fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR)
- fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R)
- fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R)
- fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R)
- fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R)
- fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R)
- fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R)
- fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR)
- fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR)
- fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR)
- fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR)
- fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R)
- fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R)
- fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R)
- fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R)
- fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR)
- fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR)
- fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R)
- fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R)
- fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R)
- fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R)
- fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R)
- fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R)
- fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR)
- fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR)
- fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR)
- fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR)
- fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R)
- fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R)
- fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R)
- fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R)
- fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR)
- fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR)
- fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R)
- fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R)
- fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R)
- fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R)
- fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R)
- fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R)
- fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR)
- fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR)
- fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR)
- fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR)
- fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R)
- fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R)
- fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R)
- fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R)
- fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR)
- fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR)
- fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R)
- fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R)
- fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R)
- fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R)
- fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R)
- fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R)
- fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR)
- fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR)
- fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR)
- fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR)
- fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R)
- fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R)
- fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R)
- fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R)
- fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR)
- fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR)
- fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R)
- fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R)
- fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R)
- fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R)
- fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R)
- fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R)
- fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR)
- fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR)
- fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR)
- fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR)
- fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R)
- fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R)
- fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R)
- fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R)
- fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR)
- fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR)
- fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R)
- fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R)
- fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R)
- fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R)
- fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R)
- fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R)
- fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR)
- fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR)
- fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR)
- fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR)
- fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R)
- fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R)
- fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R)
- fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R)
- fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR)
- fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR)
- fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R)
- fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R)
- fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R)
- fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R)
- fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R)
- fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R)
- fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR)
- fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR)
- fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR)
- fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR)
- fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R)
- fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R)
- fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R)
- fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R)
- fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR)
- fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR)
- fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R)
- fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R)
- fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R)
- fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R)
- fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R)
- fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R)
- fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR)
- fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR)
- fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR)
- fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR)
- fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R)
- fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R)
- fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R)
- fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R)
- fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR)
- fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR)
- fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R)
- fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R)
- fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R)
- fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R)
- fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R)
- fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R)
- fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR)
- fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR)
- fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR)
- fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR)
- fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R)
- fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R)
- fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R)
- fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R)
- fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR)
- fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR)
- fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R)
- fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R)
- fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R)
- fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R)
- fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R)
- fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R)
- fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR)
- fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR)
- fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR)
- fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR)
- fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R)
- fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R)
- fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R)
- fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R)
- fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR)
- fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR)
- fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R)
- fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R)
- fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R)
- fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R)
- fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R)
- fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R)
- fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR)
- fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR)
- fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR)
- fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR)
- fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R)
- fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R)
- fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R)
- fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R)
- fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR)
- fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR)
- fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R)
- fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R)
- fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R)
- fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R)
- fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R)
- fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R)
- fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR)
-
- sort.Sort(fastpathAslice(fastpathAV[:]))
-}
-
-// -- encode
-
-// -- -- fast path type switch
-func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
- switch v := iv.(type) {
-
- case []interface{}:
- fastpathTV.EncSliceIntfV(v, e)
- case *[]interface{}:
- fastpathTV.EncSliceIntfV(*v, e)
- case []string:
- fastpathTV.EncSliceStringV(v, e)
- case *[]string:
- fastpathTV.EncSliceStringV(*v, e)
- case []float32:
- fastpathTV.EncSliceFloat32V(v, e)
- case *[]float32:
- fastpathTV.EncSliceFloat32V(*v, e)
- case []float64:
- fastpathTV.EncSliceFloat64V(v, e)
- case *[]float64:
- fastpathTV.EncSliceFloat64V(*v, e)
- case []uint:
- fastpathTV.EncSliceUintV(v, e)
- case *[]uint:
- fastpathTV.EncSliceUintV(*v, e)
- case []uint16:
- fastpathTV.EncSliceUint16V(v, e)
- case *[]uint16:
- fastpathTV.EncSliceUint16V(*v, e)
- case []uint32:
- fastpathTV.EncSliceUint32V(v, e)
- case *[]uint32:
- fastpathTV.EncSliceUint32V(*v, e)
- case []uint64:
- fastpathTV.EncSliceUint64V(v, e)
- case *[]uint64:
- fastpathTV.EncSliceUint64V(*v, e)
- case []uintptr:
- fastpathTV.EncSliceUintptrV(v, e)
- case *[]uintptr:
- fastpathTV.EncSliceUintptrV(*v, e)
- case []int:
- fastpathTV.EncSliceIntV(v, e)
- case *[]int:
- fastpathTV.EncSliceIntV(*v, e)
- case []int8:
- fastpathTV.EncSliceInt8V(v, e)
- case *[]int8:
- fastpathTV.EncSliceInt8V(*v, e)
- case []int16:
- fastpathTV.EncSliceInt16V(v, e)
- case *[]int16:
- fastpathTV.EncSliceInt16V(*v, e)
- case []int32:
- fastpathTV.EncSliceInt32V(v, e)
- case *[]int32:
- fastpathTV.EncSliceInt32V(*v, e)
- case []int64:
- fastpathTV.EncSliceInt64V(v, e)
- case *[]int64:
- fastpathTV.EncSliceInt64V(*v, e)
- case []bool:
- fastpathTV.EncSliceBoolV(v, e)
- case *[]bool:
- fastpathTV.EncSliceBoolV(*v, e)
-
- case map[interface{}]interface{}:
- fastpathTV.EncMapIntfIntfV(v, e)
- case *map[interface{}]interface{}:
- fastpathTV.EncMapIntfIntfV(*v, e)
- case map[interface{}]string:
- fastpathTV.EncMapIntfStringV(v, e)
- case *map[interface{}]string:
- fastpathTV.EncMapIntfStringV(*v, e)
- case map[interface{}]uint:
- fastpathTV.EncMapIntfUintV(v, e)
- case *map[interface{}]uint:
- fastpathTV.EncMapIntfUintV(*v, e)
- case map[interface{}]uint8:
- fastpathTV.EncMapIntfUint8V(v, e)
- case *map[interface{}]uint8:
- fastpathTV.EncMapIntfUint8V(*v, e)
- case map[interface{}]uint16:
- fastpathTV.EncMapIntfUint16V(v, e)
- case *map[interface{}]uint16:
- fastpathTV.EncMapIntfUint16V(*v, e)
- case map[interface{}]uint32:
- fastpathTV.EncMapIntfUint32V(v, e)
- case *map[interface{}]uint32:
- fastpathTV.EncMapIntfUint32V(*v, e)
- case map[interface{}]uint64:
- fastpathTV.EncMapIntfUint64V(v, e)
- case *map[interface{}]uint64:
- fastpathTV.EncMapIntfUint64V(*v, e)
- case map[interface{}]uintptr:
- fastpathTV.EncMapIntfUintptrV(v, e)
- case *map[interface{}]uintptr:
- fastpathTV.EncMapIntfUintptrV(*v, e)
- case map[interface{}]int:
- fastpathTV.EncMapIntfIntV(v, e)
- case *map[interface{}]int:
- fastpathTV.EncMapIntfIntV(*v, e)
- case map[interface{}]int8:
- fastpathTV.EncMapIntfInt8V(v, e)
- case *map[interface{}]int8:
- fastpathTV.EncMapIntfInt8V(*v, e)
- case map[interface{}]int16:
- fastpathTV.EncMapIntfInt16V(v, e)
- case *map[interface{}]int16:
- fastpathTV.EncMapIntfInt16V(*v, e)
- case map[interface{}]int32:
- fastpathTV.EncMapIntfInt32V(v, e)
- case *map[interface{}]int32:
- fastpathTV.EncMapIntfInt32V(*v, e)
- case map[interface{}]int64:
- fastpathTV.EncMapIntfInt64V(v, e)
- case *map[interface{}]int64:
- fastpathTV.EncMapIntfInt64V(*v, e)
- case map[interface{}]float32:
- fastpathTV.EncMapIntfFloat32V(v, e)
- case *map[interface{}]float32:
- fastpathTV.EncMapIntfFloat32V(*v, e)
- case map[interface{}]float64:
- fastpathTV.EncMapIntfFloat64V(v, e)
- case *map[interface{}]float64:
- fastpathTV.EncMapIntfFloat64V(*v, e)
- case map[interface{}]bool:
- fastpathTV.EncMapIntfBoolV(v, e)
- case *map[interface{}]bool:
- fastpathTV.EncMapIntfBoolV(*v, e)
- case map[string]interface{}:
- fastpathTV.EncMapStringIntfV(v, e)
- case *map[string]interface{}:
- fastpathTV.EncMapStringIntfV(*v, e)
- case map[string]string:
- fastpathTV.EncMapStringStringV(v, e)
- case *map[string]string:
- fastpathTV.EncMapStringStringV(*v, e)
- case map[string]uint:
- fastpathTV.EncMapStringUintV(v, e)
- case *map[string]uint:
- fastpathTV.EncMapStringUintV(*v, e)
- case map[string]uint8:
- fastpathTV.EncMapStringUint8V(v, e)
- case *map[string]uint8:
- fastpathTV.EncMapStringUint8V(*v, e)
- case map[string]uint16:
- fastpathTV.EncMapStringUint16V(v, e)
- case *map[string]uint16:
- fastpathTV.EncMapStringUint16V(*v, e)
- case map[string]uint32:
- fastpathTV.EncMapStringUint32V(v, e)
- case *map[string]uint32:
- fastpathTV.EncMapStringUint32V(*v, e)
- case map[string]uint64:
- fastpathTV.EncMapStringUint64V(v, e)
- case *map[string]uint64:
- fastpathTV.EncMapStringUint64V(*v, e)
- case map[string]uintptr:
- fastpathTV.EncMapStringUintptrV(v, e)
- case *map[string]uintptr:
- fastpathTV.EncMapStringUintptrV(*v, e)
- case map[string]int:
- fastpathTV.EncMapStringIntV(v, e)
- case *map[string]int:
- fastpathTV.EncMapStringIntV(*v, e)
- case map[string]int8:
- fastpathTV.EncMapStringInt8V(v, e)
- case *map[string]int8:
- fastpathTV.EncMapStringInt8V(*v, e)
- case map[string]int16:
- fastpathTV.EncMapStringInt16V(v, e)
- case *map[string]int16:
- fastpathTV.EncMapStringInt16V(*v, e)
- case map[string]int32:
- fastpathTV.EncMapStringInt32V(v, e)
- case *map[string]int32:
- fastpathTV.EncMapStringInt32V(*v, e)
- case map[string]int64:
- fastpathTV.EncMapStringInt64V(v, e)
- case *map[string]int64:
- fastpathTV.EncMapStringInt64V(*v, e)
- case map[string]float32:
- fastpathTV.EncMapStringFloat32V(v, e)
- case *map[string]float32:
- fastpathTV.EncMapStringFloat32V(*v, e)
- case map[string]float64:
- fastpathTV.EncMapStringFloat64V(v, e)
- case *map[string]float64:
- fastpathTV.EncMapStringFloat64V(*v, e)
- case map[string]bool:
- fastpathTV.EncMapStringBoolV(v, e)
- case *map[string]bool:
- fastpathTV.EncMapStringBoolV(*v, e)
- case map[float32]interface{}:
- fastpathTV.EncMapFloat32IntfV(v, e)
- case *map[float32]interface{}:
- fastpathTV.EncMapFloat32IntfV(*v, e)
- case map[float32]string:
- fastpathTV.EncMapFloat32StringV(v, e)
- case *map[float32]string:
- fastpathTV.EncMapFloat32StringV(*v, e)
- case map[float32]uint:
- fastpathTV.EncMapFloat32UintV(v, e)
- case *map[float32]uint:
- fastpathTV.EncMapFloat32UintV(*v, e)
- case map[float32]uint8:
- fastpathTV.EncMapFloat32Uint8V(v, e)
- case *map[float32]uint8:
- fastpathTV.EncMapFloat32Uint8V(*v, e)
- case map[float32]uint16:
- fastpathTV.EncMapFloat32Uint16V(v, e)
- case *map[float32]uint16:
- fastpathTV.EncMapFloat32Uint16V(*v, e)
- case map[float32]uint32:
- fastpathTV.EncMapFloat32Uint32V(v, e)
- case *map[float32]uint32:
- fastpathTV.EncMapFloat32Uint32V(*v, e)
- case map[float32]uint64:
- fastpathTV.EncMapFloat32Uint64V(v, e)
- case *map[float32]uint64:
- fastpathTV.EncMapFloat32Uint64V(*v, e)
- case map[float32]uintptr:
- fastpathTV.EncMapFloat32UintptrV(v, e)
- case *map[float32]uintptr:
- fastpathTV.EncMapFloat32UintptrV(*v, e)
- case map[float32]int:
- fastpathTV.EncMapFloat32IntV(v, e)
- case *map[float32]int:
- fastpathTV.EncMapFloat32IntV(*v, e)
- case map[float32]int8:
- fastpathTV.EncMapFloat32Int8V(v, e)
- case *map[float32]int8:
- fastpathTV.EncMapFloat32Int8V(*v, e)
- case map[float32]int16:
- fastpathTV.EncMapFloat32Int16V(v, e)
- case *map[float32]int16:
- fastpathTV.EncMapFloat32Int16V(*v, e)
- case map[float32]int32:
- fastpathTV.EncMapFloat32Int32V(v, e)
- case *map[float32]int32:
- fastpathTV.EncMapFloat32Int32V(*v, e)
- case map[float32]int64:
- fastpathTV.EncMapFloat32Int64V(v, e)
- case *map[float32]int64:
- fastpathTV.EncMapFloat32Int64V(*v, e)
- case map[float32]float32:
- fastpathTV.EncMapFloat32Float32V(v, e)
- case *map[float32]float32:
- fastpathTV.EncMapFloat32Float32V(*v, e)
- case map[float32]float64:
- fastpathTV.EncMapFloat32Float64V(v, e)
- case *map[float32]float64:
- fastpathTV.EncMapFloat32Float64V(*v, e)
- case map[float32]bool:
- fastpathTV.EncMapFloat32BoolV(v, e)
- case *map[float32]bool:
- fastpathTV.EncMapFloat32BoolV(*v, e)
- case map[float64]interface{}:
- fastpathTV.EncMapFloat64IntfV(v, e)
- case *map[float64]interface{}:
- fastpathTV.EncMapFloat64IntfV(*v, e)
- case map[float64]string:
- fastpathTV.EncMapFloat64StringV(v, e)
- case *map[float64]string:
- fastpathTV.EncMapFloat64StringV(*v, e)
- case map[float64]uint:
- fastpathTV.EncMapFloat64UintV(v, e)
- case *map[float64]uint:
- fastpathTV.EncMapFloat64UintV(*v, e)
- case map[float64]uint8:
- fastpathTV.EncMapFloat64Uint8V(v, e)
- case *map[float64]uint8:
- fastpathTV.EncMapFloat64Uint8V(*v, e)
- case map[float64]uint16:
- fastpathTV.EncMapFloat64Uint16V(v, e)
- case *map[float64]uint16:
- fastpathTV.EncMapFloat64Uint16V(*v, e)
- case map[float64]uint32:
- fastpathTV.EncMapFloat64Uint32V(v, e)
- case *map[float64]uint32:
- fastpathTV.EncMapFloat64Uint32V(*v, e)
- case map[float64]uint64:
- fastpathTV.EncMapFloat64Uint64V(v, e)
- case *map[float64]uint64:
- fastpathTV.EncMapFloat64Uint64V(*v, e)
- case map[float64]uintptr:
- fastpathTV.EncMapFloat64UintptrV(v, e)
- case *map[float64]uintptr:
- fastpathTV.EncMapFloat64UintptrV(*v, e)
- case map[float64]int:
- fastpathTV.EncMapFloat64IntV(v, e)
- case *map[float64]int:
- fastpathTV.EncMapFloat64IntV(*v, e)
- case map[float64]int8:
- fastpathTV.EncMapFloat64Int8V(v, e)
- case *map[float64]int8:
- fastpathTV.EncMapFloat64Int8V(*v, e)
- case map[float64]int16:
- fastpathTV.EncMapFloat64Int16V(v, e)
- case *map[float64]int16:
- fastpathTV.EncMapFloat64Int16V(*v, e)
- case map[float64]int32:
- fastpathTV.EncMapFloat64Int32V(v, e)
- case *map[float64]int32:
- fastpathTV.EncMapFloat64Int32V(*v, e)
- case map[float64]int64:
- fastpathTV.EncMapFloat64Int64V(v, e)
- case *map[float64]int64:
- fastpathTV.EncMapFloat64Int64V(*v, e)
- case map[float64]float32:
- fastpathTV.EncMapFloat64Float32V(v, e)
- case *map[float64]float32:
- fastpathTV.EncMapFloat64Float32V(*v, e)
- case map[float64]float64:
- fastpathTV.EncMapFloat64Float64V(v, e)
- case *map[float64]float64:
- fastpathTV.EncMapFloat64Float64V(*v, e)
- case map[float64]bool:
- fastpathTV.EncMapFloat64BoolV(v, e)
- case *map[float64]bool:
- fastpathTV.EncMapFloat64BoolV(*v, e)
- case map[uint]interface{}:
- fastpathTV.EncMapUintIntfV(v, e)
- case *map[uint]interface{}:
- fastpathTV.EncMapUintIntfV(*v, e)
- case map[uint]string:
- fastpathTV.EncMapUintStringV(v, e)
- case *map[uint]string:
- fastpathTV.EncMapUintStringV(*v, e)
- case map[uint]uint:
- fastpathTV.EncMapUintUintV(v, e)
- case *map[uint]uint:
- fastpathTV.EncMapUintUintV(*v, e)
- case map[uint]uint8:
- fastpathTV.EncMapUintUint8V(v, e)
- case *map[uint]uint8:
- fastpathTV.EncMapUintUint8V(*v, e)
- case map[uint]uint16:
- fastpathTV.EncMapUintUint16V(v, e)
- case *map[uint]uint16:
- fastpathTV.EncMapUintUint16V(*v, e)
- case map[uint]uint32:
- fastpathTV.EncMapUintUint32V(v, e)
- case *map[uint]uint32:
- fastpathTV.EncMapUintUint32V(*v, e)
- case map[uint]uint64:
- fastpathTV.EncMapUintUint64V(v, e)
- case *map[uint]uint64:
- fastpathTV.EncMapUintUint64V(*v, e)
- case map[uint]uintptr:
- fastpathTV.EncMapUintUintptrV(v, e)
- case *map[uint]uintptr:
- fastpathTV.EncMapUintUintptrV(*v, e)
- case map[uint]int:
- fastpathTV.EncMapUintIntV(v, e)
- case *map[uint]int:
- fastpathTV.EncMapUintIntV(*v, e)
- case map[uint]int8:
- fastpathTV.EncMapUintInt8V(v, e)
- case *map[uint]int8:
- fastpathTV.EncMapUintInt8V(*v, e)
- case map[uint]int16:
- fastpathTV.EncMapUintInt16V(v, e)
- case *map[uint]int16:
- fastpathTV.EncMapUintInt16V(*v, e)
- case map[uint]int32:
- fastpathTV.EncMapUintInt32V(v, e)
- case *map[uint]int32:
- fastpathTV.EncMapUintInt32V(*v, e)
- case map[uint]int64:
- fastpathTV.EncMapUintInt64V(v, e)
- case *map[uint]int64:
- fastpathTV.EncMapUintInt64V(*v, e)
- case map[uint]float32:
- fastpathTV.EncMapUintFloat32V(v, e)
- case *map[uint]float32:
- fastpathTV.EncMapUintFloat32V(*v, e)
- case map[uint]float64:
- fastpathTV.EncMapUintFloat64V(v, e)
- case *map[uint]float64:
- fastpathTV.EncMapUintFloat64V(*v, e)
- case map[uint]bool:
- fastpathTV.EncMapUintBoolV(v, e)
- case *map[uint]bool:
- fastpathTV.EncMapUintBoolV(*v, e)
- case map[uint8]interface{}:
- fastpathTV.EncMapUint8IntfV(v, e)
- case *map[uint8]interface{}:
- fastpathTV.EncMapUint8IntfV(*v, e)
- case map[uint8]string:
- fastpathTV.EncMapUint8StringV(v, e)
- case *map[uint8]string:
- fastpathTV.EncMapUint8StringV(*v, e)
- case map[uint8]uint:
- fastpathTV.EncMapUint8UintV(v, e)
- case *map[uint8]uint:
- fastpathTV.EncMapUint8UintV(*v, e)
- case map[uint8]uint8:
- fastpathTV.EncMapUint8Uint8V(v, e)
- case *map[uint8]uint8:
- fastpathTV.EncMapUint8Uint8V(*v, e)
- case map[uint8]uint16:
- fastpathTV.EncMapUint8Uint16V(v, e)
- case *map[uint8]uint16:
- fastpathTV.EncMapUint8Uint16V(*v, e)
- case map[uint8]uint32:
- fastpathTV.EncMapUint8Uint32V(v, e)
- case *map[uint8]uint32:
- fastpathTV.EncMapUint8Uint32V(*v, e)
- case map[uint8]uint64:
- fastpathTV.EncMapUint8Uint64V(v, e)
- case *map[uint8]uint64:
- fastpathTV.EncMapUint8Uint64V(*v, e)
- case map[uint8]uintptr:
- fastpathTV.EncMapUint8UintptrV(v, e)
- case *map[uint8]uintptr:
- fastpathTV.EncMapUint8UintptrV(*v, e)
- case map[uint8]int:
- fastpathTV.EncMapUint8IntV(v, e)
- case *map[uint8]int:
- fastpathTV.EncMapUint8IntV(*v, e)
- case map[uint8]int8:
- fastpathTV.EncMapUint8Int8V(v, e)
- case *map[uint8]int8:
- fastpathTV.EncMapUint8Int8V(*v, e)
- case map[uint8]int16:
- fastpathTV.EncMapUint8Int16V(v, e)
- case *map[uint8]int16:
- fastpathTV.EncMapUint8Int16V(*v, e)
- case map[uint8]int32:
- fastpathTV.EncMapUint8Int32V(v, e)
- case *map[uint8]int32:
- fastpathTV.EncMapUint8Int32V(*v, e)
- case map[uint8]int64:
- fastpathTV.EncMapUint8Int64V(v, e)
- case *map[uint8]int64:
- fastpathTV.EncMapUint8Int64V(*v, e)
- case map[uint8]float32:
- fastpathTV.EncMapUint8Float32V(v, e)
- case *map[uint8]float32:
- fastpathTV.EncMapUint8Float32V(*v, e)
- case map[uint8]float64:
- fastpathTV.EncMapUint8Float64V(v, e)
- case *map[uint8]float64:
- fastpathTV.EncMapUint8Float64V(*v, e)
- case map[uint8]bool:
- fastpathTV.EncMapUint8BoolV(v, e)
- case *map[uint8]bool:
- fastpathTV.EncMapUint8BoolV(*v, e)
- case map[uint16]interface{}:
- fastpathTV.EncMapUint16IntfV(v, e)
- case *map[uint16]interface{}:
- fastpathTV.EncMapUint16IntfV(*v, e)
- case map[uint16]string:
- fastpathTV.EncMapUint16StringV(v, e)
- case *map[uint16]string:
- fastpathTV.EncMapUint16StringV(*v, e)
- case map[uint16]uint:
- fastpathTV.EncMapUint16UintV(v, e)
- case *map[uint16]uint:
- fastpathTV.EncMapUint16UintV(*v, e)
- case map[uint16]uint8:
- fastpathTV.EncMapUint16Uint8V(v, e)
- case *map[uint16]uint8:
- fastpathTV.EncMapUint16Uint8V(*v, e)
- case map[uint16]uint16:
- fastpathTV.EncMapUint16Uint16V(v, e)
- case *map[uint16]uint16:
- fastpathTV.EncMapUint16Uint16V(*v, e)
- case map[uint16]uint32:
- fastpathTV.EncMapUint16Uint32V(v, e)
- case *map[uint16]uint32:
- fastpathTV.EncMapUint16Uint32V(*v, e)
- case map[uint16]uint64:
- fastpathTV.EncMapUint16Uint64V(v, e)
- case *map[uint16]uint64:
- fastpathTV.EncMapUint16Uint64V(*v, e)
- case map[uint16]uintptr:
- fastpathTV.EncMapUint16UintptrV(v, e)
- case *map[uint16]uintptr:
- fastpathTV.EncMapUint16UintptrV(*v, e)
- case map[uint16]int:
- fastpathTV.EncMapUint16IntV(v, e)
- case *map[uint16]int:
- fastpathTV.EncMapUint16IntV(*v, e)
- case map[uint16]int8:
- fastpathTV.EncMapUint16Int8V(v, e)
- case *map[uint16]int8:
- fastpathTV.EncMapUint16Int8V(*v, e)
- case map[uint16]int16:
- fastpathTV.EncMapUint16Int16V(v, e)
- case *map[uint16]int16:
- fastpathTV.EncMapUint16Int16V(*v, e)
- case map[uint16]int32:
- fastpathTV.EncMapUint16Int32V(v, e)
- case *map[uint16]int32:
- fastpathTV.EncMapUint16Int32V(*v, e)
- case map[uint16]int64:
- fastpathTV.EncMapUint16Int64V(v, e)
- case *map[uint16]int64:
- fastpathTV.EncMapUint16Int64V(*v, e)
- case map[uint16]float32:
- fastpathTV.EncMapUint16Float32V(v, e)
- case *map[uint16]float32:
- fastpathTV.EncMapUint16Float32V(*v, e)
- case map[uint16]float64:
- fastpathTV.EncMapUint16Float64V(v, e)
- case *map[uint16]float64:
- fastpathTV.EncMapUint16Float64V(*v, e)
- case map[uint16]bool:
- fastpathTV.EncMapUint16BoolV(v, e)
- case *map[uint16]bool:
- fastpathTV.EncMapUint16BoolV(*v, e)
- case map[uint32]interface{}:
- fastpathTV.EncMapUint32IntfV(v, e)
- case *map[uint32]interface{}:
- fastpathTV.EncMapUint32IntfV(*v, e)
- case map[uint32]string:
- fastpathTV.EncMapUint32StringV(v, e)
- case *map[uint32]string:
- fastpathTV.EncMapUint32StringV(*v, e)
- case map[uint32]uint:
- fastpathTV.EncMapUint32UintV(v, e)
- case *map[uint32]uint:
- fastpathTV.EncMapUint32UintV(*v, e)
- case map[uint32]uint8:
- fastpathTV.EncMapUint32Uint8V(v, e)
- case *map[uint32]uint8:
- fastpathTV.EncMapUint32Uint8V(*v, e)
- case map[uint32]uint16:
- fastpathTV.EncMapUint32Uint16V(v, e)
- case *map[uint32]uint16:
- fastpathTV.EncMapUint32Uint16V(*v, e)
- case map[uint32]uint32:
- fastpathTV.EncMapUint32Uint32V(v, e)
- case *map[uint32]uint32:
- fastpathTV.EncMapUint32Uint32V(*v, e)
- case map[uint32]uint64:
- fastpathTV.EncMapUint32Uint64V(v, e)
- case *map[uint32]uint64:
- fastpathTV.EncMapUint32Uint64V(*v, e)
- case map[uint32]uintptr:
- fastpathTV.EncMapUint32UintptrV(v, e)
- case *map[uint32]uintptr:
- fastpathTV.EncMapUint32UintptrV(*v, e)
- case map[uint32]int:
- fastpathTV.EncMapUint32IntV(v, e)
- case *map[uint32]int:
- fastpathTV.EncMapUint32IntV(*v, e)
- case map[uint32]int8:
- fastpathTV.EncMapUint32Int8V(v, e)
- case *map[uint32]int8:
- fastpathTV.EncMapUint32Int8V(*v, e)
- case map[uint32]int16:
- fastpathTV.EncMapUint32Int16V(v, e)
- case *map[uint32]int16:
- fastpathTV.EncMapUint32Int16V(*v, e)
- case map[uint32]int32:
- fastpathTV.EncMapUint32Int32V(v, e)
- case *map[uint32]int32:
- fastpathTV.EncMapUint32Int32V(*v, e)
- case map[uint32]int64:
- fastpathTV.EncMapUint32Int64V(v, e)
- case *map[uint32]int64:
- fastpathTV.EncMapUint32Int64V(*v, e)
- case map[uint32]float32:
- fastpathTV.EncMapUint32Float32V(v, e)
- case *map[uint32]float32:
- fastpathTV.EncMapUint32Float32V(*v, e)
- case map[uint32]float64:
- fastpathTV.EncMapUint32Float64V(v, e)
- case *map[uint32]float64:
- fastpathTV.EncMapUint32Float64V(*v, e)
- case map[uint32]bool:
- fastpathTV.EncMapUint32BoolV(v, e)
- case *map[uint32]bool:
- fastpathTV.EncMapUint32BoolV(*v, e)
- case map[uint64]interface{}:
- fastpathTV.EncMapUint64IntfV(v, e)
- case *map[uint64]interface{}:
- fastpathTV.EncMapUint64IntfV(*v, e)
- case map[uint64]string:
- fastpathTV.EncMapUint64StringV(v, e)
- case *map[uint64]string:
- fastpathTV.EncMapUint64StringV(*v, e)
- case map[uint64]uint:
- fastpathTV.EncMapUint64UintV(v, e)
- case *map[uint64]uint:
- fastpathTV.EncMapUint64UintV(*v, e)
- case map[uint64]uint8:
- fastpathTV.EncMapUint64Uint8V(v, e)
- case *map[uint64]uint8:
- fastpathTV.EncMapUint64Uint8V(*v, e)
- case map[uint64]uint16:
- fastpathTV.EncMapUint64Uint16V(v, e)
- case *map[uint64]uint16:
- fastpathTV.EncMapUint64Uint16V(*v, e)
- case map[uint64]uint32:
- fastpathTV.EncMapUint64Uint32V(v, e)
- case *map[uint64]uint32:
- fastpathTV.EncMapUint64Uint32V(*v, e)
- case map[uint64]uint64:
- fastpathTV.EncMapUint64Uint64V(v, e)
- case *map[uint64]uint64:
- fastpathTV.EncMapUint64Uint64V(*v, e)
- case map[uint64]uintptr:
- fastpathTV.EncMapUint64UintptrV(v, e)
- case *map[uint64]uintptr:
- fastpathTV.EncMapUint64UintptrV(*v, e)
- case map[uint64]int:
- fastpathTV.EncMapUint64IntV(v, e)
- case *map[uint64]int:
- fastpathTV.EncMapUint64IntV(*v, e)
- case map[uint64]int8:
- fastpathTV.EncMapUint64Int8V(v, e)
- case *map[uint64]int8:
- fastpathTV.EncMapUint64Int8V(*v, e)
- case map[uint64]int16:
- fastpathTV.EncMapUint64Int16V(v, e)
- case *map[uint64]int16:
- fastpathTV.EncMapUint64Int16V(*v, e)
- case map[uint64]int32:
- fastpathTV.EncMapUint64Int32V(v, e)
- case *map[uint64]int32:
- fastpathTV.EncMapUint64Int32V(*v, e)
- case map[uint64]int64:
- fastpathTV.EncMapUint64Int64V(v, e)
- case *map[uint64]int64:
- fastpathTV.EncMapUint64Int64V(*v, e)
- case map[uint64]float32:
- fastpathTV.EncMapUint64Float32V(v, e)
- case *map[uint64]float32:
- fastpathTV.EncMapUint64Float32V(*v, e)
- case map[uint64]float64:
- fastpathTV.EncMapUint64Float64V(v, e)
- case *map[uint64]float64:
- fastpathTV.EncMapUint64Float64V(*v, e)
- case map[uint64]bool:
- fastpathTV.EncMapUint64BoolV(v, e)
- case *map[uint64]bool:
- fastpathTV.EncMapUint64BoolV(*v, e)
- case map[uintptr]interface{}:
- fastpathTV.EncMapUintptrIntfV(v, e)
- case *map[uintptr]interface{}:
- fastpathTV.EncMapUintptrIntfV(*v, e)
- case map[uintptr]string:
- fastpathTV.EncMapUintptrStringV(v, e)
- case *map[uintptr]string:
- fastpathTV.EncMapUintptrStringV(*v, e)
- case map[uintptr]uint:
- fastpathTV.EncMapUintptrUintV(v, e)
- case *map[uintptr]uint:
- fastpathTV.EncMapUintptrUintV(*v, e)
- case map[uintptr]uint8:
- fastpathTV.EncMapUintptrUint8V(v, e)
- case *map[uintptr]uint8:
- fastpathTV.EncMapUintptrUint8V(*v, e)
- case map[uintptr]uint16:
- fastpathTV.EncMapUintptrUint16V(v, e)
- case *map[uintptr]uint16:
- fastpathTV.EncMapUintptrUint16V(*v, e)
- case map[uintptr]uint32:
- fastpathTV.EncMapUintptrUint32V(v, e)
- case *map[uintptr]uint32:
- fastpathTV.EncMapUintptrUint32V(*v, e)
- case map[uintptr]uint64:
- fastpathTV.EncMapUintptrUint64V(v, e)
- case *map[uintptr]uint64:
- fastpathTV.EncMapUintptrUint64V(*v, e)
- case map[uintptr]uintptr:
- fastpathTV.EncMapUintptrUintptrV(v, e)
- case *map[uintptr]uintptr:
- fastpathTV.EncMapUintptrUintptrV(*v, e)
- case map[uintptr]int:
- fastpathTV.EncMapUintptrIntV(v, e)
- case *map[uintptr]int:
- fastpathTV.EncMapUintptrIntV(*v, e)
- case map[uintptr]int8:
- fastpathTV.EncMapUintptrInt8V(v, e)
- case *map[uintptr]int8:
- fastpathTV.EncMapUintptrInt8V(*v, e)
- case map[uintptr]int16:
- fastpathTV.EncMapUintptrInt16V(v, e)
- case *map[uintptr]int16:
- fastpathTV.EncMapUintptrInt16V(*v, e)
- case map[uintptr]int32:
- fastpathTV.EncMapUintptrInt32V(v, e)
- case *map[uintptr]int32:
- fastpathTV.EncMapUintptrInt32V(*v, e)
- case map[uintptr]int64:
- fastpathTV.EncMapUintptrInt64V(v, e)
- case *map[uintptr]int64:
- fastpathTV.EncMapUintptrInt64V(*v, e)
- case map[uintptr]float32:
- fastpathTV.EncMapUintptrFloat32V(v, e)
- case *map[uintptr]float32:
- fastpathTV.EncMapUintptrFloat32V(*v, e)
- case map[uintptr]float64:
- fastpathTV.EncMapUintptrFloat64V(v, e)
- case *map[uintptr]float64:
- fastpathTV.EncMapUintptrFloat64V(*v, e)
- case map[uintptr]bool:
- fastpathTV.EncMapUintptrBoolV(v, e)
- case *map[uintptr]bool:
- fastpathTV.EncMapUintptrBoolV(*v, e)
- case map[int]interface{}:
- fastpathTV.EncMapIntIntfV(v, e)
- case *map[int]interface{}:
- fastpathTV.EncMapIntIntfV(*v, e)
- case map[int]string:
- fastpathTV.EncMapIntStringV(v, e)
- case *map[int]string:
- fastpathTV.EncMapIntStringV(*v, e)
- case map[int]uint:
- fastpathTV.EncMapIntUintV(v, e)
- case *map[int]uint:
- fastpathTV.EncMapIntUintV(*v, e)
- case map[int]uint8:
- fastpathTV.EncMapIntUint8V(v, e)
- case *map[int]uint8:
- fastpathTV.EncMapIntUint8V(*v, e)
- case map[int]uint16:
- fastpathTV.EncMapIntUint16V(v, e)
- case *map[int]uint16:
- fastpathTV.EncMapIntUint16V(*v, e)
- case map[int]uint32:
- fastpathTV.EncMapIntUint32V(v, e)
- case *map[int]uint32:
- fastpathTV.EncMapIntUint32V(*v, e)
- case map[int]uint64:
- fastpathTV.EncMapIntUint64V(v, e)
- case *map[int]uint64:
- fastpathTV.EncMapIntUint64V(*v, e)
- case map[int]uintptr:
- fastpathTV.EncMapIntUintptrV(v, e)
- case *map[int]uintptr:
- fastpathTV.EncMapIntUintptrV(*v, e)
- case map[int]int:
- fastpathTV.EncMapIntIntV(v, e)
- case *map[int]int:
- fastpathTV.EncMapIntIntV(*v, e)
- case map[int]int8:
- fastpathTV.EncMapIntInt8V(v, e)
- case *map[int]int8:
- fastpathTV.EncMapIntInt8V(*v, e)
- case map[int]int16:
- fastpathTV.EncMapIntInt16V(v, e)
- case *map[int]int16:
- fastpathTV.EncMapIntInt16V(*v, e)
- case map[int]int32:
- fastpathTV.EncMapIntInt32V(v, e)
- case *map[int]int32:
- fastpathTV.EncMapIntInt32V(*v, e)
- case map[int]int64:
- fastpathTV.EncMapIntInt64V(v, e)
- case *map[int]int64:
- fastpathTV.EncMapIntInt64V(*v, e)
- case map[int]float32:
- fastpathTV.EncMapIntFloat32V(v, e)
- case *map[int]float32:
- fastpathTV.EncMapIntFloat32V(*v, e)
- case map[int]float64:
- fastpathTV.EncMapIntFloat64V(v, e)
- case *map[int]float64:
- fastpathTV.EncMapIntFloat64V(*v, e)
- case map[int]bool:
- fastpathTV.EncMapIntBoolV(v, e)
- case *map[int]bool:
- fastpathTV.EncMapIntBoolV(*v, e)
- case map[int8]interface{}:
- fastpathTV.EncMapInt8IntfV(v, e)
- case *map[int8]interface{}:
- fastpathTV.EncMapInt8IntfV(*v, e)
- case map[int8]string:
- fastpathTV.EncMapInt8StringV(v, e)
- case *map[int8]string:
- fastpathTV.EncMapInt8StringV(*v, e)
- case map[int8]uint:
- fastpathTV.EncMapInt8UintV(v, e)
- case *map[int8]uint:
- fastpathTV.EncMapInt8UintV(*v, e)
- case map[int8]uint8:
- fastpathTV.EncMapInt8Uint8V(v, e)
- case *map[int8]uint8:
- fastpathTV.EncMapInt8Uint8V(*v, e)
- case map[int8]uint16:
- fastpathTV.EncMapInt8Uint16V(v, e)
- case *map[int8]uint16:
- fastpathTV.EncMapInt8Uint16V(*v, e)
- case map[int8]uint32:
- fastpathTV.EncMapInt8Uint32V(v, e)
- case *map[int8]uint32:
- fastpathTV.EncMapInt8Uint32V(*v, e)
- case map[int8]uint64:
- fastpathTV.EncMapInt8Uint64V(v, e)
- case *map[int8]uint64:
- fastpathTV.EncMapInt8Uint64V(*v, e)
- case map[int8]uintptr:
- fastpathTV.EncMapInt8UintptrV(v, e)
- case *map[int8]uintptr:
- fastpathTV.EncMapInt8UintptrV(*v, e)
- case map[int8]int:
- fastpathTV.EncMapInt8IntV(v, e)
- case *map[int8]int:
- fastpathTV.EncMapInt8IntV(*v, e)
- case map[int8]int8:
- fastpathTV.EncMapInt8Int8V(v, e)
- case *map[int8]int8:
- fastpathTV.EncMapInt8Int8V(*v, e)
- case map[int8]int16:
- fastpathTV.EncMapInt8Int16V(v, e)
- case *map[int8]int16:
- fastpathTV.EncMapInt8Int16V(*v, e)
- case map[int8]int32:
- fastpathTV.EncMapInt8Int32V(v, e)
- case *map[int8]int32:
- fastpathTV.EncMapInt8Int32V(*v, e)
- case map[int8]int64:
- fastpathTV.EncMapInt8Int64V(v, e)
- case *map[int8]int64:
- fastpathTV.EncMapInt8Int64V(*v, e)
- case map[int8]float32:
- fastpathTV.EncMapInt8Float32V(v, e)
- case *map[int8]float32:
- fastpathTV.EncMapInt8Float32V(*v, e)
- case map[int8]float64:
- fastpathTV.EncMapInt8Float64V(v, e)
- case *map[int8]float64:
- fastpathTV.EncMapInt8Float64V(*v, e)
- case map[int8]bool:
- fastpathTV.EncMapInt8BoolV(v, e)
- case *map[int8]bool:
- fastpathTV.EncMapInt8BoolV(*v, e)
- case map[int16]interface{}:
- fastpathTV.EncMapInt16IntfV(v, e)
- case *map[int16]interface{}:
- fastpathTV.EncMapInt16IntfV(*v, e)
- case map[int16]string:
- fastpathTV.EncMapInt16StringV(v, e)
- case *map[int16]string:
- fastpathTV.EncMapInt16StringV(*v, e)
- case map[int16]uint:
- fastpathTV.EncMapInt16UintV(v, e)
- case *map[int16]uint:
- fastpathTV.EncMapInt16UintV(*v, e)
- case map[int16]uint8:
- fastpathTV.EncMapInt16Uint8V(v, e)
- case *map[int16]uint8:
- fastpathTV.EncMapInt16Uint8V(*v, e)
- case map[int16]uint16:
- fastpathTV.EncMapInt16Uint16V(v, e)
- case *map[int16]uint16:
- fastpathTV.EncMapInt16Uint16V(*v, e)
- case map[int16]uint32:
- fastpathTV.EncMapInt16Uint32V(v, e)
- case *map[int16]uint32:
- fastpathTV.EncMapInt16Uint32V(*v, e)
- case map[int16]uint64:
- fastpathTV.EncMapInt16Uint64V(v, e)
- case *map[int16]uint64:
- fastpathTV.EncMapInt16Uint64V(*v, e)
- case map[int16]uintptr:
- fastpathTV.EncMapInt16UintptrV(v, e)
- case *map[int16]uintptr:
- fastpathTV.EncMapInt16UintptrV(*v, e)
- case map[int16]int:
- fastpathTV.EncMapInt16IntV(v, e)
- case *map[int16]int:
- fastpathTV.EncMapInt16IntV(*v, e)
- case map[int16]int8:
- fastpathTV.EncMapInt16Int8V(v, e)
- case *map[int16]int8:
- fastpathTV.EncMapInt16Int8V(*v, e)
- case map[int16]int16:
- fastpathTV.EncMapInt16Int16V(v, e)
- case *map[int16]int16:
- fastpathTV.EncMapInt16Int16V(*v, e)
- case map[int16]int32:
- fastpathTV.EncMapInt16Int32V(v, e)
- case *map[int16]int32:
- fastpathTV.EncMapInt16Int32V(*v, e)
- case map[int16]int64:
- fastpathTV.EncMapInt16Int64V(v, e)
- case *map[int16]int64:
- fastpathTV.EncMapInt16Int64V(*v, e)
- case map[int16]float32:
- fastpathTV.EncMapInt16Float32V(v, e)
- case *map[int16]float32:
- fastpathTV.EncMapInt16Float32V(*v, e)
- case map[int16]float64:
- fastpathTV.EncMapInt16Float64V(v, e)
- case *map[int16]float64:
- fastpathTV.EncMapInt16Float64V(*v, e)
- case map[int16]bool:
- fastpathTV.EncMapInt16BoolV(v, e)
- case *map[int16]bool:
- fastpathTV.EncMapInt16BoolV(*v, e)
- case map[int32]interface{}:
- fastpathTV.EncMapInt32IntfV(v, e)
- case *map[int32]interface{}:
- fastpathTV.EncMapInt32IntfV(*v, e)
- case map[int32]string:
- fastpathTV.EncMapInt32StringV(v, e)
- case *map[int32]string:
- fastpathTV.EncMapInt32StringV(*v, e)
- case map[int32]uint:
- fastpathTV.EncMapInt32UintV(v, e)
- case *map[int32]uint:
- fastpathTV.EncMapInt32UintV(*v, e)
- case map[int32]uint8:
- fastpathTV.EncMapInt32Uint8V(v, e)
- case *map[int32]uint8:
- fastpathTV.EncMapInt32Uint8V(*v, e)
- case map[int32]uint16:
- fastpathTV.EncMapInt32Uint16V(v, e)
- case *map[int32]uint16:
- fastpathTV.EncMapInt32Uint16V(*v, e)
- case map[int32]uint32:
- fastpathTV.EncMapInt32Uint32V(v, e)
- case *map[int32]uint32:
- fastpathTV.EncMapInt32Uint32V(*v, e)
- case map[int32]uint64:
- fastpathTV.EncMapInt32Uint64V(v, e)
- case *map[int32]uint64:
- fastpathTV.EncMapInt32Uint64V(*v, e)
- case map[int32]uintptr:
- fastpathTV.EncMapInt32UintptrV(v, e)
- case *map[int32]uintptr:
- fastpathTV.EncMapInt32UintptrV(*v, e)
- case map[int32]int:
- fastpathTV.EncMapInt32IntV(v, e)
- case *map[int32]int:
- fastpathTV.EncMapInt32IntV(*v, e)
- case map[int32]int8:
- fastpathTV.EncMapInt32Int8V(v, e)
- case *map[int32]int8:
- fastpathTV.EncMapInt32Int8V(*v, e)
- case map[int32]int16:
- fastpathTV.EncMapInt32Int16V(v, e)
- case *map[int32]int16:
- fastpathTV.EncMapInt32Int16V(*v, e)
- case map[int32]int32:
- fastpathTV.EncMapInt32Int32V(v, e)
- case *map[int32]int32:
- fastpathTV.EncMapInt32Int32V(*v, e)
- case map[int32]int64:
- fastpathTV.EncMapInt32Int64V(v, e)
- case *map[int32]int64:
- fastpathTV.EncMapInt32Int64V(*v, e)
- case map[int32]float32:
- fastpathTV.EncMapInt32Float32V(v, e)
- case *map[int32]float32:
- fastpathTV.EncMapInt32Float32V(*v, e)
- case map[int32]float64:
- fastpathTV.EncMapInt32Float64V(v, e)
- case *map[int32]float64:
- fastpathTV.EncMapInt32Float64V(*v, e)
- case map[int32]bool:
- fastpathTV.EncMapInt32BoolV(v, e)
- case *map[int32]bool:
- fastpathTV.EncMapInt32BoolV(*v, e)
- case map[int64]interface{}:
- fastpathTV.EncMapInt64IntfV(v, e)
- case *map[int64]interface{}:
- fastpathTV.EncMapInt64IntfV(*v, e)
- case map[int64]string:
- fastpathTV.EncMapInt64StringV(v, e)
- case *map[int64]string:
- fastpathTV.EncMapInt64StringV(*v, e)
- case map[int64]uint:
- fastpathTV.EncMapInt64UintV(v, e)
- case *map[int64]uint:
- fastpathTV.EncMapInt64UintV(*v, e)
- case map[int64]uint8:
- fastpathTV.EncMapInt64Uint8V(v, e)
- case *map[int64]uint8:
- fastpathTV.EncMapInt64Uint8V(*v, e)
- case map[int64]uint16:
- fastpathTV.EncMapInt64Uint16V(v, e)
- case *map[int64]uint16:
- fastpathTV.EncMapInt64Uint16V(*v, e)
- case map[int64]uint32:
- fastpathTV.EncMapInt64Uint32V(v, e)
- case *map[int64]uint32:
- fastpathTV.EncMapInt64Uint32V(*v, e)
- case map[int64]uint64:
- fastpathTV.EncMapInt64Uint64V(v, e)
- case *map[int64]uint64:
- fastpathTV.EncMapInt64Uint64V(*v, e)
- case map[int64]uintptr:
- fastpathTV.EncMapInt64UintptrV(v, e)
- case *map[int64]uintptr:
- fastpathTV.EncMapInt64UintptrV(*v, e)
- case map[int64]int:
- fastpathTV.EncMapInt64IntV(v, e)
- case *map[int64]int:
- fastpathTV.EncMapInt64IntV(*v, e)
- case map[int64]int8:
- fastpathTV.EncMapInt64Int8V(v, e)
- case *map[int64]int8:
- fastpathTV.EncMapInt64Int8V(*v, e)
- case map[int64]int16:
- fastpathTV.EncMapInt64Int16V(v, e)
- case *map[int64]int16:
- fastpathTV.EncMapInt64Int16V(*v, e)
- case map[int64]int32:
- fastpathTV.EncMapInt64Int32V(v, e)
- case *map[int64]int32:
- fastpathTV.EncMapInt64Int32V(*v, e)
- case map[int64]int64:
- fastpathTV.EncMapInt64Int64V(v, e)
- case *map[int64]int64:
- fastpathTV.EncMapInt64Int64V(*v, e)
- case map[int64]float32:
- fastpathTV.EncMapInt64Float32V(v, e)
- case *map[int64]float32:
- fastpathTV.EncMapInt64Float32V(*v, e)
- case map[int64]float64:
- fastpathTV.EncMapInt64Float64V(v, e)
- case *map[int64]float64:
- fastpathTV.EncMapInt64Float64V(*v, e)
- case map[int64]bool:
- fastpathTV.EncMapInt64BoolV(v, e)
- case *map[int64]bool:
- fastpathTV.EncMapInt64BoolV(*v, e)
- case map[bool]interface{}:
- fastpathTV.EncMapBoolIntfV(v, e)
- case *map[bool]interface{}:
- fastpathTV.EncMapBoolIntfV(*v, e)
- case map[bool]string:
- fastpathTV.EncMapBoolStringV(v, e)
- case *map[bool]string:
- fastpathTV.EncMapBoolStringV(*v, e)
- case map[bool]uint:
- fastpathTV.EncMapBoolUintV(v, e)
- case *map[bool]uint:
- fastpathTV.EncMapBoolUintV(*v, e)
- case map[bool]uint8:
- fastpathTV.EncMapBoolUint8V(v, e)
- case *map[bool]uint8:
- fastpathTV.EncMapBoolUint8V(*v, e)
- case map[bool]uint16:
- fastpathTV.EncMapBoolUint16V(v, e)
- case *map[bool]uint16:
- fastpathTV.EncMapBoolUint16V(*v, e)
- case map[bool]uint32:
- fastpathTV.EncMapBoolUint32V(v, e)
- case *map[bool]uint32:
- fastpathTV.EncMapBoolUint32V(*v, e)
- case map[bool]uint64:
- fastpathTV.EncMapBoolUint64V(v, e)
- case *map[bool]uint64:
- fastpathTV.EncMapBoolUint64V(*v, e)
- case map[bool]uintptr:
- fastpathTV.EncMapBoolUintptrV(v, e)
- case *map[bool]uintptr:
- fastpathTV.EncMapBoolUintptrV(*v, e)
- case map[bool]int:
- fastpathTV.EncMapBoolIntV(v, e)
- case *map[bool]int:
- fastpathTV.EncMapBoolIntV(*v, e)
- case map[bool]int8:
- fastpathTV.EncMapBoolInt8V(v, e)
- case *map[bool]int8:
- fastpathTV.EncMapBoolInt8V(*v, e)
- case map[bool]int16:
- fastpathTV.EncMapBoolInt16V(v, e)
- case *map[bool]int16:
- fastpathTV.EncMapBoolInt16V(*v, e)
- case map[bool]int32:
- fastpathTV.EncMapBoolInt32V(v, e)
- case *map[bool]int32:
- fastpathTV.EncMapBoolInt32V(*v, e)
- case map[bool]int64:
- fastpathTV.EncMapBoolInt64V(v, e)
- case *map[bool]int64:
- fastpathTV.EncMapBoolInt64V(*v, e)
- case map[bool]float32:
- fastpathTV.EncMapBoolFloat32V(v, e)
- case *map[bool]float32:
- fastpathTV.EncMapBoolFloat32V(*v, e)
- case map[bool]float64:
- fastpathTV.EncMapBoolFloat64V(v, e)
- case *map[bool]float64:
- fastpathTV.EncMapBoolFloat64V(*v, e)
- case map[bool]bool:
- fastpathTV.EncMapBoolBoolV(v, e)
- case *map[bool]bool:
- fastpathTV.EncMapBoolBoolV(*v, e)
-
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-// -- -- fast path functions
-
-func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e)
- } else {
- fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e)
- }
-}
-func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- e.encode(v2)
- }
- } else {
- for _, v2 := range v {
- e.encode(v2)
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- e.encode(v2)
- }
- } else {
- for _, v2 := range v {
- e.encode(v2)
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e)
- } else {
- fastpathTV.EncSliceStringV(rv2i(rv).([]string), e)
- }
-}
-func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeString(cUTF8, v2)
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeString(cUTF8, v2)
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e)
- } else {
- fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e)
- }
-}
-func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeFloat32(v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeFloat32(v2)
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeFloat32(v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeFloat32(v2)
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e)
- } else {
- fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e)
- }
-}
-func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeFloat64(v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeFloat64(v2)
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeFloat64(v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeFloat64(v2)
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e)
- } else {
- fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e)
- }
-}
-func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceUint8V(rv2i(rv).([]uint8), e)
- } else {
- fastpathTV.EncSliceUint8V(rv2i(rv).([]uint8), e)
- }
-}
-func (_ fastpathT) EncSliceUint8V(v []uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e)
- } else {
- fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e)
- }
-}
-func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e)
- } else {
- fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e)
- }
-}
-func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e)
- } else {
- fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e)
- }
-}
-func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeUint(uint64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e)
- } else {
- fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e)
- }
-}
-func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- e.encode(v2)
- }
- } else {
- for _, v2 := range v {
- e.encode(v2)
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- e.encode(v2)
- }
- } else {
- for _, v2 := range v {
- e.encode(v2)
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e)
- } else {
- fastpathTV.EncSliceIntV(rv2i(rv).([]int), e)
- }
-}
-func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e)
- } else {
- fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e)
- }
-}
-func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e)
- } else {
- fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e)
- }
-}
-func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e)
- } else {
- fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e)
- }
-}
-func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e)
- } else {
- fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e)
- }
-}
-func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeInt(int64(v2))
- }
- } else {
- for _, v2 := range v {
- ee.EncodeInt(int64(v2))
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e)
- } else {
- fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e)
- }
-}
-func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- ee.EncodeBool(v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeBool(v2)
- }
- }
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- ee.EncodeBool(v2)
- }
- } else {
- for _, v2 := range v {
- ee.EncodeBool(v2)
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e)
-}
-func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e)
-}
-func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e)
-}
-func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e)
-}
-func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e)
-}
-func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e)
-}
-func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e)
-}
-func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e)
-}
-func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e)
-}
-func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e)
-}
-func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e)
-}
-func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e)
-}
-func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e)
-}
-func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e)
-}
-func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e)
-}
-func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e)
-}
-func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e)
-}
-func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- e.encode(v[string(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- e.encode(v[string(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e)
-}
-func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[string(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeString(cUTF8, v[string(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e)
-}
-func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e)
-}
-func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e)
-}
-func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e)
-}
-func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e)
-}
-func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e)
-}
-func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- e.encode(v[string(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- e.encode(v[string(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e)
-}
-func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e)
-}
-func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e)
-}
-func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e)
-}
-func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e)
-}
-func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[string(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v[string(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e)
-}
-func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[string(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeFloat32(v[string(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e)
-}
-func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[string(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeFloat64(v[string(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e)
-}
-func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]string, len(v))
- var i int
- for k, _ := range v {
- v2[i] = string(k)
- i++
- }
- sort.Sort(stringSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v[string(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeBool(v[string(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeString(cUTF8, k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeString(cUTF8, k2)
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e)
-}
-func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- e.encode(v[float32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- e.encode(v[float32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e)
-}
-func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[float32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeString(cUTF8, v[float32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e)
-}
-func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e)
-}
-func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e)
-}
-func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e)
-}
-func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e)
-}
-func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeUint(uint64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e)
-}
-func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- e.encode(v[float32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- e.encode(v[float32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e)
-}
-func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e)
-}
-func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e)
-}
-func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e)
-}
-func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e)
-}
-func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeInt(int64(v[float32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e)
-}
-func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[float32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeFloat32(v[float32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e)
-}
-func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[float32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeFloat64(v[float32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e)
-}
-func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(float32(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[float32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat32(float32(k2))
- ee.EncodeBool(v[float32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat32(k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat32(k2)
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e)
-}
-func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- e.encode(v[float64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- e.encode(v[float64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e)
-}
-func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[float64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeString(cUTF8, v[float64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e)
-}
-func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e)
-}
-func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e)
-}
-func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e)
-}
-func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e)
-}
-func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeUint(uint64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e)
-}
-func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- e.encode(v[float64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- e.encode(v[float64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e)
-}
-func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e)
-}
-func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e)
-}
-func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e)
-}
-func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e)
-}
-func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeInt(int64(v[float64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e)
-}
-func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[float64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeFloat32(v[float64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e)
-}
-func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[float64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeFloat64(v[float64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e)
-}
-func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]float64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = float64(k)
- i++
- }
- sort.Sort(floatSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(float64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[float64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeFloat64(float64(k2))
- ee.EncodeBool(v[float64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeFloat64(k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeFloat64(k2)
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e)
-}
-func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- e.encode(v[uint(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e)
-}
-func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[uint(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeString(cUTF8, v[uint(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e)
-}
-func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e)
-}
-func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e)
-}
-func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e)
-}
-func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e)
-}
-func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeUint(uint64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e)
-}
-func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- e.encode(v[uint(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e)
-}
-func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e)
-}
-func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e)
-}
-func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e)
-}
-func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e)
-}
-func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeInt(int64(v[uint(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e)
-}
-func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[uint(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeFloat32(v[uint(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e)
-}
-func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[uint(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeFloat64(v[uint(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e)
-}
-func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[uint(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint(k2)))
- ee.EncodeBool(v[uint(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e)
-}
-func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- e.encode(v[uint8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e)
-}
-func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[uint8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeString(cUTF8, v[uint8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e)
-}
-func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e)
-}
-func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e)
-}
-func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e)
-}
-func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e)
-}
-func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeUint(uint64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e)
-}
-func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- e.encode(v[uint8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e)
-}
-func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e)
-}
-func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e)
-}
-func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e)
-}
-func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e)
-}
-func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeInt(int64(v[uint8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e)
-}
-func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[uint8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeFloat32(v[uint8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e)
-}
-func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[uint8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeFloat64(v[uint8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e)
-}
-func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[uint8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint8(k2)))
- ee.EncodeBool(v[uint8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e)
-}
-func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- e.encode(v[uint16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e)
-}
-func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[uint16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeString(cUTF8, v[uint16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e)
-}
-func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e)
-}
-func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e)
-}
-func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e)
-}
-func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e)
-}
-func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeUint(uint64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e)
-}
-func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- e.encode(v[uint16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e)
-}
-func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e)
-}
-func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e)
-}
-func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e)
-}
-func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e)
-}
-func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeInt(int64(v[uint16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e)
-}
-func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[uint16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeFloat32(v[uint16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e)
-}
-func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[uint16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeFloat64(v[uint16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e)
-}
-func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[uint16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint16(k2)))
- ee.EncodeBool(v[uint16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e)
-}
-func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- e.encode(v[uint32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e)
-}
-func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[uint32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeString(cUTF8, v[uint32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e)
-}
-func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e)
-}
-func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e)
-}
-func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e)
-}
-func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e)
-}
-func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeUint(uint64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e)
-}
-func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- e.encode(v[uint32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e)
-}
-func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e)
-}
-func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e)
-}
-func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e)
-}
-func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e)
-}
-func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeInt(int64(v[uint32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e)
-}
-func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[uint32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeFloat32(v[uint32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e)
-}
-func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[uint32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeFloat64(v[uint32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e)
-}
-func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[uint32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint32(k2)))
- ee.EncodeBool(v[uint32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e)
-}
-func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- e.encode(v[uint64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e)
-}
-func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[uint64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeString(cUTF8, v[uint64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e)
-}
-func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e)
-}
-func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e)
-}
-func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e)
-}
-func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e)
-}
-func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeUint(uint64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e)
-}
-func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- e.encode(v[uint64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- e.encode(v[uint64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e)
-}
-func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e)
-}
-func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e)
-}
-func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e)
-}
-func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e)
-}
-func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeInt(int64(v[uint64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e)
-}
-func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[uint64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeFloat32(v[uint64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e)
-}
-func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[uint64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeFloat64(v[uint64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e)
-}
-func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(uint64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[uint64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeUint(uint64(uint64(k2)))
- ee.EncodeBool(v[uint64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeUint(uint64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeUint(uint64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e)
-}
-func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- e.encode(v[uintptr(k2)])
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- e.encode(v[uintptr(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e)
-}
-func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[uintptr(k2)])
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeString(cUTF8, v[uintptr(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e)
-}
-func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e)
-}
-func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e)
-}
-func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e)
-}
-func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e)
-}
-func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeUint(uint64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e)
-}
-func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- e.encode(v[uintptr(k2)])
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- e.encode(v[uintptr(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e)
-}
-func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e)
-}
-func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e)
-}
-func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e)
-}
-func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e)
-}
-func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeInt(int64(v[uintptr(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e)
-}
-func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[uintptr(k2)])
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeFloat32(v[uintptr(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e)
-}
-func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[uintptr(k2)])
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeFloat64(v[uintptr(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e)
-}
-func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]uint64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = uint64(k)
- i++
- }
- sort.Sort(uintSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- e.encode(uintptr(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[uintptr(k2)])
- }
- } else {
- for _, k2 := range v2 {
- e.encode(uintptr(k2))
- ee.EncodeBool(v[uintptr(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- e.encode(k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- e.encode(k2)
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e)
-}
-func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- e.encode(v[int(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e)
-}
-func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[int(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeString(cUTF8, v[int(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e)
-}
-func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e)
-}
-func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e)
-}
-func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e)
-}
-func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e)
-}
-func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeUint(uint64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e)
-}
-func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- e.encode(v[int(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e)
-}
-func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeInt(int64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e)
-}
-func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeInt(int64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e)
-}
-func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeInt(int64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e)
-}
-func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeInt(int64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e)
-}
-func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeInt(int64(v[int(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e)
-}
-func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[int(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeFloat32(v[int(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e)
-}
-func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[int(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeFloat64(v[int(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e)
-}
-func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[int(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int(k2)))
- ee.EncodeBool(v[int(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e)
-}
-func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- e.encode(v[int8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e)
-}
-func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[int8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeString(cUTF8, v[int8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e)
-}
-func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e)
-}
-func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e)
-}
-func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e)
-}
-func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e)
-}
-func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeUint(uint64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e)
-}
-func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- e.encode(v[int8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e)
-}
-func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e)
-}
-func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e)
-}
-func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e)
-}
-func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e)
-}
-func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeInt(int64(v[int8(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e)
-}
-func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[int8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeFloat32(v[int8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e)
-}
-func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[int8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeFloat64(v[int8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e)
-}
-func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int8(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[int8(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int8(k2)))
- ee.EncodeBool(v[int8(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e)
-}
-func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- e.encode(v[int16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e)
-}
-func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[int16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeString(cUTF8, v[int16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e)
-}
-func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e)
-}
-func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e)
-}
-func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e)
-}
-func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e)
-}
-func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeUint(uint64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e)
-}
-func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- e.encode(v[int16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e)
-}
-func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e)
-}
-func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e)
-}
-func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e)
-}
-func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e)
-}
-func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeInt(int64(v[int16(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e)
-}
-func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[int16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeFloat32(v[int16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e)
-}
-func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[int16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeFloat64(v[int16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e)
-}
-func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int16(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[int16(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int16(k2)))
- ee.EncodeBool(v[int16(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e)
-}
-func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- e.encode(v[int32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e)
-}
-func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[int32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeString(cUTF8, v[int32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e)
-}
-func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e)
-}
-func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e)
-}
-func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e)
-}
-func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e)
-}
-func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeUint(uint64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e)
-}
-func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- e.encode(v[int32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e)
-}
-func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e)
-}
-func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e)
-}
-func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e)
-}
-func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e)
-}
-func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeInt(int64(v[int32(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e)
-}
-func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[int32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeFloat32(v[int32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e)
-}
-func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[int32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeFloat64(v[int32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e)
-}
-func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int32(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[int32(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int32(k2)))
- ee.EncodeBool(v[int32(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e)
-}
-func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- e.encode(v[int64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e)
-}
-func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[int64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeString(cUTF8, v[int64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e)
-}
-func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e)
-}
-func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e)
-}
-func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e)
-}
-func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e)
-}
-func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeUint(uint64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e)
-}
-func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- e.encode(v[int64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- e.encode(v[int64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e)
-}
-func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e)
-}
-func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e)
-}
-func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e)
-}
-func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e)
-}
-func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeInt(int64(v[int64(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e)
-}
-func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[int64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeFloat32(v[int64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e)
-}
-func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[int64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeFloat64(v[int64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e)
-}
-func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]int64, len(v))
- var i int
- for k, _ := range v {
- v2[i] = int64(k)
- i++
- }
- sort.Sort(intSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(int64(k2)))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[int64(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeInt(int64(int64(k2)))
- ee.EncodeBool(v[int64(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeInt(int64(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeInt(int64(k2))
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e)
-}
-func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- e.encode(v[bool(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- e.encode(v[bool(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e)
-}
-func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v[bool(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeString(cUTF8, v[bool(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeString(cUTF8, v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeString(cUTF8, v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e)
-}
-func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e)
-}
-func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e)
-}
-func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e)
-}
-func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e)
-}
-func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeUint(uint64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeUint(uint64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeUint(uint64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e)
-}
-func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- e.encode(v[bool(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- e.encode(v[bool(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- e.encode(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- e.encode(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e)
-}
-func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e)
-}
-func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e)
-}
-func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e)
-}
-func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e)
-}
-func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeInt(int64(v[bool(k2)]))
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeInt(int64(v2))
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeInt(int64(v2))
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e)
-}
-func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v[bool(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeFloat32(v[bool(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat32(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeFloat32(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e)
-}
-func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v[bool(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeFloat64(v[bool(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeFloat64(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeFloat64(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e)
-}
-func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) {
- if v == nil {
- e.e.EncodeNil()
- return
- }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- v2 := make([]bool, len(v))
- var i int
- for k, _ := range v {
- v2[i] = bool(k)
- i++
- }
- sort.Sort(boolSlice(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- ee.EncodeBool(bool(k2))
- ee.WriteMapElemValue()
- ee.EncodeBool(v[bool(k2)])
- }
- } else {
- for _, k2 := range v2 {
- ee.EncodeBool(bool(k2))
- ee.EncodeBool(v[bool(k2)])
- }
- }
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- ee.EncodeBool(k2)
- ee.WriteMapElemValue()
- ee.EncodeBool(v2)
- }
- } else {
- for k2, v2 := range v {
- ee.EncodeBool(k2)
- ee.EncodeBool(v2)
- }
- }
- }
- ee.WriteMapEnd()
-}
-
-// -- decode
-
-// -- -- fast path type switch
-func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
- var changed bool
- switch v := iv.(type) {
-
- case []interface{}:
- var v2 []interface{}
- v2, changed = fastpathTV.DecSliceIntfV(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]interface{}:
- var v2 []interface{}
- v2, changed = fastpathTV.DecSliceIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case []string:
- var v2 []string
- v2, changed = fastpathTV.DecSliceStringV(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]string:
- var v2 []string
- v2, changed = fastpathTV.DecSliceStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case []float32:
- var v2 []float32
- v2, changed = fastpathTV.DecSliceFloat32V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]float32:
- var v2 []float32
- v2, changed = fastpathTV.DecSliceFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case []float64:
- var v2 []float64
- v2, changed = fastpathTV.DecSliceFloat64V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]float64:
- var v2 []float64
- v2, changed = fastpathTV.DecSliceFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case []uint:
- var v2 []uint
- v2, changed = fastpathTV.DecSliceUintV(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]uint:
- var v2 []uint
- v2, changed = fastpathTV.DecSliceUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case []uint16:
- var v2 []uint16
- v2, changed = fastpathTV.DecSliceUint16V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]uint16:
- var v2 []uint16
- v2, changed = fastpathTV.DecSliceUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case []uint32:
- var v2 []uint32
- v2, changed = fastpathTV.DecSliceUint32V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]uint32:
- var v2 []uint32
- v2, changed = fastpathTV.DecSliceUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case []uint64:
- var v2 []uint64
- v2, changed = fastpathTV.DecSliceUint64V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]uint64:
- var v2 []uint64
- v2, changed = fastpathTV.DecSliceUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case []uintptr:
- var v2 []uintptr
- v2, changed = fastpathTV.DecSliceUintptrV(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]uintptr:
- var v2 []uintptr
- v2, changed = fastpathTV.DecSliceUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case []int:
- var v2 []int
- v2, changed = fastpathTV.DecSliceIntV(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]int:
- var v2 []int
- v2, changed = fastpathTV.DecSliceIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case []int8:
- var v2 []int8
- v2, changed = fastpathTV.DecSliceInt8V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]int8:
- var v2 []int8
- v2, changed = fastpathTV.DecSliceInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case []int16:
- var v2 []int16
- v2, changed = fastpathTV.DecSliceInt16V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]int16:
- var v2 []int16
- v2, changed = fastpathTV.DecSliceInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case []int32:
- var v2 []int32
- v2, changed = fastpathTV.DecSliceInt32V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]int32:
- var v2 []int32
- v2, changed = fastpathTV.DecSliceInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case []int64:
- var v2 []int64
- v2, changed = fastpathTV.DecSliceInt64V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]int64:
- var v2 []int64
- v2, changed = fastpathTV.DecSliceInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case []bool:
- var v2 []bool
- v2, changed = fastpathTV.DecSliceBoolV(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]bool:
- var v2 []bool
- v2, changed = fastpathTV.DecSliceBoolV(*v, true, d)
- if changed {
- *v = v2
- }
-
- case map[interface{}]interface{}:
- fastpathTV.DecMapIntfIntfV(v, false, d)
- case *map[interface{}]interface{}:
- var v2 map[interface{}]interface{}
- v2, changed = fastpathTV.DecMapIntfIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]string:
- fastpathTV.DecMapIntfStringV(v, false, d)
- case *map[interface{}]string:
- var v2 map[interface{}]string
- v2, changed = fastpathTV.DecMapIntfStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]uint:
- fastpathTV.DecMapIntfUintV(v, false, d)
- case *map[interface{}]uint:
- var v2 map[interface{}]uint
- v2, changed = fastpathTV.DecMapIntfUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]uint8:
- fastpathTV.DecMapIntfUint8V(v, false, d)
- case *map[interface{}]uint8:
- var v2 map[interface{}]uint8
- v2, changed = fastpathTV.DecMapIntfUint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]uint16:
- fastpathTV.DecMapIntfUint16V(v, false, d)
- case *map[interface{}]uint16:
- var v2 map[interface{}]uint16
- v2, changed = fastpathTV.DecMapIntfUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]uint32:
- fastpathTV.DecMapIntfUint32V(v, false, d)
- case *map[interface{}]uint32:
- var v2 map[interface{}]uint32
- v2, changed = fastpathTV.DecMapIntfUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]uint64:
- fastpathTV.DecMapIntfUint64V(v, false, d)
- case *map[interface{}]uint64:
- var v2 map[interface{}]uint64
- v2, changed = fastpathTV.DecMapIntfUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]uintptr:
- fastpathTV.DecMapIntfUintptrV(v, false, d)
- case *map[interface{}]uintptr:
- var v2 map[interface{}]uintptr
- v2, changed = fastpathTV.DecMapIntfUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]int:
- fastpathTV.DecMapIntfIntV(v, false, d)
- case *map[interface{}]int:
- var v2 map[interface{}]int
- v2, changed = fastpathTV.DecMapIntfIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]int8:
- fastpathTV.DecMapIntfInt8V(v, false, d)
- case *map[interface{}]int8:
- var v2 map[interface{}]int8
- v2, changed = fastpathTV.DecMapIntfInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]int16:
- fastpathTV.DecMapIntfInt16V(v, false, d)
- case *map[interface{}]int16:
- var v2 map[interface{}]int16
- v2, changed = fastpathTV.DecMapIntfInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]int32:
- fastpathTV.DecMapIntfInt32V(v, false, d)
- case *map[interface{}]int32:
- var v2 map[interface{}]int32
- v2, changed = fastpathTV.DecMapIntfInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]int64:
- fastpathTV.DecMapIntfInt64V(v, false, d)
- case *map[interface{}]int64:
- var v2 map[interface{}]int64
- v2, changed = fastpathTV.DecMapIntfInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]float32:
- fastpathTV.DecMapIntfFloat32V(v, false, d)
- case *map[interface{}]float32:
- var v2 map[interface{}]float32
- v2, changed = fastpathTV.DecMapIntfFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]float64:
- fastpathTV.DecMapIntfFloat64V(v, false, d)
- case *map[interface{}]float64:
- var v2 map[interface{}]float64
- v2, changed = fastpathTV.DecMapIntfFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[interface{}]bool:
- fastpathTV.DecMapIntfBoolV(v, false, d)
- case *map[interface{}]bool:
- var v2 map[interface{}]bool
- v2, changed = fastpathTV.DecMapIntfBoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]interface{}:
- fastpathTV.DecMapStringIntfV(v, false, d)
- case *map[string]interface{}:
- var v2 map[string]interface{}
- v2, changed = fastpathTV.DecMapStringIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]string:
- fastpathTV.DecMapStringStringV(v, false, d)
- case *map[string]string:
- var v2 map[string]string
- v2, changed = fastpathTV.DecMapStringStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]uint:
- fastpathTV.DecMapStringUintV(v, false, d)
- case *map[string]uint:
- var v2 map[string]uint
- v2, changed = fastpathTV.DecMapStringUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]uint8:
- fastpathTV.DecMapStringUint8V(v, false, d)
- case *map[string]uint8:
- var v2 map[string]uint8
- v2, changed = fastpathTV.DecMapStringUint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]uint16:
- fastpathTV.DecMapStringUint16V(v, false, d)
- case *map[string]uint16:
- var v2 map[string]uint16
- v2, changed = fastpathTV.DecMapStringUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]uint32:
- fastpathTV.DecMapStringUint32V(v, false, d)
- case *map[string]uint32:
- var v2 map[string]uint32
- v2, changed = fastpathTV.DecMapStringUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]uint64:
- fastpathTV.DecMapStringUint64V(v, false, d)
- case *map[string]uint64:
- var v2 map[string]uint64
- v2, changed = fastpathTV.DecMapStringUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]uintptr:
- fastpathTV.DecMapStringUintptrV(v, false, d)
- case *map[string]uintptr:
- var v2 map[string]uintptr
- v2, changed = fastpathTV.DecMapStringUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]int:
- fastpathTV.DecMapStringIntV(v, false, d)
- case *map[string]int:
- var v2 map[string]int
- v2, changed = fastpathTV.DecMapStringIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]int8:
- fastpathTV.DecMapStringInt8V(v, false, d)
- case *map[string]int8:
- var v2 map[string]int8
- v2, changed = fastpathTV.DecMapStringInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]int16:
- fastpathTV.DecMapStringInt16V(v, false, d)
- case *map[string]int16:
- var v2 map[string]int16
- v2, changed = fastpathTV.DecMapStringInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]int32:
- fastpathTV.DecMapStringInt32V(v, false, d)
- case *map[string]int32:
- var v2 map[string]int32
- v2, changed = fastpathTV.DecMapStringInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]int64:
- fastpathTV.DecMapStringInt64V(v, false, d)
- case *map[string]int64:
- var v2 map[string]int64
- v2, changed = fastpathTV.DecMapStringInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]float32:
- fastpathTV.DecMapStringFloat32V(v, false, d)
- case *map[string]float32:
- var v2 map[string]float32
- v2, changed = fastpathTV.DecMapStringFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]float64:
- fastpathTV.DecMapStringFloat64V(v, false, d)
- case *map[string]float64:
- var v2 map[string]float64
- v2, changed = fastpathTV.DecMapStringFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[string]bool:
- fastpathTV.DecMapStringBoolV(v, false, d)
- case *map[string]bool:
- var v2 map[string]bool
- v2, changed = fastpathTV.DecMapStringBoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]interface{}:
- fastpathTV.DecMapFloat32IntfV(v, false, d)
- case *map[float32]interface{}:
- var v2 map[float32]interface{}
- v2, changed = fastpathTV.DecMapFloat32IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]string:
- fastpathTV.DecMapFloat32StringV(v, false, d)
- case *map[float32]string:
- var v2 map[float32]string
- v2, changed = fastpathTV.DecMapFloat32StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]uint:
- fastpathTV.DecMapFloat32UintV(v, false, d)
- case *map[float32]uint:
- var v2 map[float32]uint
- v2, changed = fastpathTV.DecMapFloat32UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]uint8:
- fastpathTV.DecMapFloat32Uint8V(v, false, d)
- case *map[float32]uint8:
- var v2 map[float32]uint8
- v2, changed = fastpathTV.DecMapFloat32Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]uint16:
- fastpathTV.DecMapFloat32Uint16V(v, false, d)
- case *map[float32]uint16:
- var v2 map[float32]uint16
- v2, changed = fastpathTV.DecMapFloat32Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]uint32:
- fastpathTV.DecMapFloat32Uint32V(v, false, d)
- case *map[float32]uint32:
- var v2 map[float32]uint32
- v2, changed = fastpathTV.DecMapFloat32Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]uint64:
- fastpathTV.DecMapFloat32Uint64V(v, false, d)
- case *map[float32]uint64:
- var v2 map[float32]uint64
- v2, changed = fastpathTV.DecMapFloat32Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]uintptr:
- fastpathTV.DecMapFloat32UintptrV(v, false, d)
- case *map[float32]uintptr:
- var v2 map[float32]uintptr
- v2, changed = fastpathTV.DecMapFloat32UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]int:
- fastpathTV.DecMapFloat32IntV(v, false, d)
- case *map[float32]int:
- var v2 map[float32]int
- v2, changed = fastpathTV.DecMapFloat32IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]int8:
- fastpathTV.DecMapFloat32Int8V(v, false, d)
- case *map[float32]int8:
- var v2 map[float32]int8
- v2, changed = fastpathTV.DecMapFloat32Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]int16:
- fastpathTV.DecMapFloat32Int16V(v, false, d)
- case *map[float32]int16:
- var v2 map[float32]int16
- v2, changed = fastpathTV.DecMapFloat32Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]int32:
- fastpathTV.DecMapFloat32Int32V(v, false, d)
- case *map[float32]int32:
- var v2 map[float32]int32
- v2, changed = fastpathTV.DecMapFloat32Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]int64:
- fastpathTV.DecMapFloat32Int64V(v, false, d)
- case *map[float32]int64:
- var v2 map[float32]int64
- v2, changed = fastpathTV.DecMapFloat32Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]float32:
- fastpathTV.DecMapFloat32Float32V(v, false, d)
- case *map[float32]float32:
- var v2 map[float32]float32
- v2, changed = fastpathTV.DecMapFloat32Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]float64:
- fastpathTV.DecMapFloat32Float64V(v, false, d)
- case *map[float32]float64:
- var v2 map[float32]float64
- v2, changed = fastpathTV.DecMapFloat32Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float32]bool:
- fastpathTV.DecMapFloat32BoolV(v, false, d)
- case *map[float32]bool:
- var v2 map[float32]bool
- v2, changed = fastpathTV.DecMapFloat32BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]interface{}:
- fastpathTV.DecMapFloat64IntfV(v, false, d)
- case *map[float64]interface{}:
- var v2 map[float64]interface{}
- v2, changed = fastpathTV.DecMapFloat64IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]string:
- fastpathTV.DecMapFloat64StringV(v, false, d)
- case *map[float64]string:
- var v2 map[float64]string
- v2, changed = fastpathTV.DecMapFloat64StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]uint:
- fastpathTV.DecMapFloat64UintV(v, false, d)
- case *map[float64]uint:
- var v2 map[float64]uint
- v2, changed = fastpathTV.DecMapFloat64UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]uint8:
- fastpathTV.DecMapFloat64Uint8V(v, false, d)
- case *map[float64]uint8:
- var v2 map[float64]uint8
- v2, changed = fastpathTV.DecMapFloat64Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]uint16:
- fastpathTV.DecMapFloat64Uint16V(v, false, d)
- case *map[float64]uint16:
- var v2 map[float64]uint16
- v2, changed = fastpathTV.DecMapFloat64Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]uint32:
- fastpathTV.DecMapFloat64Uint32V(v, false, d)
- case *map[float64]uint32:
- var v2 map[float64]uint32
- v2, changed = fastpathTV.DecMapFloat64Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]uint64:
- fastpathTV.DecMapFloat64Uint64V(v, false, d)
- case *map[float64]uint64:
- var v2 map[float64]uint64
- v2, changed = fastpathTV.DecMapFloat64Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]uintptr:
- fastpathTV.DecMapFloat64UintptrV(v, false, d)
- case *map[float64]uintptr:
- var v2 map[float64]uintptr
- v2, changed = fastpathTV.DecMapFloat64UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]int:
- fastpathTV.DecMapFloat64IntV(v, false, d)
- case *map[float64]int:
- var v2 map[float64]int
- v2, changed = fastpathTV.DecMapFloat64IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]int8:
- fastpathTV.DecMapFloat64Int8V(v, false, d)
- case *map[float64]int8:
- var v2 map[float64]int8
- v2, changed = fastpathTV.DecMapFloat64Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]int16:
- fastpathTV.DecMapFloat64Int16V(v, false, d)
- case *map[float64]int16:
- var v2 map[float64]int16
- v2, changed = fastpathTV.DecMapFloat64Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]int32:
- fastpathTV.DecMapFloat64Int32V(v, false, d)
- case *map[float64]int32:
- var v2 map[float64]int32
- v2, changed = fastpathTV.DecMapFloat64Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]int64:
- fastpathTV.DecMapFloat64Int64V(v, false, d)
- case *map[float64]int64:
- var v2 map[float64]int64
- v2, changed = fastpathTV.DecMapFloat64Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]float32:
- fastpathTV.DecMapFloat64Float32V(v, false, d)
- case *map[float64]float32:
- var v2 map[float64]float32
- v2, changed = fastpathTV.DecMapFloat64Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]float64:
- fastpathTV.DecMapFloat64Float64V(v, false, d)
- case *map[float64]float64:
- var v2 map[float64]float64
- v2, changed = fastpathTV.DecMapFloat64Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[float64]bool:
- fastpathTV.DecMapFloat64BoolV(v, false, d)
- case *map[float64]bool:
- var v2 map[float64]bool
- v2, changed = fastpathTV.DecMapFloat64BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]interface{}:
- fastpathTV.DecMapUintIntfV(v, false, d)
- case *map[uint]interface{}:
- var v2 map[uint]interface{}
- v2, changed = fastpathTV.DecMapUintIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]string:
- fastpathTV.DecMapUintStringV(v, false, d)
- case *map[uint]string:
- var v2 map[uint]string
- v2, changed = fastpathTV.DecMapUintStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]uint:
- fastpathTV.DecMapUintUintV(v, false, d)
- case *map[uint]uint:
- var v2 map[uint]uint
- v2, changed = fastpathTV.DecMapUintUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]uint8:
- fastpathTV.DecMapUintUint8V(v, false, d)
- case *map[uint]uint8:
- var v2 map[uint]uint8
- v2, changed = fastpathTV.DecMapUintUint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]uint16:
- fastpathTV.DecMapUintUint16V(v, false, d)
- case *map[uint]uint16:
- var v2 map[uint]uint16
- v2, changed = fastpathTV.DecMapUintUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]uint32:
- fastpathTV.DecMapUintUint32V(v, false, d)
- case *map[uint]uint32:
- var v2 map[uint]uint32
- v2, changed = fastpathTV.DecMapUintUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]uint64:
- fastpathTV.DecMapUintUint64V(v, false, d)
- case *map[uint]uint64:
- var v2 map[uint]uint64
- v2, changed = fastpathTV.DecMapUintUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]uintptr:
- fastpathTV.DecMapUintUintptrV(v, false, d)
- case *map[uint]uintptr:
- var v2 map[uint]uintptr
- v2, changed = fastpathTV.DecMapUintUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]int:
- fastpathTV.DecMapUintIntV(v, false, d)
- case *map[uint]int:
- var v2 map[uint]int
- v2, changed = fastpathTV.DecMapUintIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]int8:
- fastpathTV.DecMapUintInt8V(v, false, d)
- case *map[uint]int8:
- var v2 map[uint]int8
- v2, changed = fastpathTV.DecMapUintInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]int16:
- fastpathTV.DecMapUintInt16V(v, false, d)
- case *map[uint]int16:
- var v2 map[uint]int16
- v2, changed = fastpathTV.DecMapUintInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]int32:
- fastpathTV.DecMapUintInt32V(v, false, d)
- case *map[uint]int32:
- var v2 map[uint]int32
- v2, changed = fastpathTV.DecMapUintInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]int64:
- fastpathTV.DecMapUintInt64V(v, false, d)
- case *map[uint]int64:
- var v2 map[uint]int64
- v2, changed = fastpathTV.DecMapUintInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]float32:
- fastpathTV.DecMapUintFloat32V(v, false, d)
- case *map[uint]float32:
- var v2 map[uint]float32
- v2, changed = fastpathTV.DecMapUintFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]float64:
- fastpathTV.DecMapUintFloat64V(v, false, d)
- case *map[uint]float64:
- var v2 map[uint]float64
- v2, changed = fastpathTV.DecMapUintFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint]bool:
- fastpathTV.DecMapUintBoolV(v, false, d)
- case *map[uint]bool:
- var v2 map[uint]bool
- v2, changed = fastpathTV.DecMapUintBoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]interface{}:
- fastpathTV.DecMapUint8IntfV(v, false, d)
- case *map[uint8]interface{}:
- var v2 map[uint8]interface{}
- v2, changed = fastpathTV.DecMapUint8IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]string:
- fastpathTV.DecMapUint8StringV(v, false, d)
- case *map[uint8]string:
- var v2 map[uint8]string
- v2, changed = fastpathTV.DecMapUint8StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]uint:
- fastpathTV.DecMapUint8UintV(v, false, d)
- case *map[uint8]uint:
- var v2 map[uint8]uint
- v2, changed = fastpathTV.DecMapUint8UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]uint8:
- fastpathTV.DecMapUint8Uint8V(v, false, d)
- case *map[uint8]uint8:
- var v2 map[uint8]uint8
- v2, changed = fastpathTV.DecMapUint8Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]uint16:
- fastpathTV.DecMapUint8Uint16V(v, false, d)
- case *map[uint8]uint16:
- var v2 map[uint8]uint16
- v2, changed = fastpathTV.DecMapUint8Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]uint32:
- fastpathTV.DecMapUint8Uint32V(v, false, d)
- case *map[uint8]uint32:
- var v2 map[uint8]uint32
- v2, changed = fastpathTV.DecMapUint8Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]uint64:
- fastpathTV.DecMapUint8Uint64V(v, false, d)
- case *map[uint8]uint64:
- var v2 map[uint8]uint64
- v2, changed = fastpathTV.DecMapUint8Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]uintptr:
- fastpathTV.DecMapUint8UintptrV(v, false, d)
- case *map[uint8]uintptr:
- var v2 map[uint8]uintptr
- v2, changed = fastpathTV.DecMapUint8UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]int:
- fastpathTV.DecMapUint8IntV(v, false, d)
- case *map[uint8]int:
- var v2 map[uint8]int
- v2, changed = fastpathTV.DecMapUint8IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]int8:
- fastpathTV.DecMapUint8Int8V(v, false, d)
- case *map[uint8]int8:
- var v2 map[uint8]int8
- v2, changed = fastpathTV.DecMapUint8Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]int16:
- fastpathTV.DecMapUint8Int16V(v, false, d)
- case *map[uint8]int16:
- var v2 map[uint8]int16
- v2, changed = fastpathTV.DecMapUint8Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]int32:
- fastpathTV.DecMapUint8Int32V(v, false, d)
- case *map[uint8]int32:
- var v2 map[uint8]int32
- v2, changed = fastpathTV.DecMapUint8Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]int64:
- fastpathTV.DecMapUint8Int64V(v, false, d)
- case *map[uint8]int64:
- var v2 map[uint8]int64
- v2, changed = fastpathTV.DecMapUint8Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]float32:
- fastpathTV.DecMapUint8Float32V(v, false, d)
- case *map[uint8]float32:
- var v2 map[uint8]float32
- v2, changed = fastpathTV.DecMapUint8Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]float64:
- fastpathTV.DecMapUint8Float64V(v, false, d)
- case *map[uint8]float64:
- var v2 map[uint8]float64
- v2, changed = fastpathTV.DecMapUint8Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint8]bool:
- fastpathTV.DecMapUint8BoolV(v, false, d)
- case *map[uint8]bool:
- var v2 map[uint8]bool
- v2, changed = fastpathTV.DecMapUint8BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]interface{}:
- fastpathTV.DecMapUint16IntfV(v, false, d)
- case *map[uint16]interface{}:
- var v2 map[uint16]interface{}
- v2, changed = fastpathTV.DecMapUint16IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]string:
- fastpathTV.DecMapUint16StringV(v, false, d)
- case *map[uint16]string:
- var v2 map[uint16]string
- v2, changed = fastpathTV.DecMapUint16StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]uint:
- fastpathTV.DecMapUint16UintV(v, false, d)
- case *map[uint16]uint:
- var v2 map[uint16]uint
- v2, changed = fastpathTV.DecMapUint16UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]uint8:
- fastpathTV.DecMapUint16Uint8V(v, false, d)
- case *map[uint16]uint8:
- var v2 map[uint16]uint8
- v2, changed = fastpathTV.DecMapUint16Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]uint16:
- fastpathTV.DecMapUint16Uint16V(v, false, d)
- case *map[uint16]uint16:
- var v2 map[uint16]uint16
- v2, changed = fastpathTV.DecMapUint16Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]uint32:
- fastpathTV.DecMapUint16Uint32V(v, false, d)
- case *map[uint16]uint32:
- var v2 map[uint16]uint32
- v2, changed = fastpathTV.DecMapUint16Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]uint64:
- fastpathTV.DecMapUint16Uint64V(v, false, d)
- case *map[uint16]uint64:
- var v2 map[uint16]uint64
- v2, changed = fastpathTV.DecMapUint16Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]uintptr:
- fastpathTV.DecMapUint16UintptrV(v, false, d)
- case *map[uint16]uintptr:
- var v2 map[uint16]uintptr
- v2, changed = fastpathTV.DecMapUint16UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]int:
- fastpathTV.DecMapUint16IntV(v, false, d)
- case *map[uint16]int:
- var v2 map[uint16]int
- v2, changed = fastpathTV.DecMapUint16IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]int8:
- fastpathTV.DecMapUint16Int8V(v, false, d)
- case *map[uint16]int8:
- var v2 map[uint16]int8
- v2, changed = fastpathTV.DecMapUint16Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]int16:
- fastpathTV.DecMapUint16Int16V(v, false, d)
- case *map[uint16]int16:
- var v2 map[uint16]int16
- v2, changed = fastpathTV.DecMapUint16Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]int32:
- fastpathTV.DecMapUint16Int32V(v, false, d)
- case *map[uint16]int32:
- var v2 map[uint16]int32
- v2, changed = fastpathTV.DecMapUint16Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]int64:
- fastpathTV.DecMapUint16Int64V(v, false, d)
- case *map[uint16]int64:
- var v2 map[uint16]int64
- v2, changed = fastpathTV.DecMapUint16Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]float32:
- fastpathTV.DecMapUint16Float32V(v, false, d)
- case *map[uint16]float32:
- var v2 map[uint16]float32
- v2, changed = fastpathTV.DecMapUint16Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]float64:
- fastpathTV.DecMapUint16Float64V(v, false, d)
- case *map[uint16]float64:
- var v2 map[uint16]float64
- v2, changed = fastpathTV.DecMapUint16Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint16]bool:
- fastpathTV.DecMapUint16BoolV(v, false, d)
- case *map[uint16]bool:
- var v2 map[uint16]bool
- v2, changed = fastpathTV.DecMapUint16BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]interface{}:
- fastpathTV.DecMapUint32IntfV(v, false, d)
- case *map[uint32]interface{}:
- var v2 map[uint32]interface{}
- v2, changed = fastpathTV.DecMapUint32IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]string:
- fastpathTV.DecMapUint32StringV(v, false, d)
- case *map[uint32]string:
- var v2 map[uint32]string
- v2, changed = fastpathTV.DecMapUint32StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]uint:
- fastpathTV.DecMapUint32UintV(v, false, d)
- case *map[uint32]uint:
- var v2 map[uint32]uint
- v2, changed = fastpathTV.DecMapUint32UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]uint8:
- fastpathTV.DecMapUint32Uint8V(v, false, d)
- case *map[uint32]uint8:
- var v2 map[uint32]uint8
- v2, changed = fastpathTV.DecMapUint32Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]uint16:
- fastpathTV.DecMapUint32Uint16V(v, false, d)
- case *map[uint32]uint16:
- var v2 map[uint32]uint16
- v2, changed = fastpathTV.DecMapUint32Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]uint32:
- fastpathTV.DecMapUint32Uint32V(v, false, d)
- case *map[uint32]uint32:
- var v2 map[uint32]uint32
- v2, changed = fastpathTV.DecMapUint32Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]uint64:
- fastpathTV.DecMapUint32Uint64V(v, false, d)
- case *map[uint32]uint64:
- var v2 map[uint32]uint64
- v2, changed = fastpathTV.DecMapUint32Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]uintptr:
- fastpathTV.DecMapUint32UintptrV(v, false, d)
- case *map[uint32]uintptr:
- var v2 map[uint32]uintptr
- v2, changed = fastpathTV.DecMapUint32UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]int:
- fastpathTV.DecMapUint32IntV(v, false, d)
- case *map[uint32]int:
- var v2 map[uint32]int
- v2, changed = fastpathTV.DecMapUint32IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]int8:
- fastpathTV.DecMapUint32Int8V(v, false, d)
- case *map[uint32]int8:
- var v2 map[uint32]int8
- v2, changed = fastpathTV.DecMapUint32Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]int16:
- fastpathTV.DecMapUint32Int16V(v, false, d)
- case *map[uint32]int16:
- var v2 map[uint32]int16
- v2, changed = fastpathTV.DecMapUint32Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]int32:
- fastpathTV.DecMapUint32Int32V(v, false, d)
- case *map[uint32]int32:
- var v2 map[uint32]int32
- v2, changed = fastpathTV.DecMapUint32Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]int64:
- fastpathTV.DecMapUint32Int64V(v, false, d)
- case *map[uint32]int64:
- var v2 map[uint32]int64
- v2, changed = fastpathTV.DecMapUint32Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]float32:
- fastpathTV.DecMapUint32Float32V(v, false, d)
- case *map[uint32]float32:
- var v2 map[uint32]float32
- v2, changed = fastpathTV.DecMapUint32Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]float64:
- fastpathTV.DecMapUint32Float64V(v, false, d)
- case *map[uint32]float64:
- var v2 map[uint32]float64
- v2, changed = fastpathTV.DecMapUint32Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint32]bool:
- fastpathTV.DecMapUint32BoolV(v, false, d)
- case *map[uint32]bool:
- var v2 map[uint32]bool
- v2, changed = fastpathTV.DecMapUint32BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]interface{}:
- fastpathTV.DecMapUint64IntfV(v, false, d)
- case *map[uint64]interface{}:
- var v2 map[uint64]interface{}
- v2, changed = fastpathTV.DecMapUint64IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]string:
- fastpathTV.DecMapUint64StringV(v, false, d)
- case *map[uint64]string:
- var v2 map[uint64]string
- v2, changed = fastpathTV.DecMapUint64StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]uint:
- fastpathTV.DecMapUint64UintV(v, false, d)
- case *map[uint64]uint:
- var v2 map[uint64]uint
- v2, changed = fastpathTV.DecMapUint64UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]uint8:
- fastpathTV.DecMapUint64Uint8V(v, false, d)
- case *map[uint64]uint8:
- var v2 map[uint64]uint8
- v2, changed = fastpathTV.DecMapUint64Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]uint16:
- fastpathTV.DecMapUint64Uint16V(v, false, d)
- case *map[uint64]uint16:
- var v2 map[uint64]uint16
- v2, changed = fastpathTV.DecMapUint64Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]uint32:
- fastpathTV.DecMapUint64Uint32V(v, false, d)
- case *map[uint64]uint32:
- var v2 map[uint64]uint32
- v2, changed = fastpathTV.DecMapUint64Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]uint64:
- fastpathTV.DecMapUint64Uint64V(v, false, d)
- case *map[uint64]uint64:
- var v2 map[uint64]uint64
- v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]uintptr:
- fastpathTV.DecMapUint64UintptrV(v, false, d)
- case *map[uint64]uintptr:
- var v2 map[uint64]uintptr
- v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]int:
- fastpathTV.DecMapUint64IntV(v, false, d)
- case *map[uint64]int:
- var v2 map[uint64]int
- v2, changed = fastpathTV.DecMapUint64IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]int8:
- fastpathTV.DecMapUint64Int8V(v, false, d)
- case *map[uint64]int8:
- var v2 map[uint64]int8
- v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]int16:
- fastpathTV.DecMapUint64Int16V(v, false, d)
- case *map[uint64]int16:
- var v2 map[uint64]int16
- v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]int32:
- fastpathTV.DecMapUint64Int32V(v, false, d)
- case *map[uint64]int32:
- var v2 map[uint64]int32
- v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]int64:
- fastpathTV.DecMapUint64Int64V(v, false, d)
- case *map[uint64]int64:
- var v2 map[uint64]int64
- v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]float32:
- fastpathTV.DecMapUint64Float32V(v, false, d)
- case *map[uint64]float32:
- var v2 map[uint64]float32
- v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]float64:
- fastpathTV.DecMapUint64Float64V(v, false, d)
- case *map[uint64]float64:
- var v2 map[uint64]float64
- v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uint64]bool:
- fastpathTV.DecMapUint64BoolV(v, false, d)
- case *map[uint64]bool:
- var v2 map[uint64]bool
- v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]interface{}:
- fastpathTV.DecMapUintptrIntfV(v, false, d)
- case *map[uintptr]interface{}:
- var v2 map[uintptr]interface{}
- v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]string:
- fastpathTV.DecMapUintptrStringV(v, false, d)
- case *map[uintptr]string:
- var v2 map[uintptr]string
- v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]uint:
- fastpathTV.DecMapUintptrUintV(v, false, d)
- case *map[uintptr]uint:
- var v2 map[uintptr]uint
- v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]uint8:
- fastpathTV.DecMapUintptrUint8V(v, false, d)
- case *map[uintptr]uint8:
- var v2 map[uintptr]uint8
- v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]uint16:
- fastpathTV.DecMapUintptrUint16V(v, false, d)
- case *map[uintptr]uint16:
- var v2 map[uintptr]uint16
- v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]uint32:
- fastpathTV.DecMapUintptrUint32V(v, false, d)
- case *map[uintptr]uint32:
- var v2 map[uintptr]uint32
- v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]uint64:
- fastpathTV.DecMapUintptrUint64V(v, false, d)
- case *map[uintptr]uint64:
- var v2 map[uintptr]uint64
- v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]uintptr:
- fastpathTV.DecMapUintptrUintptrV(v, false, d)
- case *map[uintptr]uintptr:
- var v2 map[uintptr]uintptr
- v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]int:
- fastpathTV.DecMapUintptrIntV(v, false, d)
- case *map[uintptr]int:
- var v2 map[uintptr]int
- v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]int8:
- fastpathTV.DecMapUintptrInt8V(v, false, d)
- case *map[uintptr]int8:
- var v2 map[uintptr]int8
- v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]int16:
- fastpathTV.DecMapUintptrInt16V(v, false, d)
- case *map[uintptr]int16:
- var v2 map[uintptr]int16
- v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]int32:
- fastpathTV.DecMapUintptrInt32V(v, false, d)
- case *map[uintptr]int32:
- var v2 map[uintptr]int32
- v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]int64:
- fastpathTV.DecMapUintptrInt64V(v, false, d)
- case *map[uintptr]int64:
- var v2 map[uintptr]int64
- v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]float32:
- fastpathTV.DecMapUintptrFloat32V(v, false, d)
- case *map[uintptr]float32:
- var v2 map[uintptr]float32
- v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]float64:
- fastpathTV.DecMapUintptrFloat64V(v, false, d)
- case *map[uintptr]float64:
- var v2 map[uintptr]float64
- v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[uintptr]bool:
- fastpathTV.DecMapUintptrBoolV(v, false, d)
- case *map[uintptr]bool:
- var v2 map[uintptr]bool
- v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]interface{}:
- fastpathTV.DecMapIntIntfV(v, false, d)
- case *map[int]interface{}:
- var v2 map[int]interface{}
- v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]string:
- fastpathTV.DecMapIntStringV(v, false, d)
- case *map[int]string:
- var v2 map[int]string
- v2, changed = fastpathTV.DecMapIntStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]uint:
- fastpathTV.DecMapIntUintV(v, false, d)
- case *map[int]uint:
- var v2 map[int]uint
- v2, changed = fastpathTV.DecMapIntUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]uint8:
- fastpathTV.DecMapIntUint8V(v, false, d)
- case *map[int]uint8:
- var v2 map[int]uint8
- v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]uint16:
- fastpathTV.DecMapIntUint16V(v, false, d)
- case *map[int]uint16:
- var v2 map[int]uint16
- v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]uint32:
- fastpathTV.DecMapIntUint32V(v, false, d)
- case *map[int]uint32:
- var v2 map[int]uint32
- v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]uint64:
- fastpathTV.DecMapIntUint64V(v, false, d)
- case *map[int]uint64:
- var v2 map[int]uint64
- v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]uintptr:
- fastpathTV.DecMapIntUintptrV(v, false, d)
- case *map[int]uintptr:
- var v2 map[int]uintptr
- v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]int:
- fastpathTV.DecMapIntIntV(v, false, d)
- case *map[int]int:
- var v2 map[int]int
- v2, changed = fastpathTV.DecMapIntIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]int8:
- fastpathTV.DecMapIntInt8V(v, false, d)
- case *map[int]int8:
- var v2 map[int]int8
- v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]int16:
- fastpathTV.DecMapIntInt16V(v, false, d)
- case *map[int]int16:
- var v2 map[int]int16
- v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]int32:
- fastpathTV.DecMapIntInt32V(v, false, d)
- case *map[int]int32:
- var v2 map[int]int32
- v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]int64:
- fastpathTV.DecMapIntInt64V(v, false, d)
- case *map[int]int64:
- var v2 map[int]int64
- v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]float32:
- fastpathTV.DecMapIntFloat32V(v, false, d)
- case *map[int]float32:
- var v2 map[int]float32
- v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]float64:
- fastpathTV.DecMapIntFloat64V(v, false, d)
- case *map[int]float64:
- var v2 map[int]float64
- v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int]bool:
- fastpathTV.DecMapIntBoolV(v, false, d)
- case *map[int]bool:
- var v2 map[int]bool
- v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]interface{}:
- fastpathTV.DecMapInt8IntfV(v, false, d)
- case *map[int8]interface{}:
- var v2 map[int8]interface{}
- v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]string:
- fastpathTV.DecMapInt8StringV(v, false, d)
- case *map[int8]string:
- var v2 map[int8]string
- v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]uint:
- fastpathTV.DecMapInt8UintV(v, false, d)
- case *map[int8]uint:
- var v2 map[int8]uint
- v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]uint8:
- fastpathTV.DecMapInt8Uint8V(v, false, d)
- case *map[int8]uint8:
- var v2 map[int8]uint8
- v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]uint16:
- fastpathTV.DecMapInt8Uint16V(v, false, d)
- case *map[int8]uint16:
- var v2 map[int8]uint16
- v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]uint32:
- fastpathTV.DecMapInt8Uint32V(v, false, d)
- case *map[int8]uint32:
- var v2 map[int8]uint32
- v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]uint64:
- fastpathTV.DecMapInt8Uint64V(v, false, d)
- case *map[int8]uint64:
- var v2 map[int8]uint64
- v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]uintptr:
- fastpathTV.DecMapInt8UintptrV(v, false, d)
- case *map[int8]uintptr:
- var v2 map[int8]uintptr
- v2, changed = fastpathTV.DecMapInt8UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]int:
- fastpathTV.DecMapInt8IntV(v, false, d)
- case *map[int8]int:
- var v2 map[int8]int
- v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]int8:
- fastpathTV.DecMapInt8Int8V(v, false, d)
- case *map[int8]int8:
- var v2 map[int8]int8
- v2, changed = fastpathTV.DecMapInt8Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]int16:
- fastpathTV.DecMapInt8Int16V(v, false, d)
- case *map[int8]int16:
- var v2 map[int8]int16
- v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]int32:
- fastpathTV.DecMapInt8Int32V(v, false, d)
- case *map[int8]int32:
- var v2 map[int8]int32
- v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]int64:
- fastpathTV.DecMapInt8Int64V(v, false, d)
- case *map[int8]int64:
- var v2 map[int8]int64
- v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]float32:
- fastpathTV.DecMapInt8Float32V(v, false, d)
- case *map[int8]float32:
- var v2 map[int8]float32
- v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]float64:
- fastpathTV.DecMapInt8Float64V(v, false, d)
- case *map[int8]float64:
- var v2 map[int8]float64
- v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int8]bool:
- fastpathTV.DecMapInt8BoolV(v, false, d)
- case *map[int8]bool:
- var v2 map[int8]bool
- v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]interface{}:
- fastpathTV.DecMapInt16IntfV(v, false, d)
- case *map[int16]interface{}:
- var v2 map[int16]interface{}
- v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]string:
- fastpathTV.DecMapInt16StringV(v, false, d)
- case *map[int16]string:
- var v2 map[int16]string
- v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]uint:
- fastpathTV.DecMapInt16UintV(v, false, d)
- case *map[int16]uint:
- var v2 map[int16]uint
- v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]uint8:
- fastpathTV.DecMapInt16Uint8V(v, false, d)
- case *map[int16]uint8:
- var v2 map[int16]uint8
- v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]uint16:
- fastpathTV.DecMapInt16Uint16V(v, false, d)
- case *map[int16]uint16:
- var v2 map[int16]uint16
- v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]uint32:
- fastpathTV.DecMapInt16Uint32V(v, false, d)
- case *map[int16]uint32:
- var v2 map[int16]uint32
- v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]uint64:
- fastpathTV.DecMapInt16Uint64V(v, false, d)
- case *map[int16]uint64:
- var v2 map[int16]uint64
- v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]uintptr:
- fastpathTV.DecMapInt16UintptrV(v, false, d)
- case *map[int16]uintptr:
- var v2 map[int16]uintptr
- v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]int:
- fastpathTV.DecMapInt16IntV(v, false, d)
- case *map[int16]int:
- var v2 map[int16]int
- v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]int8:
- fastpathTV.DecMapInt16Int8V(v, false, d)
- case *map[int16]int8:
- var v2 map[int16]int8
- v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]int16:
- fastpathTV.DecMapInt16Int16V(v, false, d)
- case *map[int16]int16:
- var v2 map[int16]int16
- v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]int32:
- fastpathTV.DecMapInt16Int32V(v, false, d)
- case *map[int16]int32:
- var v2 map[int16]int32
- v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]int64:
- fastpathTV.DecMapInt16Int64V(v, false, d)
- case *map[int16]int64:
- var v2 map[int16]int64
- v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]float32:
- fastpathTV.DecMapInt16Float32V(v, false, d)
- case *map[int16]float32:
- var v2 map[int16]float32
- v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]float64:
- fastpathTV.DecMapInt16Float64V(v, false, d)
- case *map[int16]float64:
- var v2 map[int16]float64
- v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int16]bool:
- fastpathTV.DecMapInt16BoolV(v, false, d)
- case *map[int16]bool:
- var v2 map[int16]bool
- v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]interface{}:
- fastpathTV.DecMapInt32IntfV(v, false, d)
- case *map[int32]interface{}:
- var v2 map[int32]interface{}
- v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]string:
- fastpathTV.DecMapInt32StringV(v, false, d)
- case *map[int32]string:
- var v2 map[int32]string
- v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]uint:
- fastpathTV.DecMapInt32UintV(v, false, d)
- case *map[int32]uint:
- var v2 map[int32]uint
- v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]uint8:
- fastpathTV.DecMapInt32Uint8V(v, false, d)
- case *map[int32]uint8:
- var v2 map[int32]uint8
- v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]uint16:
- fastpathTV.DecMapInt32Uint16V(v, false, d)
- case *map[int32]uint16:
- var v2 map[int32]uint16
- v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]uint32:
- fastpathTV.DecMapInt32Uint32V(v, false, d)
- case *map[int32]uint32:
- var v2 map[int32]uint32
- v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]uint64:
- fastpathTV.DecMapInt32Uint64V(v, false, d)
- case *map[int32]uint64:
- var v2 map[int32]uint64
- v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]uintptr:
- fastpathTV.DecMapInt32UintptrV(v, false, d)
- case *map[int32]uintptr:
- var v2 map[int32]uintptr
- v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]int:
- fastpathTV.DecMapInt32IntV(v, false, d)
- case *map[int32]int:
- var v2 map[int32]int
- v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]int8:
- fastpathTV.DecMapInt32Int8V(v, false, d)
- case *map[int32]int8:
- var v2 map[int32]int8
- v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]int16:
- fastpathTV.DecMapInt32Int16V(v, false, d)
- case *map[int32]int16:
- var v2 map[int32]int16
- v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]int32:
- fastpathTV.DecMapInt32Int32V(v, false, d)
- case *map[int32]int32:
- var v2 map[int32]int32
- v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]int64:
- fastpathTV.DecMapInt32Int64V(v, false, d)
- case *map[int32]int64:
- var v2 map[int32]int64
- v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]float32:
- fastpathTV.DecMapInt32Float32V(v, false, d)
- case *map[int32]float32:
- var v2 map[int32]float32
- v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]float64:
- fastpathTV.DecMapInt32Float64V(v, false, d)
- case *map[int32]float64:
- var v2 map[int32]float64
- v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int32]bool:
- fastpathTV.DecMapInt32BoolV(v, false, d)
- case *map[int32]bool:
- var v2 map[int32]bool
- v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]interface{}:
- fastpathTV.DecMapInt64IntfV(v, false, d)
- case *map[int64]interface{}:
- var v2 map[int64]interface{}
- v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]string:
- fastpathTV.DecMapInt64StringV(v, false, d)
- case *map[int64]string:
- var v2 map[int64]string
- v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]uint:
- fastpathTV.DecMapInt64UintV(v, false, d)
- case *map[int64]uint:
- var v2 map[int64]uint
- v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]uint8:
- fastpathTV.DecMapInt64Uint8V(v, false, d)
- case *map[int64]uint8:
- var v2 map[int64]uint8
- v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]uint16:
- fastpathTV.DecMapInt64Uint16V(v, false, d)
- case *map[int64]uint16:
- var v2 map[int64]uint16
- v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]uint32:
- fastpathTV.DecMapInt64Uint32V(v, false, d)
- case *map[int64]uint32:
- var v2 map[int64]uint32
- v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]uint64:
- fastpathTV.DecMapInt64Uint64V(v, false, d)
- case *map[int64]uint64:
- var v2 map[int64]uint64
- v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]uintptr:
- fastpathTV.DecMapInt64UintptrV(v, false, d)
- case *map[int64]uintptr:
- var v2 map[int64]uintptr
- v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]int:
- fastpathTV.DecMapInt64IntV(v, false, d)
- case *map[int64]int:
- var v2 map[int64]int
- v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]int8:
- fastpathTV.DecMapInt64Int8V(v, false, d)
- case *map[int64]int8:
- var v2 map[int64]int8
- v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]int16:
- fastpathTV.DecMapInt64Int16V(v, false, d)
- case *map[int64]int16:
- var v2 map[int64]int16
- v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]int32:
- fastpathTV.DecMapInt64Int32V(v, false, d)
- case *map[int64]int32:
- var v2 map[int64]int32
- v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]int64:
- fastpathTV.DecMapInt64Int64V(v, false, d)
- case *map[int64]int64:
- var v2 map[int64]int64
- v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]float32:
- fastpathTV.DecMapInt64Float32V(v, false, d)
- case *map[int64]float32:
- var v2 map[int64]float32
- v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]float64:
- fastpathTV.DecMapInt64Float64V(v, false, d)
- case *map[int64]float64:
- var v2 map[int64]float64
- v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[int64]bool:
- fastpathTV.DecMapInt64BoolV(v, false, d)
- case *map[int64]bool:
- var v2 map[int64]bool
- v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]interface{}:
- fastpathTV.DecMapBoolIntfV(v, false, d)
- case *map[bool]interface{}:
- var v2 map[bool]interface{}
- v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]string:
- fastpathTV.DecMapBoolStringV(v, false, d)
- case *map[bool]string:
- var v2 map[bool]string
- v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]uint:
- fastpathTV.DecMapBoolUintV(v, false, d)
- case *map[bool]uint:
- var v2 map[bool]uint
- v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]uint8:
- fastpathTV.DecMapBoolUint8V(v, false, d)
- case *map[bool]uint8:
- var v2 map[bool]uint8
- v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]uint16:
- fastpathTV.DecMapBoolUint16V(v, false, d)
- case *map[bool]uint16:
- var v2 map[bool]uint16
- v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]uint32:
- fastpathTV.DecMapBoolUint32V(v, false, d)
- case *map[bool]uint32:
- var v2 map[bool]uint32
- v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]uint64:
- fastpathTV.DecMapBoolUint64V(v, false, d)
- case *map[bool]uint64:
- var v2 map[bool]uint64
- v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]uintptr:
- fastpathTV.DecMapBoolUintptrV(v, false, d)
- case *map[bool]uintptr:
- var v2 map[bool]uintptr
- v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]int:
- fastpathTV.DecMapBoolIntV(v, false, d)
- case *map[bool]int:
- var v2 map[bool]int
- v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]int8:
- fastpathTV.DecMapBoolInt8V(v, false, d)
- case *map[bool]int8:
- var v2 map[bool]int8
- v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]int16:
- fastpathTV.DecMapBoolInt16V(v, false, d)
- case *map[bool]int16:
- var v2 map[bool]int16
- v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]int32:
- fastpathTV.DecMapBoolInt32V(v, false, d)
- case *map[bool]int32:
- var v2 map[bool]int32
- v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]int64:
- fastpathTV.DecMapBoolInt64V(v, false, d)
- case *map[bool]int64:
- var v2 map[bool]int64
- v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]float32:
- fastpathTV.DecMapBoolFloat32V(v, false, d)
- case *map[bool]float32:
- var v2 map[bool]float32
- v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]float64:
- fastpathTV.DecMapBoolFloat64V(v, false, d)
- case *map[bool]float64:
- var v2 map[bool]float64
- v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d)
- if changed {
- *v = v2
- }
- case map[bool]bool:
- fastpathTV.DecMapBoolBoolV(v, false, d)
- case *map[bool]bool:
- var v2 map[bool]bool
- v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d)
- if changed {
- *v = v2
- }
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
- switch v := iv.(type) {
-
- case *[]interface{}:
- *v = nil
- case *[]string:
- *v = nil
- case *[]float32:
- *v = nil
- case *[]float64:
- *v = nil
- case *[]uint:
- *v = nil
- case *[]uint8:
- *v = nil
- case *[]uint16:
- *v = nil
- case *[]uint32:
- *v = nil
- case *[]uint64:
- *v = nil
- case *[]uintptr:
- *v = nil
- case *[]int:
- *v = nil
- case *[]int8:
- *v = nil
- case *[]int16:
- *v = nil
- case *[]int32:
- *v = nil
- case *[]int64:
- *v = nil
- case *[]bool:
- *v = nil
-
- case *map[interface{}]interface{}:
- *v = nil
- case *map[interface{}]string:
- *v = nil
- case *map[interface{}]uint:
- *v = nil
- case *map[interface{}]uint8:
- *v = nil
- case *map[interface{}]uint16:
- *v = nil
- case *map[interface{}]uint32:
- *v = nil
- case *map[interface{}]uint64:
- *v = nil
- case *map[interface{}]uintptr:
- *v = nil
- case *map[interface{}]int:
- *v = nil
- case *map[interface{}]int8:
- *v = nil
- case *map[interface{}]int16:
- *v = nil
- case *map[interface{}]int32:
- *v = nil
- case *map[interface{}]int64:
- *v = nil
- case *map[interface{}]float32:
- *v = nil
- case *map[interface{}]float64:
- *v = nil
- case *map[interface{}]bool:
- *v = nil
- case *map[string]interface{}:
- *v = nil
- case *map[string]string:
- *v = nil
- case *map[string]uint:
- *v = nil
- case *map[string]uint8:
- *v = nil
- case *map[string]uint16:
- *v = nil
- case *map[string]uint32:
- *v = nil
- case *map[string]uint64:
- *v = nil
- case *map[string]uintptr:
- *v = nil
- case *map[string]int:
- *v = nil
- case *map[string]int8:
- *v = nil
- case *map[string]int16:
- *v = nil
- case *map[string]int32:
- *v = nil
- case *map[string]int64:
- *v = nil
- case *map[string]float32:
- *v = nil
- case *map[string]float64:
- *v = nil
- case *map[string]bool:
- *v = nil
- case *map[float32]interface{}:
- *v = nil
- case *map[float32]string:
- *v = nil
- case *map[float32]uint:
- *v = nil
- case *map[float32]uint8:
- *v = nil
- case *map[float32]uint16:
- *v = nil
- case *map[float32]uint32:
- *v = nil
- case *map[float32]uint64:
- *v = nil
- case *map[float32]uintptr:
- *v = nil
- case *map[float32]int:
- *v = nil
- case *map[float32]int8:
- *v = nil
- case *map[float32]int16:
- *v = nil
- case *map[float32]int32:
- *v = nil
- case *map[float32]int64:
- *v = nil
- case *map[float32]float32:
- *v = nil
- case *map[float32]float64:
- *v = nil
- case *map[float32]bool:
- *v = nil
- case *map[float64]interface{}:
- *v = nil
- case *map[float64]string:
- *v = nil
- case *map[float64]uint:
- *v = nil
- case *map[float64]uint8:
- *v = nil
- case *map[float64]uint16:
- *v = nil
- case *map[float64]uint32:
- *v = nil
- case *map[float64]uint64:
- *v = nil
- case *map[float64]uintptr:
- *v = nil
- case *map[float64]int:
- *v = nil
- case *map[float64]int8:
- *v = nil
- case *map[float64]int16:
- *v = nil
- case *map[float64]int32:
- *v = nil
- case *map[float64]int64:
- *v = nil
- case *map[float64]float32:
- *v = nil
- case *map[float64]float64:
- *v = nil
- case *map[float64]bool:
- *v = nil
- case *map[uint]interface{}:
- *v = nil
- case *map[uint]string:
- *v = nil
- case *map[uint]uint:
- *v = nil
- case *map[uint]uint8:
- *v = nil
- case *map[uint]uint16:
- *v = nil
- case *map[uint]uint32:
- *v = nil
- case *map[uint]uint64:
- *v = nil
- case *map[uint]uintptr:
- *v = nil
- case *map[uint]int:
- *v = nil
- case *map[uint]int8:
- *v = nil
- case *map[uint]int16:
- *v = nil
- case *map[uint]int32:
- *v = nil
- case *map[uint]int64:
- *v = nil
- case *map[uint]float32:
- *v = nil
- case *map[uint]float64:
- *v = nil
- case *map[uint]bool:
- *v = nil
- case *map[uint8]interface{}:
- *v = nil
- case *map[uint8]string:
- *v = nil
- case *map[uint8]uint:
- *v = nil
- case *map[uint8]uint8:
- *v = nil
- case *map[uint8]uint16:
- *v = nil
- case *map[uint8]uint32:
- *v = nil
- case *map[uint8]uint64:
- *v = nil
- case *map[uint8]uintptr:
- *v = nil
- case *map[uint8]int:
- *v = nil
- case *map[uint8]int8:
- *v = nil
- case *map[uint8]int16:
- *v = nil
- case *map[uint8]int32:
- *v = nil
- case *map[uint8]int64:
- *v = nil
- case *map[uint8]float32:
- *v = nil
- case *map[uint8]float64:
- *v = nil
- case *map[uint8]bool:
- *v = nil
- case *map[uint16]interface{}:
- *v = nil
- case *map[uint16]string:
- *v = nil
- case *map[uint16]uint:
- *v = nil
- case *map[uint16]uint8:
- *v = nil
- case *map[uint16]uint16:
- *v = nil
- case *map[uint16]uint32:
- *v = nil
- case *map[uint16]uint64:
- *v = nil
- case *map[uint16]uintptr:
- *v = nil
- case *map[uint16]int:
- *v = nil
- case *map[uint16]int8:
- *v = nil
- case *map[uint16]int16:
- *v = nil
- case *map[uint16]int32:
- *v = nil
- case *map[uint16]int64:
- *v = nil
- case *map[uint16]float32:
- *v = nil
- case *map[uint16]float64:
- *v = nil
- case *map[uint16]bool:
- *v = nil
- case *map[uint32]interface{}:
- *v = nil
- case *map[uint32]string:
- *v = nil
- case *map[uint32]uint:
- *v = nil
- case *map[uint32]uint8:
- *v = nil
- case *map[uint32]uint16:
- *v = nil
- case *map[uint32]uint32:
- *v = nil
- case *map[uint32]uint64:
- *v = nil
- case *map[uint32]uintptr:
- *v = nil
- case *map[uint32]int:
- *v = nil
- case *map[uint32]int8:
- *v = nil
- case *map[uint32]int16:
- *v = nil
- case *map[uint32]int32:
- *v = nil
- case *map[uint32]int64:
- *v = nil
- case *map[uint32]float32:
- *v = nil
- case *map[uint32]float64:
- *v = nil
- case *map[uint32]bool:
- *v = nil
- case *map[uint64]interface{}:
- *v = nil
- case *map[uint64]string:
- *v = nil
- case *map[uint64]uint:
- *v = nil
- case *map[uint64]uint8:
- *v = nil
- case *map[uint64]uint16:
- *v = nil
- case *map[uint64]uint32:
- *v = nil
- case *map[uint64]uint64:
- *v = nil
- case *map[uint64]uintptr:
- *v = nil
- case *map[uint64]int:
- *v = nil
- case *map[uint64]int8:
- *v = nil
- case *map[uint64]int16:
- *v = nil
- case *map[uint64]int32:
- *v = nil
- case *map[uint64]int64:
- *v = nil
- case *map[uint64]float32:
- *v = nil
- case *map[uint64]float64:
- *v = nil
- case *map[uint64]bool:
- *v = nil
- case *map[uintptr]interface{}:
- *v = nil
- case *map[uintptr]string:
- *v = nil
- case *map[uintptr]uint:
- *v = nil
- case *map[uintptr]uint8:
- *v = nil
- case *map[uintptr]uint16:
- *v = nil
- case *map[uintptr]uint32:
- *v = nil
- case *map[uintptr]uint64:
- *v = nil
- case *map[uintptr]uintptr:
- *v = nil
- case *map[uintptr]int:
- *v = nil
- case *map[uintptr]int8:
- *v = nil
- case *map[uintptr]int16:
- *v = nil
- case *map[uintptr]int32:
- *v = nil
- case *map[uintptr]int64:
- *v = nil
- case *map[uintptr]float32:
- *v = nil
- case *map[uintptr]float64:
- *v = nil
- case *map[uintptr]bool:
- *v = nil
- case *map[int]interface{}:
- *v = nil
- case *map[int]string:
- *v = nil
- case *map[int]uint:
- *v = nil
- case *map[int]uint8:
- *v = nil
- case *map[int]uint16:
- *v = nil
- case *map[int]uint32:
- *v = nil
- case *map[int]uint64:
- *v = nil
- case *map[int]uintptr:
- *v = nil
- case *map[int]int:
- *v = nil
- case *map[int]int8:
- *v = nil
- case *map[int]int16:
- *v = nil
- case *map[int]int32:
- *v = nil
- case *map[int]int64:
- *v = nil
- case *map[int]float32:
- *v = nil
- case *map[int]float64:
- *v = nil
- case *map[int]bool:
- *v = nil
- case *map[int8]interface{}:
- *v = nil
- case *map[int8]string:
- *v = nil
- case *map[int8]uint:
- *v = nil
- case *map[int8]uint8:
- *v = nil
- case *map[int8]uint16:
- *v = nil
- case *map[int8]uint32:
- *v = nil
- case *map[int8]uint64:
- *v = nil
- case *map[int8]uintptr:
- *v = nil
- case *map[int8]int:
- *v = nil
- case *map[int8]int8:
- *v = nil
- case *map[int8]int16:
- *v = nil
- case *map[int8]int32:
- *v = nil
- case *map[int8]int64:
- *v = nil
- case *map[int8]float32:
- *v = nil
- case *map[int8]float64:
- *v = nil
- case *map[int8]bool:
- *v = nil
- case *map[int16]interface{}:
- *v = nil
- case *map[int16]string:
- *v = nil
- case *map[int16]uint:
- *v = nil
- case *map[int16]uint8:
- *v = nil
- case *map[int16]uint16:
- *v = nil
- case *map[int16]uint32:
- *v = nil
- case *map[int16]uint64:
- *v = nil
- case *map[int16]uintptr:
- *v = nil
- case *map[int16]int:
- *v = nil
- case *map[int16]int8:
- *v = nil
- case *map[int16]int16:
- *v = nil
- case *map[int16]int32:
- *v = nil
- case *map[int16]int64:
- *v = nil
- case *map[int16]float32:
- *v = nil
- case *map[int16]float64:
- *v = nil
- case *map[int16]bool:
- *v = nil
- case *map[int32]interface{}:
- *v = nil
- case *map[int32]string:
- *v = nil
- case *map[int32]uint:
- *v = nil
- case *map[int32]uint8:
- *v = nil
- case *map[int32]uint16:
- *v = nil
- case *map[int32]uint32:
- *v = nil
- case *map[int32]uint64:
- *v = nil
- case *map[int32]uintptr:
- *v = nil
- case *map[int32]int:
- *v = nil
- case *map[int32]int8:
- *v = nil
- case *map[int32]int16:
- *v = nil
- case *map[int32]int32:
- *v = nil
- case *map[int32]int64:
- *v = nil
- case *map[int32]float32:
- *v = nil
- case *map[int32]float64:
- *v = nil
- case *map[int32]bool:
- *v = nil
- case *map[int64]interface{}:
- *v = nil
- case *map[int64]string:
- *v = nil
- case *map[int64]uint:
- *v = nil
- case *map[int64]uint8:
- *v = nil
- case *map[int64]uint16:
- *v = nil
- case *map[int64]uint32:
- *v = nil
- case *map[int64]uint64:
- *v = nil
- case *map[int64]uintptr:
- *v = nil
- case *map[int64]int:
- *v = nil
- case *map[int64]int8:
- *v = nil
- case *map[int64]int16:
- *v = nil
- case *map[int64]int32:
- *v = nil
- case *map[int64]int64:
- *v = nil
- case *map[int64]float32:
- *v = nil
- case *map[int64]float64:
- *v = nil
- case *map[int64]bool:
- *v = nil
- case *map[bool]interface{}:
- *v = nil
- case *map[bool]string:
- *v = nil
- case *map[bool]uint:
- *v = nil
- case *map[bool]uint8:
- *v = nil
- case *map[bool]uint16:
- *v = nil
- case *map[bool]uint32:
- *v = nil
- case *map[bool]uint64:
- *v = nil
- case *map[bool]uintptr:
- *v = nil
- case *map[bool]int:
- *v = nil
- case *map[bool]int8:
- *v = nil
- case *map[bool]int16:
- *v = nil
- case *map[bool]int32:
- *v = nil
- case *map[bool]int64:
- *v = nil
- case *map[bool]float32:
- *v = nil
- case *map[bool]float64:
- *v = nil
- case *map[bool]bool:
- *v = nil
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-// -- -- fast path functions
-
-func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]interface{})
- v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]interface{})
- v2, changed := fastpathTV.DecSliceIntfV(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) {
- v, changed := f.DecSliceIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []interface{}{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]interface{}, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
- } else {
- xlen = 8
- }
- v = make([]interface{}, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, nil)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = nil
- } else {
- d.decode(&v[j])
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]interface{}, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]string)
- v, changed := fastpathTV.DecSliceStringV(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]string)
- v2, changed := fastpathTV.DecSliceStringV(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) {
- v, changed := f.DecSliceStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []string{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]string, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
- } else {
- xlen = 8
- }
- v = make([]string, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, "")
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = ""
- } else {
- v[j] = dd.DecodeString()
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]string, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]float32)
- v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]float32)
- v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) {
- v, changed := f.DecSliceFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []float32{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]float32, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
- } else {
- xlen = 8
- }
- v = make([]float32, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]float32, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]float64)
- v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]float64)
- v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) {
- v, changed := f.DecSliceFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []float64{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]float64, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- } else {
- xlen = 8
- }
- v = make([]float64, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = dd.DecodeFloat64()
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]float64, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]uint)
- v, changed := fastpathTV.DecSliceUintV(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]uint)
- v2, changed := fastpathTV.DecSliceUintV(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) {
- v, changed := f.DecSliceUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []uint{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]uint, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- } else {
- xlen = 8
- }
- v = make([]uint, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]uint, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]uint8)
- v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]uint8)
- v2, changed := fastpathTV.DecSliceUint8V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) {
- v, changed := f.DecSliceUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []uint8{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]uint8, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
- } else {
- xlen = 8
- }
- v = make([]uint8, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]uint8, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]uint16)
- v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]uint16)
- v2, changed := fastpathTV.DecSliceUint16V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) {
- v, changed := f.DecSliceUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []uint16{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]uint16, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
- } else {
- xlen = 8
- }
- v = make([]uint16, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]uint16, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]uint32)
- v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]uint32)
- v2, changed := fastpathTV.DecSliceUint32V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) {
- v, changed := f.DecSliceUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []uint32{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]uint32, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
- } else {
- xlen = 8
- }
- v = make([]uint32, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]uint32, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]uint64)
- v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]uint64)
- v2, changed := fastpathTV.DecSliceUint64V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) {
- v, changed := f.DecSliceUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []uint64{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]uint64, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- } else {
- xlen = 8
- }
- v = make([]uint64, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = dd.DecodeUint64()
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]uint64, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]uintptr)
- v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]uintptr)
- v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) {
- v, changed := f.DecSliceUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []uintptr{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]uintptr, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- } else {
- xlen = 8
- }
- v = make([]uintptr, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]uintptr, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]int)
- v, changed := fastpathTV.DecSliceIntV(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]int)
- v2, changed := fastpathTV.DecSliceIntV(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) {
- v, changed := f.DecSliceIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []int{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]int, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- } else {
- xlen = 8
- }
- v = make([]int, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]int, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]int8)
- v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]int8)
- v2, changed := fastpathTV.DecSliceInt8V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) {
- v, changed := f.DecSliceInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []int8{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]int8, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
- } else {
- xlen = 8
- }
- v = make([]int8, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]int8, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]int16)
- v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]int16)
- v2, changed := fastpathTV.DecSliceInt16V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) {
- v, changed := f.DecSliceInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []int16{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]int16, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2)
- } else {
- xlen = 8
- }
- v = make([]int16, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]int16, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]int32)
- v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]int32)
- v2, changed := fastpathTV.DecSliceInt32V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) {
- v, changed := f.DecSliceInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []int32{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]int32, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
- } else {
- xlen = 8
- }
- v = make([]int32, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]int32, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]int64)
- v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]int64)
- v2, changed := fastpathTV.DecSliceInt64V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) {
- v, changed := f.DecSliceInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []int64{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]int64, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
- } else {
- xlen = 8
- }
- v = make([]int64, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, 0)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = 0
- } else {
- v[j] = dd.DecodeInt64()
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]int64, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]bool)
- v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d)
- if changed {
- *vp = v
- }
- } else {
- v := rv2i(rv).([]bool)
- v2, changed := fastpathTV.DecSliceBoolV(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) {
- v, changed := f.DecSliceBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) {
- dd := d.d
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil {
- v = []bool{}
- } else if len(v) != 0 {
- v = v[:0]
- }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]bool, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
- } else {
- xlen = 8
- }
- v = make([]bool, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, false)
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = false
- } else {
- v[j] = dd.DecodeBool()
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]bool, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]interface{})
- v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) {
- v, changed := f.DecMapIntfIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool,
- d *Decoder) (_ map[interface{}]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
- v = make(map[interface{}]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk interface{}
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]string)
- v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d)
- }
-}
-func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) {
- v, changed := f.DecMapIntfStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool,
- d *Decoder) (_ map[interface{}]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
- v = make(map[interface{}]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]uint)
- v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d)
- }
-}
-func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) {
- v, changed := f.DecMapIntfUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool,
- d *Decoder) (_ map[interface{}]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[interface{}]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]uint8)
- v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) {
- v, changed := f.DecMapIntfUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool,
- d *Decoder) (_ map[interface{}]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[interface{}]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]uint16)
- v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) {
- v, changed := f.DecMapIntfUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool,
- d *Decoder) (_ map[interface{}]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[interface{}]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]uint32)
- v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) {
- v, changed := f.DecMapIntfUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool,
- d *Decoder) (_ map[interface{}]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[interface{}]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]uint64)
- v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) {
- v, changed := f.DecMapIntfUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool,
- d *Decoder) (_ map[interface{}]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[interface{}]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]uintptr)
- v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) {
- v, changed := f.DecMapIntfUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool,
- d *Decoder) (_ map[interface{}]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[interface{}]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]int)
- v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d)
- }
-}
-func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) {
- v, changed := f.DecMapIntfIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool,
- d *Decoder) (_ map[interface{}]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[interface{}]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]int8)
- v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d)
- }
-}
-func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) {
- v, changed := f.DecMapIntfInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool,
- d *Decoder) (_ map[interface{}]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[interface{}]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]int16)
- v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d)
- }
-}
-func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) {
- v, changed := f.DecMapIntfInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool,
- d *Decoder) (_ map[interface{}]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[interface{}]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]int32)
- v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d)
- }
-}
-func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) {
- v, changed := f.DecMapIntfInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool,
- d *Decoder) (_ map[interface{}]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[interface{}]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]int64)
- v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d)
- }
-}
-func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) {
- v, changed := f.DecMapIntfInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool,
- d *Decoder) (_ map[interface{}]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[interface{}]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]float32)
- v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d)
- }
-}
-func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) {
- v, changed := f.DecMapIntfFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool,
- d *Decoder) (_ map[interface{}]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[interface{}]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]float64)
- v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d)
- }
-}
-func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) {
- v, changed := f.DecMapIntfFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool,
- d *Decoder) (_ map[interface{}]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[interface{}]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[interface{}]bool)
- v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d)
- }
-}
-func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) {
- v, changed := f.DecMapIntfBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool,
- d *Decoder) (_ map[interface{}]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[interface{}]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk interface{}
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv)
- }
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]interface{})
- v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) {
- v, changed := f.DecMapStringIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool,
- d *Decoder) (_ map[string]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
- v = make(map[string]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk string
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]string)
- v, changed := fastpathTV.DecMapStringStringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d)
- }
-}
-func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) {
- v, changed := f.DecMapStringStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool,
- d *Decoder) (_ map[string]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 32)
- v = make(map[string]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]uint)
- v, changed := fastpathTV.DecMapStringUintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d)
- }
-}
-func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) {
- v, changed := f.DecMapStringUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool,
- d *Decoder) (_ map[string]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[string]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]uint8)
- v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) {
- v, changed := f.DecMapStringUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool,
- d *Decoder) (_ map[string]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[string]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]uint16)
- v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) {
- v, changed := f.DecMapStringUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool,
- d *Decoder) (_ map[string]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[string]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]uint32)
- v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) {
- v, changed := f.DecMapStringUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool,
- d *Decoder) (_ map[string]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[string]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]uint64)
- v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) {
- v, changed := f.DecMapStringUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool,
- d *Decoder) (_ map[string]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[string]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]uintptr)
- v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) {
- v, changed := f.DecMapStringUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool,
- d *Decoder) (_ map[string]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[string]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]int)
- v, changed := fastpathTV.DecMapStringIntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d)
- }
-}
-func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) {
- v, changed := f.DecMapStringIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool,
- d *Decoder) (_ map[string]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[string]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]int8)
- v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d)
- }
-}
-func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) {
- v, changed := f.DecMapStringInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool,
- d *Decoder) (_ map[string]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[string]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]int16)
- v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d)
- }
-}
-func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) {
- v, changed := f.DecMapStringInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool,
- d *Decoder) (_ map[string]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[string]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]int32)
- v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d)
- }
-}
-func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) {
- v, changed := f.DecMapStringInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool,
- d *Decoder) (_ map[string]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[string]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]int64)
- v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d)
- }
-}
-func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) {
- v, changed := f.DecMapStringInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool,
- d *Decoder) (_ map[string]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[string]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]float32)
- v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d)
- }
-}
-func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) {
- v, changed := f.DecMapStringFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool,
- d *Decoder) (_ map[string]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[string]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]float64)
- v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d)
- }
-}
-func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) {
- v, changed := f.DecMapStringFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool,
- d *Decoder) (_ map[string]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[string]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[string]bool)
- v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d)
- }
-}
-func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) {
- v, changed := f.DecMapStringBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool,
- d *Decoder) (_ map[string]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[string]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk string
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeString()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]interface{})
- v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) {
- v, changed := f.DecMapFloat32IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool,
- d *Decoder) (_ map[float32]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[float32]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk float32
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]string)
- v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) {
- v, changed := f.DecMapFloat32StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool,
- d *Decoder) (_ map[float32]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[float32]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]uint)
- v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) {
- v, changed := f.DecMapFloat32UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool,
- d *Decoder) (_ map[float32]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float32]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]uint8)
- v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) {
- v, changed := f.DecMapFloat32Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool,
- d *Decoder) (_ map[float32]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[float32]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]uint16)
- v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) {
- v, changed := f.DecMapFloat32Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool,
- d *Decoder) (_ map[float32]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[float32]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]uint32)
- v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) {
- v, changed := f.DecMapFloat32Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool,
- d *Decoder) (_ map[float32]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[float32]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]uint64)
- v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) {
- v, changed := f.DecMapFloat32Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool,
- d *Decoder) (_ map[float32]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float32]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]uintptr)
- v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) {
- v, changed := f.DecMapFloat32UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool,
- d *Decoder) (_ map[float32]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float32]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]int)
- v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) {
- v, changed := f.DecMapFloat32IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool,
- d *Decoder) (_ map[float32]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float32]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]int8)
- v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) {
- v, changed := f.DecMapFloat32Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool,
- d *Decoder) (_ map[float32]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[float32]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]int16)
- v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) {
- v, changed := f.DecMapFloat32Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool,
- d *Decoder) (_ map[float32]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[float32]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]int32)
- v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) {
- v, changed := f.DecMapFloat32Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool,
- d *Decoder) (_ map[float32]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[float32]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]int64)
- v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) {
- v, changed := f.DecMapFloat32Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool,
- d *Decoder) (_ map[float32]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float32]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]float32)
- v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) {
- v, changed := f.DecMapFloat32Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool,
- d *Decoder) (_ map[float32]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[float32]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]float64)
- v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) {
- v, changed := f.DecMapFloat32Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool,
- d *Decoder) (_ map[float32]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float32]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float32]bool)
- v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d)
- }
-}
-func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) {
- v, changed := f.DecMapFloat32BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool,
- d *Decoder) (_ map[float32]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[float32]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float32
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]interface{})
- v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) {
- v, changed := f.DecMapFloat64IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool,
- d *Decoder) (_ map[float64]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[float64]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk float64
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]string)
- v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) {
- v, changed := f.DecMapFloat64StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool,
- d *Decoder) (_ map[float64]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[float64]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]uint)
- v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) {
- v, changed := f.DecMapFloat64UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool,
- d *Decoder) (_ map[float64]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[float64]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]uint8)
- v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) {
- v, changed := f.DecMapFloat64Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool,
- d *Decoder) (_ map[float64]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[float64]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]uint16)
- v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) {
- v, changed := f.DecMapFloat64Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool,
- d *Decoder) (_ map[float64]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[float64]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]uint32)
- v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) {
- v, changed := f.DecMapFloat64Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool,
- d *Decoder) (_ map[float64]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float64]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]uint64)
- v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) {
- v, changed := f.DecMapFloat64Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool,
- d *Decoder) (_ map[float64]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[float64]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]uintptr)
- v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) {
- v, changed := f.DecMapFloat64UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool,
- d *Decoder) (_ map[float64]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[float64]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]int)
- v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) {
- v, changed := f.DecMapFloat64IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool,
- d *Decoder) (_ map[float64]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[float64]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]int8)
- v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) {
- v, changed := f.DecMapFloat64Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool,
- d *Decoder) (_ map[float64]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[float64]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]int16)
- v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) {
- v, changed := f.DecMapFloat64Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool,
- d *Decoder) (_ map[float64]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[float64]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]int32)
- v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) {
- v, changed := f.DecMapFloat64Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool,
- d *Decoder) (_ map[float64]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float64]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]int64)
- v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) {
- v, changed := f.DecMapFloat64Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool,
- d *Decoder) (_ map[float64]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[float64]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]float32)
- v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) {
- v, changed := f.DecMapFloat64Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool,
- d *Decoder) (_ map[float64]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[float64]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]float64)
- v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) {
- v, changed := f.DecMapFloat64Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool,
- d *Decoder) (_ map[float64]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[float64]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[float64]bool)
- v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d)
- }
-}
-func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) {
- v, changed := f.DecMapFloat64BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool,
- d *Decoder) (_ map[float64]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[float64]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk float64
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeFloat64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]interface{})
- v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) {
- v, changed := f.DecMapUintIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool,
- d *Decoder) (_ map[uint]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[uint]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk uint
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]string)
- v, changed := fastpathTV.DecMapUintStringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d)
- }
-}
-func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) {
- v, changed := f.DecMapUintStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool,
- d *Decoder) (_ map[uint]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[uint]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]uint)
- v, changed := fastpathTV.DecMapUintUintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d)
- }
-}
-func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) {
- v, changed := f.DecMapUintUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool,
- d *Decoder) (_ map[uint]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]uint8)
- v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) {
- v, changed := f.DecMapUintUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool,
- d *Decoder) (_ map[uint]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]uint16)
- v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) {
- v, changed := f.DecMapUintUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool,
- d *Decoder) (_ map[uint]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]uint32)
- v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) {
- v, changed := f.DecMapUintUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool,
- d *Decoder) (_ map[uint]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]uint64)
- v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) {
- v, changed := f.DecMapUintUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool,
- d *Decoder) (_ map[uint]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]uintptr)
- v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) {
- v, changed := f.DecMapUintUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool,
- d *Decoder) (_ map[uint]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]int)
- v, changed := fastpathTV.DecMapUintIntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d)
- }
-}
-func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) {
- v, changed := f.DecMapUintIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool,
- d *Decoder) (_ map[uint]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]int8)
- v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d)
- }
-}
-func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) {
- v, changed := f.DecMapUintInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool,
- d *Decoder) (_ map[uint]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]int16)
- v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d)
- }
-}
-func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) {
- v, changed := f.DecMapUintInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool,
- d *Decoder) (_ map[uint]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]int32)
- v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d)
- }
-}
-func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) {
- v, changed := f.DecMapUintInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool,
- d *Decoder) (_ map[uint]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]int64)
- v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d)
- }
-}
-func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) {
- v, changed := f.DecMapUintInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool,
- d *Decoder) (_ map[uint]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]float32)
- v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d)
- }
-}
-func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) {
- v, changed := f.DecMapUintFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool,
- d *Decoder) (_ map[uint]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]float64)
- v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d)
- }
-}
-func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) {
- v, changed := f.DecMapUintFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool,
- d *Decoder) (_ map[uint]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint]bool)
- v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d)
- }
-}
-func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) {
- v, changed := f.DecMapUintBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool,
- d *Decoder) (_ map[uint]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]interface{})
- v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) {
- v, changed := f.DecMapUint8IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool,
- d *Decoder) (_ map[uint8]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[uint8]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk uint8
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]string)
- v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d)
- }
-}
-func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) {
- v, changed := f.DecMapUint8StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool,
- d *Decoder) (_ map[uint8]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[uint8]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]uint)
- v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d)
- }
-}
-func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) {
- v, changed := f.DecMapUint8UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool,
- d *Decoder) (_ map[uint8]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint8]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]uint8)
- v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) {
- v, changed := f.DecMapUint8Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool,
- d *Decoder) (_ map[uint8]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[uint8]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]uint16)
- v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) {
- v, changed := f.DecMapUint8Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool,
- d *Decoder) (_ map[uint8]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[uint8]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]uint32)
- v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) {
- v, changed := f.DecMapUint8Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool,
- d *Decoder) (_ map[uint8]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[uint8]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]uint64)
- v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) {
- v, changed := f.DecMapUint8Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool,
- d *Decoder) (_ map[uint8]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint8]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]uintptr)
- v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) {
- v, changed := f.DecMapUint8UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool,
- d *Decoder) (_ map[uint8]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint8]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]int)
- v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d)
- }
-}
-func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) {
- v, changed := f.DecMapUint8IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool,
- d *Decoder) (_ map[uint8]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint8]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]int8)
- v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) {
- v, changed := f.DecMapUint8Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool,
- d *Decoder) (_ map[uint8]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[uint8]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]int16)
- v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) {
- v, changed := f.DecMapUint8Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool,
- d *Decoder) (_ map[uint8]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[uint8]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]int32)
- v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) {
- v, changed := f.DecMapUint8Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool,
- d *Decoder) (_ map[uint8]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[uint8]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]int64)
- v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) {
- v, changed := f.DecMapUint8Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool,
- d *Decoder) (_ map[uint8]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint8]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]float32)
- v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) {
- v, changed := f.DecMapUint8Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool,
- d *Decoder) (_ map[uint8]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[uint8]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]float64)
- v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d)
- }
-}
-func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) {
- v, changed := f.DecMapUint8Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool,
- d *Decoder) (_ map[uint8]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint8]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint8]bool)
- v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d)
- }
-}
-func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) {
- v, changed := f.DecMapUint8BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool,
- d *Decoder) (_ map[uint8]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[uint8]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint8
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]interface{})
- v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) {
- v, changed := f.DecMapUint16IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool,
- d *Decoder) (_ map[uint16]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[uint16]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk uint16
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]string)
- v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d)
- }
-}
-func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) {
- v, changed := f.DecMapUint16StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool,
- d *Decoder) (_ map[uint16]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[uint16]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]uint)
- v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d)
- }
-}
-func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) {
- v, changed := f.DecMapUint16UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool,
- d *Decoder) (_ map[uint16]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint16]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]uint8)
- v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) {
- v, changed := f.DecMapUint16Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool,
- d *Decoder) (_ map[uint16]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[uint16]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]uint16)
- v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) {
- v, changed := f.DecMapUint16Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool,
- d *Decoder) (_ map[uint16]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
- v = make(map[uint16]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]uint32)
- v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) {
- v, changed := f.DecMapUint16Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool,
- d *Decoder) (_ map[uint16]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[uint16]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]uint64)
- v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) {
- v, changed := f.DecMapUint16Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool,
- d *Decoder) (_ map[uint16]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint16]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]uintptr)
- v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) {
- v, changed := f.DecMapUint16UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool,
- d *Decoder) (_ map[uint16]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint16]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]int)
- v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d)
- }
-}
-func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) {
- v, changed := f.DecMapUint16IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool,
- d *Decoder) (_ map[uint16]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint16]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]int8)
- v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) {
- v, changed := f.DecMapUint16Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool,
- d *Decoder) (_ map[uint16]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[uint16]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]int16)
- v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) {
- v, changed := f.DecMapUint16Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool,
- d *Decoder) (_ map[uint16]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
- v = make(map[uint16]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]int32)
- v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) {
- v, changed := f.DecMapUint16Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool,
- d *Decoder) (_ map[uint16]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[uint16]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]int64)
- v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) {
- v, changed := f.DecMapUint16Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool,
- d *Decoder) (_ map[uint16]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint16]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]float32)
- v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) {
- v, changed := f.DecMapUint16Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool,
- d *Decoder) (_ map[uint16]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[uint16]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]float64)
- v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d)
- }
-}
-func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) {
- v, changed := f.DecMapUint16Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool,
- d *Decoder) (_ map[uint16]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint16]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint16]bool)
- v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d)
- }
-}
-func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) {
- v, changed := f.DecMapUint16BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool,
- d *Decoder) (_ map[uint16]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[uint16]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint16
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]interface{})
- v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) {
- v, changed := f.DecMapUint32IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool,
- d *Decoder) (_ map[uint32]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[uint32]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk uint32
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]string)
- v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d)
- }
-}
-func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) {
- v, changed := f.DecMapUint32StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool,
- d *Decoder) (_ map[uint32]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[uint32]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]uint)
- v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d)
- }
-}
-func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) {
- v, changed := f.DecMapUint32UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool,
- d *Decoder) (_ map[uint32]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint32]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]uint8)
- v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) {
- v, changed := f.DecMapUint32Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool,
- d *Decoder) (_ map[uint32]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[uint32]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]uint16)
- v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) {
- v, changed := f.DecMapUint32Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool,
- d *Decoder) (_ map[uint32]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[uint32]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]uint32)
- v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) {
- v, changed := f.DecMapUint32Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool,
- d *Decoder) (_ map[uint32]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[uint32]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]uint64)
- v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) {
- v, changed := f.DecMapUint32Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool,
- d *Decoder) (_ map[uint32]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint32]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]uintptr)
- v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) {
- v, changed := f.DecMapUint32UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool,
- d *Decoder) (_ map[uint32]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint32]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]int)
- v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d)
- }
-}
-func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) {
- v, changed := f.DecMapUint32IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool,
- d *Decoder) (_ map[uint32]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint32]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]int8)
- v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) {
- v, changed := f.DecMapUint32Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool,
- d *Decoder) (_ map[uint32]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[uint32]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]int16)
- v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) {
- v, changed := f.DecMapUint32Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool,
- d *Decoder) (_ map[uint32]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[uint32]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]int32)
- v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) {
- v, changed := f.DecMapUint32Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool,
- d *Decoder) (_ map[uint32]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[uint32]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]int64)
- v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) {
- v, changed := f.DecMapUint32Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool,
- d *Decoder) (_ map[uint32]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint32]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]float32)
- v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) {
- v, changed := f.DecMapUint32Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool,
- d *Decoder) (_ map[uint32]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[uint32]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]float64)
- v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d)
- }
-}
-func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) {
- v, changed := f.DecMapUint32Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool,
- d *Decoder) (_ map[uint32]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint32]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint32]bool)
- v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d)
- }
-}
-func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) {
- v, changed := f.DecMapUint32BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool,
- d *Decoder) (_ map[uint32]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[uint32]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint32
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]interface{})
- v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) {
- v, changed := f.DecMapUint64IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool,
- d *Decoder) (_ map[uint64]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[uint64]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk uint64
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]string)
- v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d)
- }
-}
-func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) {
- v, changed := f.DecMapUint64StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool,
- d *Decoder) (_ map[uint64]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[uint64]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]uint)
- v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d)
- }
-}
-func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) {
- v, changed := f.DecMapUint64UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool,
- d *Decoder) (_ map[uint64]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint64]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]uint8)
- v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) {
- v, changed := f.DecMapUint64Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool,
- d *Decoder) (_ map[uint64]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint64]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]uint16)
- v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) {
- v, changed := f.DecMapUint64Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool,
- d *Decoder) (_ map[uint64]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint64]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]uint32)
- v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) {
- v, changed := f.DecMapUint64Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool,
- d *Decoder) (_ map[uint64]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint64]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]uint64)
- v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) {
- v, changed := f.DecMapUint64Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool,
- d *Decoder) (_ map[uint64]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint64]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]uintptr)
- v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) {
- v, changed := f.DecMapUint64UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool,
- d *Decoder) (_ map[uint64]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint64]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]int)
- v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d)
- }
-}
-func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) {
- v, changed := f.DecMapUint64IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool,
- d *Decoder) (_ map[uint64]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint64]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]int8)
- v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) {
- v, changed := f.DecMapUint64Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool,
- d *Decoder) (_ map[uint64]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint64]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]int16)
- v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) {
- v, changed := f.DecMapUint64Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool,
- d *Decoder) (_ map[uint64]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uint64]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]int32)
- v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) {
- v, changed := f.DecMapUint64Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool,
- d *Decoder) (_ map[uint64]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint64]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]int64)
- v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) {
- v, changed := f.DecMapUint64Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool,
- d *Decoder) (_ map[uint64]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint64]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]float32)
- v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) {
- v, changed := f.DecMapUint64Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool,
- d *Decoder) (_ map[uint64]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uint64]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]float64)
- v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d)
- }
-}
-func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) {
- v, changed := f.DecMapUint64Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool,
- d *Decoder) (_ map[uint64]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uint64]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uint64]bool)
- v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d)
- }
-}
-func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) {
- v, changed := f.DecMapUint64BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool,
- d *Decoder) (_ map[uint64]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uint64]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uint64
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeUint64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]interface{})
- v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) {
- v, changed := f.DecMapUintptrIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool,
- d *Decoder) (_ map[uintptr]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[uintptr]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk uintptr
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]string)
- v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) {
- v, changed := f.DecMapUintptrStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool,
- d *Decoder) (_ map[uintptr]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[uintptr]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]uint)
- v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) {
- v, changed := f.DecMapUintptrUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool,
- d *Decoder) (_ map[uintptr]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uintptr]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]uint8)
- v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) {
- v, changed := f.DecMapUintptrUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool,
- d *Decoder) (_ map[uintptr]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uintptr]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]uint16)
- v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) {
- v, changed := f.DecMapUintptrUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool,
- d *Decoder) (_ map[uintptr]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uintptr]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]uint32)
- v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) {
- v, changed := f.DecMapUintptrUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool,
- d *Decoder) (_ map[uintptr]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uintptr]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]uint64)
- v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) {
- v, changed := f.DecMapUintptrUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool,
- d *Decoder) (_ map[uintptr]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uintptr]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]uintptr)
- v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) {
- v, changed := f.DecMapUintptrUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool,
- d *Decoder) (_ map[uintptr]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uintptr]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]int)
- v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) {
- v, changed := f.DecMapUintptrIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool,
- d *Decoder) (_ map[uintptr]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uintptr]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]int8)
- v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) {
- v, changed := f.DecMapUintptrInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool,
- d *Decoder) (_ map[uintptr]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uintptr]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]int16)
- v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) {
- v, changed := f.DecMapUintptrInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool,
- d *Decoder) (_ map[uintptr]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[uintptr]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]int32)
- v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) {
- v, changed := f.DecMapUintptrInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool,
- d *Decoder) (_ map[uintptr]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uintptr]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]int64)
- v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) {
- v, changed := f.DecMapUintptrInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool,
- d *Decoder) (_ map[uintptr]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uintptr]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]float32)
- v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) {
- v, changed := f.DecMapUintptrFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool,
- d *Decoder) (_ map[uintptr]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[uintptr]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]float64)
- v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) {
- v, changed := f.DecMapUintptrFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool,
- d *Decoder) (_ map[uintptr]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[uintptr]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[uintptr]bool)
- v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d)
- }
-}
-func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) {
- v, changed := f.DecMapUintptrBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool,
- d *Decoder) (_ map[uintptr]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[uintptr]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk uintptr
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]interface{})
- v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) {
- v, changed := f.DecMapIntIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool,
- d *Decoder) (_ map[int]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[int]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk int
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]string)
- v, changed := fastpathTV.DecMapIntStringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d)
- }
-}
-func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) {
- v, changed := f.DecMapIntStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool,
- d *Decoder) (_ map[int]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[int]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]uint)
- v, changed := fastpathTV.DecMapIntUintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d)
- }
-}
-func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) {
- v, changed := f.DecMapIntUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool,
- d *Decoder) (_ map[int]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]uint8)
- v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) {
- v, changed := f.DecMapIntUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool,
- d *Decoder) (_ map[int]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]uint16)
- v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) {
- v, changed := f.DecMapIntUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool,
- d *Decoder) (_ map[int]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]uint32)
- v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) {
- v, changed := f.DecMapIntUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool,
- d *Decoder) (_ map[int]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]uint64)
- v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) {
- v, changed := f.DecMapIntUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool,
- d *Decoder) (_ map[int]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]uintptr)
- v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) {
- v, changed := f.DecMapIntUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool,
- d *Decoder) (_ map[int]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]int)
- v, changed := fastpathTV.DecMapIntIntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d)
- }
-}
-func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) {
- v, changed := f.DecMapIntIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool,
- d *Decoder) (_ map[int]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]int8)
- v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d)
- }
-}
-func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) {
- v, changed := f.DecMapIntInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool,
- d *Decoder) (_ map[int]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]int16)
- v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d)
- }
-}
-func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) {
- v, changed := f.DecMapIntInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool,
- d *Decoder) (_ map[int]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]int32)
- v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d)
- }
-}
-func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) {
- v, changed := f.DecMapIntInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool,
- d *Decoder) (_ map[int]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]int64)
- v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d)
- }
-}
-func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) {
- v, changed := f.DecMapIntInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool,
- d *Decoder) (_ map[int]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]float32)
- v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d)
- }
-}
-func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) {
- v, changed := f.DecMapIntFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool,
- d *Decoder) (_ map[int]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]float64)
- v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d)
- }
-}
-func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) {
- v, changed := f.DecMapIntFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool,
- d *Decoder) (_ map[int]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int]bool)
- v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d)
- }
-}
-func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) {
- v, changed := f.DecMapIntBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool,
- d *Decoder) (_ map[int]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]interface{})
- v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) {
- v, changed := f.DecMapInt8IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool,
- d *Decoder) (_ map[int8]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[int8]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk int8
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]string)
- v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d)
- }
-}
-func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) {
- v, changed := f.DecMapInt8StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool,
- d *Decoder) (_ map[int8]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[int8]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]uint)
- v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d)
- }
-}
-func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) {
- v, changed := f.DecMapInt8UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool,
- d *Decoder) (_ map[int8]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int8]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]uint8)
- v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) {
- v, changed := f.DecMapInt8Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool,
- d *Decoder) (_ map[int8]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[int8]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]uint16)
- v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) {
- v, changed := f.DecMapInt8Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool,
- d *Decoder) (_ map[int8]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[int8]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]uint32)
- v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) {
- v, changed := f.DecMapInt8Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool,
- d *Decoder) (_ map[int8]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[int8]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]uint64)
- v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) {
- v, changed := f.DecMapInt8Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool,
- d *Decoder) (_ map[int8]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int8]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]uintptr)
- v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) {
- v, changed := f.DecMapInt8UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool,
- d *Decoder) (_ map[int8]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int8]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]int)
- v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d)
- }
-}
-func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) {
- v, changed := f.DecMapInt8IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool,
- d *Decoder) (_ map[int8]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int8]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]int8)
- v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) {
- v, changed := f.DecMapInt8Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool,
- d *Decoder) (_ map[int8]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[int8]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]int16)
- v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) {
- v, changed := f.DecMapInt8Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool,
- d *Decoder) (_ map[int8]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[int8]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]int32)
- v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) {
- v, changed := f.DecMapInt8Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool,
- d *Decoder) (_ map[int8]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[int8]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]int64)
- v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) {
- v, changed := f.DecMapInt8Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool,
- d *Decoder) (_ map[int8]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int8]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]float32)
- v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) {
- v, changed := f.DecMapInt8Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool,
- d *Decoder) (_ map[int8]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[int8]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]float64)
- v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d)
- }
-}
-func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) {
- v, changed := f.DecMapInt8Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool,
- d *Decoder) (_ map[int8]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int8]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int8]bool)
- v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d)
- }
-}
-func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) {
- v, changed := f.DecMapInt8BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool,
- d *Decoder) (_ map[int8]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[int8]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int8
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]interface{})
- v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) {
- v, changed := f.DecMapInt16IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool,
- d *Decoder) (_ map[int16]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[int16]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk int16
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]string)
- v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d)
- }
-}
-func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) {
- v, changed := f.DecMapInt16StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool,
- d *Decoder) (_ map[int16]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
- v = make(map[int16]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]uint)
- v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d)
- }
-}
-func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) {
- v, changed := f.DecMapInt16UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool,
- d *Decoder) (_ map[int16]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int16]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]uint8)
- v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) {
- v, changed := f.DecMapInt16Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool,
- d *Decoder) (_ map[int16]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[int16]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]uint16)
- v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) {
- v, changed := f.DecMapInt16Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool,
- d *Decoder) (_ map[int16]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
- v = make(map[int16]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]uint32)
- v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) {
- v, changed := f.DecMapInt16Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool,
- d *Decoder) (_ map[int16]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[int16]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]uint64)
- v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) {
- v, changed := f.DecMapInt16Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool,
- d *Decoder) (_ map[int16]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int16]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]uintptr)
- v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) {
- v, changed := f.DecMapInt16UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool,
- d *Decoder) (_ map[int16]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int16]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]int)
- v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d)
- }
-}
-func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) {
- v, changed := f.DecMapInt16IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool,
- d *Decoder) (_ map[int16]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int16]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]int8)
- v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) {
- v, changed := f.DecMapInt16Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool,
- d *Decoder) (_ map[int16]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[int16]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]int16)
- v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) {
- v, changed := f.DecMapInt16Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool,
- d *Decoder) (_ map[int16]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
- v = make(map[int16]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]int32)
- v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) {
- v, changed := f.DecMapInt16Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool,
- d *Decoder) (_ map[int16]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[int16]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]int64)
- v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) {
- v, changed := f.DecMapInt16Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool,
- d *Decoder) (_ map[int16]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int16]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]float32)
- v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) {
- v, changed := f.DecMapInt16Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool,
- d *Decoder) (_ map[int16]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[int16]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]float64)
- v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d)
- }
-}
-func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) {
- v, changed := f.DecMapInt16Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool,
- d *Decoder) (_ map[int16]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int16]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int16]bool)
- v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d)
- }
-}
-func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) {
- v, changed := f.DecMapInt16BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool,
- d *Decoder) (_ map[int16]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[int16]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int16
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]interface{})
- v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) {
- v, changed := f.DecMapInt32IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool,
- d *Decoder) (_ map[int32]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[int32]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk int32
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]string)
- v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d)
- }
-}
-func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) {
- v, changed := f.DecMapInt32StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool,
- d *Decoder) (_ map[int32]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
- v = make(map[int32]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]uint)
- v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d)
- }
-}
-func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) {
- v, changed := f.DecMapInt32UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool,
- d *Decoder) (_ map[int32]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int32]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]uint8)
- v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) {
- v, changed := f.DecMapInt32Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool,
- d *Decoder) (_ map[int32]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[int32]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]uint16)
- v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) {
- v, changed := f.DecMapInt32Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool,
- d *Decoder) (_ map[int32]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[int32]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]uint32)
- v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) {
- v, changed := f.DecMapInt32Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool,
- d *Decoder) (_ map[int32]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[int32]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]uint64)
- v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) {
- v, changed := f.DecMapInt32Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool,
- d *Decoder) (_ map[int32]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int32]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]uintptr)
- v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) {
- v, changed := f.DecMapInt32UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool,
- d *Decoder) (_ map[int32]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int32]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]int)
- v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d)
- }
-}
-func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) {
- v, changed := f.DecMapInt32IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool,
- d *Decoder) (_ map[int32]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int32]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]int8)
- v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) {
- v, changed := f.DecMapInt32Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool,
- d *Decoder) (_ map[int32]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[int32]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]int16)
- v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) {
- v, changed := f.DecMapInt32Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool,
- d *Decoder) (_ map[int32]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
- v = make(map[int32]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]int32)
- v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) {
- v, changed := f.DecMapInt32Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool,
- d *Decoder) (_ map[int32]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[int32]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]int64)
- v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) {
- v, changed := f.DecMapInt32Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool,
- d *Decoder) (_ map[int32]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int32]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]float32)
- v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) {
- v, changed := f.DecMapInt32Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool,
- d *Decoder) (_ map[int32]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
- v = make(map[int32]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]float64)
- v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d)
- }
-}
-func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) {
- v, changed := f.DecMapInt32Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool,
- d *Decoder) (_ map[int32]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int32]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int32]bool)
- v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d)
- }
-}
-func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) {
- v, changed := f.DecMapInt32BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool,
- d *Decoder) (_ map[int32]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[int32]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int32
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]interface{})
- v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) {
- v, changed := f.DecMapInt64IntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool,
- d *Decoder) (_ map[int64]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[int64]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk int64
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]string)
- v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d)
- }
-}
-func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) {
- v, changed := f.DecMapInt64StringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool,
- d *Decoder) (_ map[int64]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
- v = make(map[int64]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]uint)
- v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d)
- }
-}
-func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) {
- v, changed := f.DecMapInt64UintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool,
- d *Decoder) (_ map[int64]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int64]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]uint8)
- v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) {
- v, changed := f.DecMapInt64Uint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool,
- d *Decoder) (_ map[int64]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int64]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]uint16)
- v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) {
- v, changed := f.DecMapInt64Uint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool,
- d *Decoder) (_ map[int64]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int64]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]uint32)
- v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) {
- v, changed := f.DecMapInt64Uint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool,
- d *Decoder) (_ map[int64]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int64]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]uint64)
- v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) {
- v, changed := f.DecMapInt64Uint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool,
- d *Decoder) (_ map[int64]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int64]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]uintptr)
- v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) {
- v, changed := f.DecMapInt64UintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool,
- d *Decoder) (_ map[int64]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int64]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]int)
- v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d)
- }
-}
-func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) {
- v, changed := f.DecMapInt64IntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool,
- d *Decoder) (_ map[int64]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int64]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]int8)
- v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) {
- v, changed := f.DecMapInt64Int8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool,
- d *Decoder) (_ map[int64]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int64]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]int16)
- v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) {
- v, changed := f.DecMapInt64Int16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool,
- d *Decoder) (_ map[int64]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
- v = make(map[int64]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]int32)
- v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) {
- v, changed := f.DecMapInt64Int32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool,
- d *Decoder) (_ map[int64]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int64]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]int64)
- v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) {
- v, changed := f.DecMapInt64Int64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool,
- d *Decoder) (_ map[int64]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int64]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]float32)
- v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) {
- v, changed := f.DecMapInt64Float32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool,
- d *Decoder) (_ map[int64]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
- v = make(map[int64]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]float64)
- v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d)
- }
-}
-func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) {
- v, changed := f.DecMapInt64Float64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool,
- d *Decoder) (_ map[int64]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
- v = make(map[int64]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[int64]bool)
- v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d)
- }
-}
-func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) {
- v, changed := f.DecMapInt64BoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool,
- d *Decoder) (_ map[int64]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[int64]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk int64
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeInt64()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]interface{})
- v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d)
- }
-}
-func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) {
- v, changed := f.DecMapBoolIntfV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool,
- d *Decoder) (_ map[bool]interface{}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[bool]interface{}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- var mk bool
- var mv interface{}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = nil
- }
- continue
- }
- if mapGet {
- mv = v[mk]
- } else {
- mv = nil
- }
- d.decode(&mv)
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]string)
- v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d)
- }
-}
-func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) {
- v, changed := f.DecMapBoolStringV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool,
- d *Decoder) (_ map[bool]string, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 17)
- v = make(map[bool]string, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv string
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = ""
- }
- continue
- }
- mv = dd.DecodeString()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]uint)
- v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d)
- }
-}
-func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) {
- v, changed := f.DecMapBoolUintV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool,
- d *Decoder) (_ map[bool]uint, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[bool]uint, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv uint
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]uint8)
- v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d)
- }
-}
-func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) {
- v, changed := f.DecMapBoolUint8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool,
- d *Decoder) (_ map[bool]uint8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[bool]uint8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv uint8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]uint16)
- v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d)
- }
-}
-func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) {
- v, changed := f.DecMapBoolUint16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool,
- d *Decoder) (_ map[bool]uint16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[bool]uint16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv uint16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]uint32)
- v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d)
- }
-}
-func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) {
- v, changed := f.DecMapBoolUint32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool,
- d *Decoder) (_ map[bool]uint32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[bool]uint32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv uint32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]uint64)
- v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d)
- }
-}
-func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) {
- v, changed := f.DecMapBoolUint64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool,
- d *Decoder) (_ map[bool]uint64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[bool]uint64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv uint64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeUint64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]uintptr)
- v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d)
- }
-}
-func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) {
- v, changed := f.DecMapBoolUintptrV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool,
- d *Decoder) (_ map[bool]uintptr, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[bool]uintptr, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv uintptr
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]int)
- v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d)
- }
-}
-func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) {
- v, changed := f.DecMapBoolIntV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool,
- d *Decoder) (_ map[bool]int, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[bool]int, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv int
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]int8)
- v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d)
- }
-}
-func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) {
- v, changed := f.DecMapBoolInt8V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool,
- d *Decoder) (_ map[bool]int8, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[bool]int8, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv int8
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]int16)
- v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d)
- }
-}
-func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) {
- v, changed := f.DecMapBoolInt16V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool,
- d *Decoder) (_ map[bool]int16, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
- v = make(map[bool]int16, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv int16
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]int32)
- v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d)
- }
-}
-func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) {
- v, changed := f.DecMapBoolInt32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool,
- d *Decoder) (_ map[bool]int32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[bool]int32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv int32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]int64)
- v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d)
- }
-}
-func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) {
- v, changed := f.DecMapBoolInt64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool,
- d *Decoder) (_ map[bool]int64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[bool]int64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv int64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeInt64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]float32)
- v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d)
- }
-}
-func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) {
- v, changed := f.DecMapBoolFloat32V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool,
- d *Decoder) (_ map[bool]float32, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
- v = make(map[bool]float32, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv float32
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = float32(chkOvf.Float32V(dd.DecodeFloat64()))
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]float64)
- v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d)
- }
-}
-func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) {
- v, changed := f.DecMapBoolFloat64V(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool,
- d *Decoder) (_ map[bool]float64, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
- v = make(map[bool]float64, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv float64
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = 0
- }
- continue
- }
- mv = dd.DecodeFloat64()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-
-func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[bool]bool)
- v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
- } else {
- fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d)
- }
-}
-func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) {
- v, changed := f.DecMapBoolBoolV(*vp, true, d)
- if changed {
- *vp = v
- }
-}
-func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool,
- d *Decoder) (_ map[bool]bool, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators()
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
- v = make(map[bool]bool, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- var mk bool
- var mv bool
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep {
- dd.ReadMapElemKey()
- }
- mk = dd.DecodeBool()
- if esep {
- dd.ReadMapElemValue()
- }
- if dd.TryDecodeAsNil() {
- if v == nil {
- } else if d.h.DeleteOnNilMapValue {
- delete(v, mk)
- } else {
- v[mk] = false
- }
- continue
- }
- mv = dd.DecodeBool()
- if v != nil {
- v[mk] = mv
- }
- }
- dd.ReadMapEnd()
- return v, changed
-}
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
deleted file mode 100644
index 2023e05d..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
+++ /dev/null
@@ -1,544 +0,0 @@
-// +build !notfastpath
-
-// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-// Code generated from fast-path.go.tmpl - DO NOT EDIT.
-
-package codec
-
-// Fast path functions try to create a fast path encode or decode implementation
-// for common maps and slices.
-//
-// We define the functions and register then in this single file
-// so as not to pollute the encode.go and decode.go, and create a dependency in there.
-// This file can be omitted without causing a build failure.
-//
-// The advantage of fast paths is:
-// - Many calls bypass reflection altogether
-//
-// Currently support
-// - slice of all builtin types,
-// - map of all builtin types to string or interface value
-// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
-// This should provide adequate "typical" implementations.
-//
-// Note that fast track decode functions must handle values for which an address cannot be obtained.
-// For example:
-// m2 := map[string]int{}
-// p2 := []interface{}{m2}
-// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
-//
-
-import (
- "reflect"
- "sort"
-)
-
-const fastpathEnabled = true
-
-type fastpathT struct {}
-
-var fastpathTV fastpathT
-
-type fastpathE struct {
- rtid uintptr
- rt reflect.Type
- encfn func(*Encoder, *codecFnInfo, reflect.Value)
- decfn func(*Decoder, *codecFnInfo, reflect.Value)
-}
-
-type fastpathA [{{ .FastpathLen }}]fastpathE
-
-func (x *fastpathA) index(rtid uintptr) int {
- // use binary search to grab the index (adapted from sort/search.go)
- h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
- for i < j {
- h = i + (j-i)/2
- if x[h].rtid < rtid {
- i = h + 1
- } else {
- j = h
- }
- }
- if i < {{ .FastpathLen }} && x[i].rtid == rtid {
- return i
- }
- return -1
-}
-
-type fastpathAslice []fastpathE
-
-func (x fastpathAslice) Len() int { return len(x) }
-func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
-func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-var fastpathAV fastpathA
-
-// due to possible initialization loop error, make fastpath in an init()
-func init() {
- i := 0
- fn := func(v interface{},
- fe func(*Encoder, *codecFnInfo, reflect.Value),
- fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
- xrt := reflect.TypeOf(v)
- xptr := rt2id(xrt)
- fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
- i++
- return
- }
- {{/* do not register []uint8 in fast-path */}}
- {{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
- fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}{{end}}
-
- {{range .Values}}{{if not .Primitive}}{{if .MapKey }}
- fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
-
- sort.Sort(fastpathAslice(fastpathAV[:]))
-}
-
-// -- encode
-
-// -- -- fast path type switch
-func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
- switch v := iv.(type) {
-
-{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
- case []{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
- case *[]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
-*/}}{{end}}{{end}}{{end}}{{end}}
-
-{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
- case map[{{ .MapKey }}]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
- case *map[{{ .MapKey }}]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e){{/*
-*/}}{{end}}{{end}}{{end}}
-
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-{{/*
-**** removing this block, as they are never called directly ****
-
-
-
-**** removing this block, as they are never called directly ****
-
-
-
-func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
- switch v := iv.(type) {
-{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
- case []{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
- case *[]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
-{{end}}{{end}}{{end}}
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
- switch v := iv.(type) {
-{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
- case map[{{ .MapKey }}]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
- case *map[{{ .MapKey }}]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
-{{end}}{{end}}{{end}}
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-
-
-**** removing this block, as they are never called directly ****
-
-
-
-**** removing this block, as they are never called directly ****
-*/}}
-
-// -- -- fast path functions
-{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
-func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
- if f.ti.mbs {
- fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e)
- } else {
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e)
- }
-}
-func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
- if v == nil { e.e.EncodeNil(); return }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteArrayStart(len(v))
- if esep {
- for _, v2 := range v {
- ee.WriteArrayElem()
- {{ encmd .Elem "v2"}}
- }
- } else {
- for _, v2 := range v {
- {{ encmd .Elem "v2"}}
- }
- } {{/*
- for _, v2 := range v {
- if esep { ee.WriteArrayElem() }
- {{ encmd .Elem "v2"}}
- } */}}
- ee.WriteArrayEnd()
-}
-func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
- ee, esep := e.e, e.hh.hasElemSeparators()
- if len(v)%2 == 1 {
- e.errorf("mapBySlice requires even slice length, but got %v", len(v))
- return
- }
- ee.WriteMapStart(len(v) / 2)
- if esep {
- for j, v2 := range v {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- {{ encmd .Elem "v2"}}
- }
- } else {
- for _, v2 := range v {
- {{ encmd .Elem "v2"}}
- }
- } {{/*
- for j, v2 := range v {
- if esep {
- if j%2 == 0 {
- ee.WriteMapElemKey()
- } else {
- ee.WriteMapElemValue()
- }
- }
- {{ encmd .Elem "v2"}}
- } */}}
- ee.WriteMapEnd()
-}
-{{end}}{{end}}{{end}}
-
-{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
-func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
- fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
-}
-func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
- if v == nil { e.e.EncodeNil(); return }
- ee, esep := e.e, e.hh.hasElemSeparators()
- ee.WriteMapStart(len(v))
- if e.h.Canonical {
- {{if eq .MapKey "interface{}"}}{{/* out of band
- */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
- e2 := NewEncoderBytes(&mksv, e.hh)
- v2 := make([]bytesI, len(v))
- var i, l int
- var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}}
- for k2, _ := range v {
- l = len(mksv)
- e2.MustEncode(k2)
- vp = &v2[i]
- vp.v = mksv[l:]
- vp.i = k2
- i++
- }
- sort.Sort(bytesISlice(v2))
- if esep {
- for j := range v2 {
- ee.WriteMapElemKey()
- e.asis(v2[j].v)
- ee.WriteMapElemValue()
- e.encode(v[v2[j].i])
- }
- } else {
- for j := range v2 {
- e.asis(v2[j].v)
- e.encode(v[v2[j].i])
- }
- } {{/*
- for j := range v2 {
- if esep { ee.WriteMapElemKey() }
- e.asis(v2[j].v)
- if esep { ee.WriteMapElemValue() }
- e.encode(v[v2[j].i])
- } */}} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
- var i int
- for k, _ := range v {
- v2[i] = {{ $x }}(k)
- i++
- }
- sort.Sort({{ sorttype .MapKey false}}(v2))
- if esep {
- for _, k2 := range v2 {
- ee.WriteMapElemKey()
- {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
- ee.WriteMapElemValue()
- {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
- }
- } else {
- for _, k2 := range v2 {
- {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
- {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
- }
- } {{/*
- for _, k2 := range v2 {
- if esep { ee.WriteMapElemKey() }
- {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
- if esep { ee.WriteMapElemValue() }
- {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
- } */}} {{end}}
- } else {
- if esep {
- for k2, v2 := range v {
- ee.WriteMapElemKey()
- {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
- ee.WriteMapElemValue()
- {{ encmd .Elem "v2"}}
- }
- } else {
- for k2, v2 := range v {
- {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
- {{ encmd .Elem "v2"}}
- }
- } {{/*
- for k2, v2 := range v {
- if esep { ee.WriteMapElemKey() }
- {{if eq .MapKey "string"}}ee.EncodeString(cUTF8, k2){{else}}{{ encmd .MapKey "k2"}}{{end}}
- if esep { ee.WriteMapElemValue() }
- {{ encmd .Elem "v2"}}
- } */}}
- }
- ee.WriteMapEnd()
-}
-{{end}}{{end}}{{end}}
-
-// -- decode
-
-// -- -- fast path type switch
-func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
- var changed bool
- switch v := iv.(type) {
-{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}{{if ne .Elem "uint8"}}
- case []{{ .Elem }}:
- var v2 []{{ .Elem }}
- v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- case *[]{{ .Elem }}:
- var v2 []{{ .Elem }}
- v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
- if changed {
- *v = v2
- }{{/*
-*/}}{{end}}{{end}}{{end}}{{end}}
-{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
-// maps only change if nil, and in that case, there's no point copying
-*/}}
- case map[{{ .MapKey }}]{{ .Elem }}:
- fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d)
- case *map[{{ .MapKey }}]{{ .Elem }}:
- var v2 map[{{ .MapKey }}]{{ .Elem }}
- v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d)
- if changed {
- *v = v2
- }{{/*
-*/}}{{end}}{{end}}{{end}}
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
- switch v := iv.(type) {
-{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
- case *[]{{ .Elem }}:
- *v = nil {{/*
-*/}}{{end}}{{end}}{{end}}
-{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
- case *map[{{ .MapKey }}]{{ .Elem }}:
- *v = nil {{/*
-*/}}{{end}}{{end}}{{end}}
- default:
- _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
- return false
- }
- return true
-}
-
-// -- -- fast path functions
-{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
-{{/*
-Slices can change if they
-- did not come from an array
-- are addressable (from a ptr)
-- are settable (e.g. contained in an interface{})
-*/}}
-func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
- if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*[]{{ .Elem }})
- v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d)
- if changed { *vp = v }
- } else {
- v := rv2i(rv).([]{{ .Elem }})
- v2, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, !array, d)
- if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) {
- copy(v, v2)
- }
- }
-}
-func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
- v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
- if changed { *vp = v }
-}
-func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
- dd := d.d{{/*
- // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil()
- */}}
- slh, containerLenS := d.decSliceHelperStart()
- if containerLenS == 0 {
- if canChange {
- if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] }
- changed = true
- }
- slh.End()
- return v, changed
- }
- hasLen := containerLenS > 0
- var xlen int
- if hasLen && canChange {
- if containerLenS > cap(v) {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
- if xlen <= cap(v) {
- v = v[:xlen]
- } else {
- v = make([]{{ .Elem }}, xlen)
- }
- changed = true
- } else if containerLenS != len(v) {
- v = v[:containerLenS]
- changed = true
- }
- }
- j := 0
- for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ {
- if j == 0 && len(v) == 0 && canChange {
- if hasLen {
- xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
- } else {
- xlen = 8
- }
- v = make([]{{ .Elem }}, xlen)
- changed = true
- }
- // if indefinite, etc, then expand the slice if necessary
- var decodeIntoBlank bool
- if j >= len(v) {
- if canChange {
- v = append(v, {{ zerocmd .Elem }})
- changed = true
- } else {
- d.arrayCannotExpand(len(v), j+1)
- decodeIntoBlank = true
- }
- }
- slh.ElemContainerState(j)
- if decodeIntoBlank {
- d.swallow()
- } else if dd.TryDecodeAsNil() {
- v[j] = {{ zerocmd .Elem }}
- } else {
- {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
- }
- }
- if canChange {
- if j < len(v) {
- v = v[:j]
- changed = true
- } else if j == 0 && v == nil {
- v = make([]{{ .Elem }}, 0)
- changed = true
- }
- }
- slh.End()
- return v, changed
-}
-{{end}}{{end}}{{end}}
-
-{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
-{{/*
-Maps can change if they are
-- addressable (from a ptr)
-- settable (e.g. contained in an interface{})
-*/}}
-func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
- if rv.Kind() == reflect.Ptr {
- vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
- v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d);
- if changed { *vp = v }
- } else {
- fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d)
- }
-}
-func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
- v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d)
- if changed { *vp = v }
-}
-func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool,
- d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
- dd, esep := d.d, d.hh.hasElemSeparators(){{/*
- // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil()
- */}}
- containerLen := dd.ReadMapStart()
- if canChange && v == nil {
- xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
- v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
- changed = true
- }
- if containerLen == 0 {
- dd.ReadMapEnd()
- return v, changed
- }
- {{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
- {{end}}var mk {{ .MapKey }}
- var mv {{ .Elem }}
- hasLen := containerLen > 0
- for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
- if esep { dd.ReadMapElemKey() }
- {{ if eq .MapKey "interface{}" }}mk = nil
- d.decode(&mk)
- if bv, bok := mk.([]byte); bok {
- mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
- }{{ else }}mk = {{ decmd .MapKey }}{{ end }}
- if esep { dd.ReadMapElemValue() }
- if dd.TryDecodeAsNil() {
- if v == nil {} else if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} }
- continue
- }
- {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
- d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
- if v != nil { v[mk] = mv }
- }
- dd.ReadMapEnd()
- return v, changed
-}
-{{end}}{{end}}{{end}}
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
deleted file mode 100644
index 8323b549..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
+++ /dev/null
@@ -1,42 +0,0 @@
-{{var "v"}} := *{{ .Varname }}
-{{var "l"}} := r.ReadMapStart()
-{{var "bh"}} := z.DecBasicHandle()
-if {{var "v"}} == nil {
- {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
- {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
- *{{ .Varname }} = {{var "v"}}
-}
-var {{var "mk"}} {{ .KTyp }}
-var {{var "mv"}} {{ .Typ }}
-var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
-if {{var "bh"}}.MapValueReset {
- {{if decElemKindPtr}}{{var "mg"}} = true
- {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
- {{else if not decElemKindImmutable}}{{var "mg"}} = true
- {{end}} }
-if {{var "l"}} != 0 {
-{{var "hl"}} := {{var "l"}} > 0
- for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
- r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
- {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
-{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
- {{var "mk"}} = string({{var "bv"}})
- }{{ end }}{{if decElemKindPtr}}
- {{var "ms"}} = true{{end}}
- if {{var "mg"}} {
- {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
- if {{var "mok"}} {
- {{var "ms"}} = false
- } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
- } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
- r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
- {{var "mdn"}} = false
- {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
- if {{var "mdn"}} {
- if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
- } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
- {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
- }
-}
-} // else len==0: TODO: Should we clear map entries?
-r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
deleted file mode 100644
index fd52690c..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// +build !go1.7 safe appengine
-
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-package codec
-
-import (
- "reflect"
- "sync/atomic"
- "time"
-)
-
-const safeMode = true
-
-// stringView returns a view of the []byte as a string.
-// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
-// In regular safe mode, it is an allocation and copy.
-//
-// Usage: Always maintain a reference to v while result of this call is in use,
-// and call keepAlive4BytesView(v) at point where done with view.
-func stringView(v []byte) string {
- return string(v)
-}
-
-// bytesView returns a view of the string as a []byte.
-// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
-// In regular safe mode, it is an allocation and copy.
-//
-// Usage: Always maintain a reference to v while result of this call is in use,
-// and call keepAlive4BytesView(v) at point where done with view.
-func bytesView(v string) []byte {
- return []byte(v)
-}
-
-func definitelyNil(v interface{}) bool {
- // this is a best-effort option.
- // We just return false, so we don't unnecessarily incur the cost of reflection this early.
- return false
-}
-
-func rv2i(rv reflect.Value) interface{} {
- return rv.Interface()
-}
-
-func rt2id(rt reflect.Type) uintptr {
- return reflect.ValueOf(rt).Pointer()
-}
-
-func rv2rtid(rv reflect.Value) uintptr {
- return reflect.ValueOf(rv.Type()).Pointer()
-}
-
-func i2rtid(i interface{}) uintptr {
- return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
-}
-
-// --------------------------
-
-func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
- switch v.Kind() {
- case reflect.Invalid:
- return true
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- if deref {
- if v.IsNil() {
- return true
- }
- return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
- }
- return v.IsNil()
- case reflect.Struct:
- return isEmptyStruct(v, tinfos, deref, checkStruct)
- }
- return false
-}
-
-// --------------------------
-// type ptrToRvMap struct{}
-
-// func (*ptrToRvMap) init() {}
-// func (*ptrToRvMap) get(i interface{}) reflect.Value {
-// return reflect.ValueOf(i).Elem()
-// }
-
-// --------------------------
-type atomicTypeInfoSlice struct { // expected to be 2 words
- v atomic.Value
-}
-
-func (x *atomicTypeInfoSlice) load() []rtid2ti {
- i := x.v.Load()
- if i == nil {
- return nil
- }
- return i.([]rtid2ti)
-}
-
-func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
- x.v.Store(p)
-}
-
-// --------------------------
-func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
- rv.SetBytes(d.rawBytes())
-}
-
-func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
- rv.SetString(d.d.DecodeString())
-}
-
-func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
- rv.SetBool(d.d.DecodeBool())
-}
-
-func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
- rv.Set(reflect.ValueOf(d.d.DecodeTime()))
-}
-
-func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
- fv := d.d.DecodeFloat64()
- if chkOvf.Float32(fv) {
- d.errorf("float32 overflow: %v", fv)
- }
- rv.SetFloat(fv)
-}
-
-func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
- rv.SetFloat(d.d.DecodeFloat64())
-}
-
-func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
- rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
-}
-
-func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
- rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8))
-}
-
-func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
- rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16))
-}
-
-func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
- rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32))
-}
-
-func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
- rv.SetInt(d.d.DecodeInt64())
-}
-
-func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
- rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
-}
-
-func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
- rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
-}
-
-func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
- rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8))
-}
-
-func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
- rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16))
-}
-
-func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
- rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32))
-}
-
-func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
- rv.SetUint(d.d.DecodeUint64())
-}
-
-// ----------------
-
-func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeBool(rv.Bool())
-}
-
-func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeTime(rv2i(rv).(time.Time))
-}
-
-func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeString(cUTF8, rv.String())
-}
-
-func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeFloat64(rv.Float())
-}
-
-func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeFloat32(float32(rv.Float()))
-}
-
-func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeInt(rv.Int())
-}
-
-func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeInt(rv.Int())
-}
-
-func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeInt(rv.Int())
-}
-
-func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeInt(rv.Int())
-}
-
-func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeInt(rv.Int())
-}
-
-func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeUint(rv.Uint())
-}
-
-func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeUint(rv.Uint())
-}
-
-func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeUint(rv.Uint())
-}
-
-func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeUint(rv.Uint())
-}
-
-func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeUint(rv.Uint())
-}
-
-func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
- e.e.EncodeUint(rv.Uint())
-}
-
-// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
-// //
-// // Usage: call this at point where done with the bytes view.
-// func keepAlive4BytesView(v string) {}
-
-// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
-// //
-// // Usage: call this at point where done with the string view.
-// func keepAlive4StringView(v []byte) {}
-
-// func definitelyNil(v interface{}) bool {
-// rv := reflect.ValueOf(v)
-// switch rv.Kind() {
-// case reflect.Invalid:
-// return true
-// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
-// return rv.IsNil()
-// default:
-// return false
-// }
-// }
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/helper_unsafe.go
deleted file mode 100644
index 1fb6d202..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/helper_unsafe.go
+++ /dev/null
@@ -1,638 +0,0 @@
-// +build !safe
-// +build !appengine
-// +build go1.7
-
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-package codec
-
-import (
- "reflect"
- "sync/atomic"
- "time"
- "unsafe"
-)
-
-// This file has unsafe variants of some helper methods.
-// NOTE: See helper_not_unsafe.go for the usage information.
-
-// var zeroRTv [4]uintptr
-
-const safeMode = false
-const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
-
-type unsafeString struct {
- Data unsafe.Pointer
- Len int
-}
-
-type unsafeSlice struct {
- Data unsafe.Pointer
- Len int
- Cap int
-}
-
-type unsafeIntf struct {
- typ unsafe.Pointer
- word unsafe.Pointer
-}
-
-type unsafeReflectValue struct {
- typ unsafe.Pointer
- ptr unsafe.Pointer
- flag uintptr
-}
-
-func stringView(v []byte) string {
- if len(v) == 0 {
- return ""
- }
- bx := (*unsafeSlice)(unsafe.Pointer(&v))
- return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len}))
-}
-
-func bytesView(v string) []byte {
- if len(v) == 0 {
- return zeroByteSlice
- }
- sx := (*unsafeString)(unsafe.Pointer(&v))
- return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len}))
-}
-
-func definitelyNil(v interface{}) bool {
- // There is no global way of checking if an interface is nil.
- // For true references (map, ptr, func, chan), you can just look
- // at the word of the interface. However, for slices, you have to dereference
- // the word, and get a pointer to the 3-word interface value.
- //
- // However, the following are cheap calls
- // - TypeOf(interface): cheap 2-line call.
- // - ValueOf(interface{}): expensive
- // - type.Kind: cheap call through an interface
- // - Value.Type(): cheap call
- // except it's a method value (e.g. r.Read, which implies that it is a Func)
-
- return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
-}
-
-func rv2i(rv reflect.Value) interface{} {
- // TODO: consider a more generally-known optimization for reflect.Value ==> Interface
- //
- // Currently, we use this fragile method that taps into implememtation details from
- // the source go stdlib reflect/value.go, and trims the implementation.
-
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
- var ptr unsafe.Pointer
- if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
- ptr = *(*unsafe.Pointer)(urv.ptr)
- } else {
- ptr = urv.ptr
- }
- return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
-}
-
-func rt2id(rt reflect.Type) uintptr {
- return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
-}
-
-func rv2rtid(rv reflect.Value) uintptr {
- return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
-}
-
-func i2rtid(i interface{}) uintptr {
- return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
-}
-
-// --------------------------
-
-func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
- if urv.flag == 0 {
- return true
- }
- switch v.Kind() {
- case reflect.Invalid:
- return true
- case reflect.String:
- return (*unsafeString)(urv.ptr).Len == 0
- case reflect.Slice:
- return (*unsafeSlice)(urv.ptr).Len == 0
- case reflect.Bool:
- return !*(*bool)(urv.ptr)
- case reflect.Int:
- return *(*int)(urv.ptr) == 0
- case reflect.Int8:
- return *(*int8)(urv.ptr) == 0
- case reflect.Int16:
- return *(*int16)(urv.ptr) == 0
- case reflect.Int32:
- return *(*int32)(urv.ptr) == 0
- case reflect.Int64:
- return *(*int64)(urv.ptr) == 0
- case reflect.Uint:
- return *(*uint)(urv.ptr) == 0
- case reflect.Uint8:
- return *(*uint8)(urv.ptr) == 0
- case reflect.Uint16:
- return *(*uint16)(urv.ptr) == 0
- case reflect.Uint32:
- return *(*uint32)(urv.ptr) == 0
- case reflect.Uint64:
- return *(*uint64)(urv.ptr) == 0
- case reflect.Uintptr:
- return *(*uintptr)(urv.ptr) == 0
- case reflect.Float32:
- return *(*float32)(urv.ptr) == 0
- case reflect.Float64:
- return *(*float64)(urv.ptr) == 0
- case reflect.Interface:
- isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
- if deref {
- if isnil {
- return true
- }
- return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
- }
- return isnil
- case reflect.Ptr:
- isnil := urv.ptr == nil
- if deref {
- if isnil {
- return true
- }
- return isEmptyValue(v.Elem(), tinfos, deref, checkStruct)
- }
- return isnil
- case reflect.Struct:
- return isEmptyStruct(v, tinfos, deref, checkStruct)
- case reflect.Map, reflect.Array, reflect.Chan:
- return v.Len() == 0
- }
- return false
-}
-
-// --------------------------
-
-// atomicTypeInfoSlice contains length and pointer to the array for a slice.
-// It is expected to be 2 words.
-//
-// Previously, we atomically loaded and stored the length and array pointer separately,
-// which could lead to some races.
-// We now just atomically store and load the pointer to the value directly.
-
-type atomicTypeInfoSlice struct { // expected to be 2 words
- l int // length of the data array (must be first in struct, for 64-bit alignment necessary for 386)
- v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference
-}
-
-func (x *atomicTypeInfoSlice) load() []rtid2ti {
- xp := unsafe.Pointer(x)
- x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp))
- if x2.l == 0 {
- return nil
- }
- return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l}))
-}
-
-func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
- s := (*unsafeSlice)(unsafe.Pointer(&p))
- xp := unsafe.Pointer(x)
- atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data}))
-}
-
-// --------------------------
-func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*[]byte)(urv.ptr) = d.rawBytes()
-}
-
-func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*string)(urv.ptr) = d.d.DecodeString()
-}
-
-func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*bool)(urv.ptr) = d.d.DecodeBool()
-}
-
-func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*time.Time)(urv.ptr) = d.d.DecodeTime()
-}
-
-func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
- fv := d.d.DecodeFloat64()
- if chkOvf.Float32(fv) {
- d.errorf("float32 overflow: %v", fv)
- }
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*float32)(urv.ptr) = float32(fv)
-}
-
-func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*float64)(urv.ptr) = d.d.DecodeFloat64()
-}
-
-func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
-}
-
-func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
-}
-
-func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
-}
-
-func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
-}
-
-func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*int64)(urv.ptr) = d.d.DecodeInt64()
-}
-
-func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
-}
-
-func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
-}
-
-func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
-}
-
-func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
-}
-
-func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
-}
-
-func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
- urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- *(*uint64)(urv.ptr) = d.d.DecodeUint64()
-}
-
-// ------------
-
-func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeBool(*(*bool)(v.ptr))
-}
-
-func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeTime(*(*time.Time)(v.ptr))
-}
-
-func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeString(cUTF8, *(*string)(v.ptr))
-}
-
-func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeFloat64(*(*float64)(v.ptr))
-}
-
-func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeFloat32(*(*float32)(v.ptr))
-}
-
-func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeInt(int64(*(*int)(v.ptr)))
-}
-
-func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeInt(int64(*(*int8)(v.ptr)))
-}
-
-func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeInt(int64(*(*int16)(v.ptr)))
-}
-
-func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeInt(int64(*(*int32)(v.ptr)))
-}
-
-func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeInt(int64(*(*int64)(v.ptr)))
-}
-
-func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeUint(uint64(*(*uint)(v.ptr)))
-}
-
-func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeUint(uint64(*(*uint8)(v.ptr)))
-}
-
-func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeUint(uint64(*(*uint16)(v.ptr)))
-}
-
-func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeUint(uint64(*(*uint32)(v.ptr)))
-}
-
-func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeUint(uint64(*(*uint64)(v.ptr)))
-}
-
-func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
- v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
- e.e.EncodeUint(uint64(*(*uintptr)(v.ptr)))
-}
-
-// ------------
-
-// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
-// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-// // if urv.flag&unsafeFlagIndir != 0 {
-// // urv.ptr = *(*unsafe.Pointer)(urv.ptr)
-// // }
-// *(*[]byte)(urv.ptr) = d.rawBytes()
-// }
-
-// func rv0t(rt reflect.Type) reflect.Value {
-// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
-// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
-// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
-// return *(*reflect.Value)(unsafe.Pointer(&uv})
-// }
-
-// func rv2i(rv reflect.Value) interface{} {
-// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-// // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
-// var ptr unsafe.Pointer
-// // kk := reflect.Kind(urv.flag & (1<<5 - 1))
-// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 {
-// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
-// ptr = *(*unsafe.Pointer)(urv.ptr)
-// } else {
-// ptr = urv.ptr
-// }
-// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
-// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
-// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
-// }
-
-// func definitelyNil(v interface{}) bool {
-// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v))
-// if ui.word == nil {
-// return true
-// }
-// var tk = reflect.TypeOf(v).Kind()
-// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil
-// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n",
-// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
-// }
-
-// func keepAlive4BytesView(v string) {
-// runtime.KeepAlive(v)
-// }
-
-// func keepAlive4StringView(v []byte) {
-// runtime.KeepAlive(v)
-// }
-
-// func rt2id(rt reflect.Type) uintptr {
-// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
-// // var i interface{} = rt
-// // // ui := (*unsafeIntf)(unsafe.Pointer(&i))
-// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word
-// }
-
-// func rv2i(rv reflect.Value) interface{} {
-// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-// // non-reference type: already indir
-// // reference type: depend on flagIndir property ('cos maybe was double-referenced)
-// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 )
-// // rvk := reflect.Kind(urv.flag & (1<<5 - 1))
-// // if (rvk == reflect.Chan ||
-// // rvk == reflect.Func ||
-// // rvk == reflect.Interface ||
-// // rvk == reflect.Map ||
-// // rvk == reflect.Ptr ||
-// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 {
-// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
-// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
-// // }
-// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 {
-// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
-// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
-// }
-// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type())
-// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
-// }
-
-// const (
-// unsafeRvFlagKindMask = 1<<5 - 1
-// unsafeRvKindDirectIface = 1 << 5
-// unsafeRvFlagIndir = 1 << 7
-// unsafeRvFlagAddr = 1 << 8
-// unsafeRvFlagMethod = 1 << 9
-
-// _USE_RV_INTERFACE bool = false
-// _UNSAFE_RV_DEBUG = true
-// )
-
-// type unsafeRtype struct {
-// _ [2]uintptr
-// _ uint32
-// _ uint8
-// _ uint8
-// _ uint8
-// kind uint8
-// _ [2]uintptr
-// _ int32
-// }
-
-// func _rv2i(rv reflect.Value) interface{} {
-// // Note: From use,
-// // - it's never an interface
-// // - the only calls here are for ifaceIndir types.
-// // (though that conditional is wrong)
-// // To know for sure, we need the value of t.kind (which is not exposed).
-// //
-// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct)
-// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string
-// // - Type Direct, Value indirect: ==> map???
-// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map
-// //
-// // TRANSLATES TO:
-// // if typeIndirect { } else if valueIndirect { } else { }
-// //
-// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored.
-
-// if _USE_RV_INTERFACE {
-// return rv.Interface()
-// }
-// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-
-// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
-// // println("***** IS flag method or interface: delegating to rv.Interface()")
-// // return rv.Interface()
-// // }
-
-// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
-// // println("***** IS Interface: delegate to rv.Interface")
-// // return rv.Interface()
-// // }
-// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 {
-// // if urv.flag&unsafeRvFlagAddr == 0 {
-// // println("***** IS ifaceIndir typ")
-// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ}
-// // // return *(*interface{})(unsafe.Pointer(&ui))
-// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
-// // }
-// // } else if urv.flag&unsafeRvFlagIndir != 0 {
-// // println("***** IS flagindir")
-// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
-// // } else {
-// // println("***** NOT flagindir")
-// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
-// // }
-// // println("***** default: delegate to rv.Interface")
-
-// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ))
-// if _UNSAFE_RV_DEBUG {
-// fmt.Printf(">>>> start: %v: ", rv.Type())
-// fmt.Printf("%v - %v\n", *urv, *urt)
-// }
-// if urt.kind&unsafeRvKindDirectIface == 0 {
-// if _UNSAFE_RV_DEBUG {
-// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type())
-// }
-// // println("***** IS ifaceIndir typ")
-// // if true || urv.flag&unsafeRvFlagAddr == 0 {
-// // // println(" ***** IS NOT addr")
-// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
-// // }
-// } else if urv.flag&unsafeRvFlagIndir != 0 {
-// if _UNSAFE_RV_DEBUG {
-// fmt.Printf("**** +flagIndir type: %v\n", rv.Type())
-// }
-// // println("***** IS flagindir")
-// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
-// } else {
-// if _UNSAFE_RV_DEBUG {
-// fmt.Printf("**** -flagIndir type: %v\n", rv.Type())
-// }
-// // println("***** NOT flagindir")
-// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
-// }
-// // println("***** default: delegating to rv.Interface()")
-// // return rv.Interface()
-// }
-
-// var staticM0 = make(map[string]uint64)
-// var staticI0 = (int32)(-5)
-
-// func staticRv2iTest() {
-// i0 := (int32)(-5)
-// m0 := make(map[string]uint16)
-// m0["1"] = 1
-// for _, i := range []interface{}{
-// (int)(7),
-// (uint)(8),
-// (int16)(-9),
-// (uint16)(19),
-// (uintptr)(77),
-// (bool)(true),
-// float32(-32.7),
-// float64(64.9),
-// complex(float32(19), 5),
-// complex(float64(-32), 7),
-// [4]uint64{1, 2, 3, 4},
-// (chan<- int)(nil), // chan,
-// rv2i, // func
-// io.Writer(ioutil.Discard),
-// make(map[string]uint),
-// (map[string]uint)(nil),
-// staticM0,
-// m0,
-// &m0,
-// i0,
-// &i0,
-// &staticI0,
-// &staticM0,
-// []uint32{6, 7, 8},
-// "abc",
-// Raw{},
-// RawExt{},
-// &Raw{},
-// &RawExt{},
-// unsafe.Pointer(&i0),
-// } {
-// i2 := rv2i(reflect.ValueOf(i))
-// eq := reflect.DeepEqual(i, i2)
-// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
-// }
-// // os.Exit(0)
-// }
-
-// func init() {
-// staticRv2iTest()
-// }
-
-// func rv2i(rv reflect.Value) interface{} {
-// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
-// return rv.Interface()
-// }
-// // var i interface{}
-// // ui := (*unsafeIntf)(unsafe.Pointer(&i))
-// var ui unsafeIntf
-// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
-// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
-// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
-// if urv.flag&unsafeRvFlagAddr != 0 {
-// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
-// return rv.Interface()
-// }
-// println("****** indirect type/kind")
-// ui.word = urv.ptr
-// } else if urv.flag&unsafeRvFlagIndir != 0 {
-// println("****** unsafe rv flag indir")
-// ui.word = *(*unsafe.Pointer)(urv.ptr)
-// } else {
-// println("****** default: assign prt to word directly")
-// ui.word = urv.ptr
-// }
-// // ui.word = urv.ptr
-// ui.typ = urv.typ
-// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
-// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
-// return *(*interface{})(unsafe.Pointer(&ui))
-// // return i
-// }
diff --git a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/json.go b/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/json.go
deleted file mode 100644
index bdd19966..00000000
--- a/src/disposa.blue/margo/vendor/github.com/ugorji/go/codec/json.go
+++ /dev/null
@@ -1,1423 +0,0 @@
-// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
-// Use of this source code is governed by a MIT license found in the LICENSE file.
-
-package codec
-
-// By default, this json support uses base64 encoding for bytes, because you cannot
-// store and read any arbitrary string in json (only unicode).
-// However, the user can configre how to encode/decode bytes.
-//
-// This library specifically supports UTF-8 for encoding and decoding only.
-//
-// Note that the library will happily encode/decode things which are not valid
-// json e.g. a map[int64]string. We do it for consistency. With valid json,
-// we will encode and decode appropriately.
-// Users can specify their map type if necessary to force it.
-//
-// Note:
-// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
-// We implement it here.
-
-// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver
-// MUST not call one-another.
-
-import (
- "bytes"
- "encoding/base64"
- "math"
- "reflect"
- "strconv"
- "time"
- "unicode"
- "unicode/utf16"
- "unicode/utf8"
-)
-
-//--------------------------------
-
-var jsonLiterals = [...]byte{
- '"', 't', 'r', 'u', 'e', '"',
- '"', 'f', 'a', 'l', 's', 'e', '"',
- '"', 'n', 'u', 'l', 'l', '"',
-}
-
-const (
- jsonLitTrueQ = 0
- jsonLitTrue = 1
- jsonLitFalseQ = 6
- jsonLitFalse = 7
- jsonLitNullQ = 13
- jsonLitNull = 14
-)
-
-const (
- jsonU4Chk2 = '0'
- jsonU4Chk1 = 'a' - 10
- jsonU4Chk0 = 'A' - 10
-
- jsonScratchArrayLen = 64
-)
-
-const (
- // If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
- // - If we see first character of null, false or true,
- // do not validate subsequent characters.
- // - e.g. if we see a n, assume null and skip next 3 characters,
- // and do not validate they are ull.
- // P.S. Do not expect a significant decoding boost from this.
- jsonValidateSymbols = true
-
- jsonSpacesOrTabsLen = 128
-
- jsonAlwaysReturnInternString = false
-)
-
-var (
- // jsonTabs and jsonSpaces are used as caches for indents
- jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte
-
- jsonCharHtmlSafeSet bitset128
- jsonCharSafeSet bitset128
- jsonCharWhitespaceSet bitset256
- jsonNumSet bitset256
-)
-
-func init() {
- for i := 0; i < jsonSpacesOrTabsLen; i++ {
- jsonSpaces[i] = ' '
- jsonTabs[i] = '\t'
- }
-
- // populate the safe values as true: note: ASCII control characters are (0-31)
- // jsonCharSafeSet: all true except (0-31) " \
- // jsonCharHtmlSafeSet: all true except (0-31) " \ < > &
- var i byte
- for i = 32; i < utf8.RuneSelf; i++ {
- switch i {
- case '"', '\\':
- case '<', '>', '&':
- jsonCharSafeSet.set(i) // = true
- default:
- jsonCharSafeSet.set(i)
- jsonCharHtmlSafeSet.set(i)
- }
- }
- for i = 0; i <= utf8.RuneSelf; i++ {
- switch i {
- case ' ', '\t', '\r', '\n':
- jsonCharWhitespaceSet.set(i)
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-':
- jsonNumSet.set(i)
- }
- }
-}
-
-// ----------------
-
-type jsonEncDriverTypical struct {
- w encWriter
- // w *encWriterSwitch
- b *[jsonScratchArrayLen]byte
- tw bool // term white space
- c containerState
-}
-
-func (e *jsonEncDriverTypical) typical() {}
-
-func (e *jsonEncDriverTypical) reset(ee *jsonEncDriver) {
- e.w = ee.ew
- // e.w = &ee.e.encWriterSwitch
- e.b = &ee.b
- e.tw = ee.h.TermWhitespace
- e.c = 0
-}
-
-func (e *jsonEncDriverTypical) WriteArrayStart(length int) {
- e.w.writen1('[')
- e.c = containerArrayStart
-}
-
-func (e *jsonEncDriverTypical) WriteArrayElem() {
- if e.c != containerArrayStart {
- e.w.writen1(',')
- }
- e.c = containerArrayElem
-}
-
-func (e *jsonEncDriverTypical) WriteArrayEnd() {
- e.w.writen1(']')
- e.c = containerArrayEnd
-}
-
-func (e *jsonEncDriverTypical) WriteMapStart(length int) {
- e.w.writen1('{')
- e.c = containerMapStart
-}
-
-func (e *jsonEncDriverTypical) WriteMapElemKey() {
- if e.c != containerMapStart {
- e.w.writen1(',')
- }
- e.c = containerMapKey
-}
-
-func (e *jsonEncDriverTypical) WriteMapElemValue() {
- e.w.writen1(':')
- e.c = containerMapValue
-}
-
-func (e *jsonEncDriverTypical) WriteMapEnd() {
- e.w.writen1('}')
- e.c = containerMapEnd
-}
-
-func (e *jsonEncDriverTypical) EncodeBool(b bool) {
- if b {
- e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4])
- } else {
- e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5])
- }
-}
-
-func (e *jsonEncDriverTypical) EncodeFloat64(f float64) {
- fmt, prec := jsonFloatStrconvFmtPrec(f)
- e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64))
-}
-
-func (e *jsonEncDriverTypical) EncodeInt(v int64) {
- e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
-}
-
-func (e *jsonEncDriverTypical) EncodeUint(v uint64) {
- e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
-}
-
-func (e *jsonEncDriverTypical) EncodeFloat32(f float32) {
- e.EncodeFloat64(float64(f))
-}
-
-func (e *jsonEncDriverTypical) atEndOfEncode() {
- if e.tw {
- e.w.writen1(' ')
- }
-}
-
-// ----------------
-
-type jsonEncDriverGeneric struct {
- w encWriter // encWriter // *encWriterSwitch
- b *[jsonScratchArrayLen]byte
- c containerState
- // ds string // indent string
- di int8 // indent per
- d bool // indenting?
- dt bool // indent using tabs
- dl uint16 // indent level
- ks bool // map key as string
- is byte // integer as string
- tw bool // term white space
- _ [7]byte // padding
-}
-
-// indent is done as below:
-// - newline and indent are added before each mapKey or arrayElem
-// - newline and indent are added before each ending,
-// except there was no entry (so we can have {} or [])
-
-func (e *jsonEncDriverGeneric) reset(ee *jsonEncDriver) {
- e.w = ee.ew
- e.b = &ee.b
- e.tw = ee.h.TermWhitespace
- e.c = 0
- e.d, e.dt, e.dl, e.di = false, false, 0, 0
- h := ee.h
- if h.Indent > 0 {
- e.d = true
- e.di = int8(h.Indent)
- } else if h.Indent < 0 {
- e.d = true
- e.dt = true
- e.di = int8(-h.Indent)
- }
- e.ks = h.MapKeyAsString
- e.is = h.IntegerAsString
-}
-
-func (e *jsonEncDriverGeneric) WriteArrayStart(length int) {
- if e.d {
- e.dl++
- }
- e.w.writen1('[')
- e.c = containerArrayStart
-}
-
-func (e *jsonEncDriverGeneric) WriteArrayElem() {
- if e.c != containerArrayStart {
- e.w.writen1(',')
- }
- if e.d {
- e.writeIndent()
- }
- e.c = containerArrayElem
-}
-
-func (e *jsonEncDriverGeneric) WriteArrayEnd() {
- if e.d {
- e.dl--
- if e.c != containerArrayStart {
- e.writeIndent()
- }
- }
- e.w.writen1(']')
- e.c = containerArrayEnd
-}
-
-func (e *jsonEncDriverGeneric) WriteMapStart(length int) {
- if e.d {
- e.dl++
- }
- e.w.writen1('{')
- e.c = containerMapStart
-}
-
-func (e *jsonEncDriverGeneric) WriteMapElemKey() {
- if e.c != containerMapStart {
- e.w.writen1(',')
- }
- if e.d {
- e.writeIndent()
- }
- e.c = containerMapKey
-}
-
-func (e *jsonEncDriverGeneric) WriteMapElemValue() {
- if e.d {
- e.w.writen2(':', ' ')
- } else {
- e.w.writen1(':')
- }
- e.c = containerMapValue
-}
-
-func (e *jsonEncDriverGeneric) WriteMapEnd() {
- if e.d {
- e.dl--
- if e.c != containerMapStart {
- e.writeIndent()
- }
- }
- e.w.writen1('}')
- e.c = containerMapEnd
-}
-
-func (e *jsonEncDriverGeneric) writeIndent() {
- e.w.writen1('\n')
- x := int(e.di) * int(e.dl)
- if e.dt {
- for x > jsonSpacesOrTabsLen {
- e.w.writeb(jsonTabs[:])
- x -= jsonSpacesOrTabsLen
- }
- e.w.writeb(jsonTabs[:x])
- } else {
- for x > jsonSpacesOrTabsLen {
- e.w.writeb(jsonSpaces[:])
- x -= jsonSpacesOrTabsLen
- }
- e.w.writeb(jsonSpaces[:x])
- }
-}
-
-func (e *jsonEncDriverGeneric) EncodeBool(b bool) {
- if e.ks && e.c == containerMapKey {
- if b {
- e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6])
- } else {
- e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7])
- }
- } else {
- if b {
- e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4])
- } else {
- e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5])
- }
- }
-}
-
-func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) {
- // instead of using 'g', specify whether to use 'e' or 'f'
- fmt, prec := jsonFloatStrconvFmtPrec(f)
-
- var blen int
- if e.ks && e.c == containerMapKey {
- blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64))
- e.b[0] = '"'
- e.b[blen-1] = '"'
- } else {
- blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64))
- }
- e.w.writeb(e.b[:blen])
-}
-
-func (e *jsonEncDriverGeneric) EncodeInt(v int64) {
- x := e.is
- if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) {
- blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10))
- e.b[0] = '"'
- e.b[blen-1] = '"'
- e.w.writeb(e.b[:blen])
- return
- }
- e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
-}
-
-func (e *jsonEncDriverGeneric) EncodeUint(v uint64) {
- x := e.is
- if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) {
- blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10))
- e.b[0] = '"'
- e.b[blen-1] = '"'
- e.w.writeb(e.b[:blen])
- return
- }
- e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
-}
-
-func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) {
- // e.encodeFloat(float64(f), 32)
- // always encode all floats as IEEE 64-bit floating point.
- // It also ensures that we can decode in full precision even if into a float32,
- // as what is written is always to float64 precision.
- e.EncodeFloat64(float64(f))
-}
-
-func (e *jsonEncDriverGeneric) atEndOfEncode() {
- if e.tw {
- if e.d {
- e.w.writen1('\n')
- } else {
- e.w.writen1(' ')
- }
- }
-}
-
-// --------------------
-
-type jsonEncDriver struct {
- noBuiltInTypes
- e *Encoder
- h *JsonHandle
- ew encWriter // encWriter // *encWriterSwitch
- se extWrapper
- // ---- cpu cache line boundary?
- bs []byte // scratch
- // ---- cpu cache line boundary?
- b [jsonScratchArrayLen]byte // scratch (encode time,
-}
-
-func (e *jsonEncDriver) EncodeNil() {
- // We always encode nil as just null (never in quotes)
- // This allows us to easily decode if a nil in the json stream
- // ie if initial token is n.
- e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
-
- // if e.h.MapKeyAsString && e.c == containerMapKey {
- // e.ew.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6])
- // } else {
- // e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4])
- // }
-}
-
-func (e *jsonEncDriver) EncodeTime(t time.Time) {
- // Do NOT use MarshalJSON, as it allocates internally.
- // instead, we call AppendFormat directly, using our scratch buffer (e.b)
- if t.IsZero() {
- e.EncodeNil()
- } else {
- e.b[0] = '"'
- b := t.AppendFormat(e.b[1:1], time.RFC3339Nano)
- e.b[len(b)+1] = '"'
- e.ew.writeb(e.b[:len(b)+2])
- }
- // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.ew.writeb(v)
-}
-
-func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
- if v := ext.ConvertExt(rv); v == nil {
- e.EncodeNil()
- } else {
- en.encode(v)
- }
-}
-
-func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
- // only encodes re.Value (never re.Data)
- if re.Value == nil {
- e.EncodeNil()
- } else {
- en.encode(re.Value)
- }
-}
-
-func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
- e.quoteStr(v)
-}
-
-func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
- // if encoding raw bytes and RawBytesExt is configured, use it to encode
- if v == nil {
- e.EncodeNil()
- return
- }
- if c == cRAW {
- if e.se.InterfaceExt != nil {
- e.EncodeExt(v, 0, &e.se, e.e)
- return
- }
-
- slen := base64.StdEncoding.EncodedLen(len(v))
- if cap(e.bs) >= slen+2 {
- e.bs = e.bs[:slen+2]
- } else {
- e.bs = make([]byte, slen+2)
- }
- e.bs[0] = '"'
- base64.StdEncoding.Encode(e.bs[1:], v)
- e.bs[slen+1] = '"'
- e.ew.writeb(e.bs)
- } else {
- e.quoteStr(stringView(v))
- }
-}
-
-func (e *jsonEncDriver) EncodeAsis(v []byte) {
- e.ew.writeb(v)
-}
-
-func (e *jsonEncDriver) quoteStr(s string) {
- // adapted from std pkg encoding/json
- const hex = "0123456789abcdef"
- w := e.ew
- htmlasis := e.h.HTMLCharsAsIs
- w.writen1('"')
- var start int
- for i, slen := 0, len(s); i < slen; {
- // encode all bytes < 0x20 (except \r, \n).
- // also encode < > & to prevent security holes when served to some browsers.
- if b := s[i]; b < utf8.RuneSelf {
- // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
- // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) {
- if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) {
- i++
- continue
- }
- if start < i {
- w.writestr(s[start:i])
- }
- switch b {
- case '\\', '"':
- w.writen2('\\', b)
- case '\n':
- w.writen2('\\', 'n')
- case '\r':
- w.writen2('\\', 'r')
- case '\b':
- w.writen2('\\', 'b')
- case '\f':
- w.writen2('\\', 'f')
- case '\t':
- w.writen2('\\', 't')
- default:
- w.writestr(`\u00`)
- w.writen2(hex[b>>4], hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRuneInString(s[i:])
- if c == utf8.RuneError && size == 1 {
- if start < i {
- w.writestr(s[start:i])
- }
- w.writestr(`\ufffd`)
- i += size
- start = i
- continue
- }
- // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
- // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
- if c == '\u2028' || c == '\u2029' {
- if start < i {
- w.writestr(s[start:i])
- }
- w.writestr(`\u202`)
- w.writen1(hex[c&0xF])
- i += size
- start = i
- continue
- }
- i += size
- }
- if start < len(s) {
- w.writestr(s[start:])
- }
- w.writen1('"')
-}
-
-type jsonDecDriver struct {
- noBuiltInTypes
- d *Decoder
- h *JsonHandle
- r decReader // *decReaderSwitch // decReader
- se extWrapper
-
- // ---- writable fields during execution --- *try* to keep in sep cache line
-
- c containerState
- // tok is used to store the token read right after skipWhiteSpace.
- tok uint8
- fnull bool // found null from appendStringAsBytes
- bs []byte // scratch. Initialized from b. Used for parsing strings or numbers.
- bstr [8]byte // scratch used for string \UXXX parsing
- // ---- cpu cache line boundary?
- b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time
- b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes
-
- _ [3]uint64 // padding
- // n jsonNum
-}
-
-// func jsonIsWS(b byte) bool {
-// // return b == ' ' || b == '\t' || b == '\r' || b == '\n'
-// return jsonCharWhitespaceSet.isset(b)
-// }
-
-func (d *jsonDecDriver) uncacheRead() {
- if d.tok != 0 {
- d.r.unreadn1()
- d.tok = 0
- }
-}
-
-func (d *jsonDecDriver) ReadMapStart() int {
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- const xc uint8 = '{'
- if d.tok != xc {
- d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- d.c = containerMapStart
- return -1
-}
-
-func (d *jsonDecDriver) ReadArrayStart() int {
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- const xc uint8 = '['
- if d.tok != xc {
- d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- d.c = containerArrayStart
- return -1
-}
-
-func (d *jsonDecDriver) CheckBreak() bool {
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- return d.tok == '}' || d.tok == ']'
-}
-
-// For the ReadXXX methods below, we could just delegate to helper functions
-// readContainerState(c containerState, xc uint8, check bool)
-// - ReadArrayElem would become:
-// readContainerState(containerArrayElem, ',', d.c != containerArrayStart)
-//
-// However, until mid-stack inlining comes in go1.11 which supports inlining of
-// one-liners, we explicitly write them all 5 out to elide the extra func call.
-//
-// TODO: For Go 1.11, if inlined, consider consolidating these.
-
-func (d *jsonDecDriver) ReadArrayElem() {
- const xc uint8 = ','
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- if d.c != containerArrayStart {
- if d.tok != xc {
- d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- }
- d.c = containerArrayElem
-}
-
-func (d *jsonDecDriver) ReadArrayEnd() {
- const xc uint8 = ']'
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- if d.tok != xc {
- d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- d.c = containerArrayEnd
-}
-
-func (d *jsonDecDriver) ReadMapElemKey() {
- const xc uint8 = ','
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- if d.c != containerMapStart {
- if d.tok != xc {
- d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- }
- d.c = containerMapKey
-}
-
-func (d *jsonDecDriver) ReadMapElemValue() {
- const xc uint8 = ':'
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- if d.tok != xc {
- d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- d.c = containerMapValue
-}
-
-func (d *jsonDecDriver) ReadMapEnd() {
- const xc uint8 = '}'
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- if d.tok != xc {
- d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok)
- }
- d.tok = 0
- d.c = containerMapEnd
-}
-
-func (d *jsonDecDriver) readLit(length, fromIdx uint8) {
- bs := d.r.readx(int(length))
- d.tok = 0
- if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) {
- d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs)
- return
- }
-}
-
-func (d *jsonDecDriver) TryDecodeAsNil() bool {
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- // we shouldn't try to see if "null" was here, right?
- // only the plain string: `null` denotes a nil (ie not quotes)
- if d.tok == 'n' {
- d.readLit(3, jsonLitNull+1) // (n)ull
- return true
- }
- return false
-}
-
-func (d *jsonDecDriver) DecodeBool() (v bool) {
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- fquot := d.c == containerMapKey && d.tok == '"'
- if fquot {
- d.tok = d.r.readn1()
- }
- switch d.tok {
- case 'f':
- d.readLit(4, jsonLitFalse+1) // (f)alse
- // v = false
- case 't':
- d.readLit(3, jsonLitTrue+1) // (t)rue
- v = true
- default:
- d.d.errorf("decode bool: got first char %c", d.tok)
- // v = false // "unreachable"
- }
- if fquot {
- d.r.readn1()
- }
- return
-}
-
-func (d *jsonDecDriver) DecodeTime() (t time.Time) {
- // read string, and pass the string into json.unmarshal
- d.appendStringAsBytes()
- if d.fnull {
- return
- }
- t, err := time.Parse(time.RFC3339, stringView(d.bs))
- if err != nil {
- d.d.errorv(err)
- }
- return
-}
-
-func (d *jsonDecDriver) ContainerType() (vt valueType) {
- // check container type by checking the first char
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
-
- // optimize this, so we don't do 4 checks but do one computation.
- // return jsonContainerSet[d.tok]
-
- // ContainerType is mostly called for Map and Array,
- // so this conditional is good enough (max 2 checks typically)
- if b := d.tok; b == '{' {
- return valueTypeMap
- } else if b == '[' {
- return valueTypeArray
- } else if b == 'n' {
- return valueTypeNil
- } else if b == '"' {
- return valueTypeString
- }
- return valueTypeUnset
-}
-
-func (d *jsonDecDriver) decNumBytes() (bs []byte) {
- // stores num bytes in d.bs
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- if d.tok == '"' {
- bs = d.r.readUntil(d.b2[:0], '"')
- bs = bs[:len(bs)-1]
- } else {
- d.r.unreadn1()
- bs = d.r.readTo(d.bs[:0], &jsonNumSet)
- }
- d.tok = 0
- return bs
-}
-
-func (d *jsonDecDriver) DecodeUint64() (u uint64) {
- bs := d.decNumBytes()
- n, neg, badsyntax, overflow := jsonParseInteger(bs)
- if overflow {
- d.d.errorf("overflow parsing unsigned integer: %s", bs)
- } else if neg {
- d.d.errorf("minus found parsing unsigned integer: %s", bs)
- } else if badsyntax {
- // fallback: try to decode as float, and cast
- n = d.decUint64ViaFloat(stringView(bs))
- }
- return n
-}
-
-func (d *jsonDecDriver) DecodeInt64() (i int64) {
- const cutoff = uint64(1 << uint(64-1))
- bs := d.decNumBytes()
- n, neg, badsyntax, overflow := jsonParseInteger(bs)
- if overflow {
- d.d.errorf("overflow parsing integer: %s", bs)
- } else if badsyntax {
- // d.d.errorf("invalid syntax for integer: %s", bs)
- // fallback: try to decode as float, and cast
- if neg {
- n = d.decUint64ViaFloat(stringView(bs[1:]))
- } else {
- n = d.decUint64ViaFloat(stringView(bs))
- }
- }
- if neg {
- if n > cutoff {
- d.d.errorf("overflow parsing integer: %s", bs)
- }
- i = -(int64(n))
- } else {
- if n >= cutoff {
- d.d.errorf("overflow parsing integer: %s", bs)
- }
- i = int64(n)
- }
- return
-}
-
-func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) {
- f, err := strconv.ParseFloat(s, 64)
- if err != nil {
- d.d.errorf("invalid syntax for integer: %s", s)
- // d.d.errorv(err)
- }
- fi, ff := math.Modf(f)
- if ff > 0 {
- d.d.errorf("fractional part found parsing integer: %s", s)
- } else if fi > float64(math.MaxUint64) {
- d.d.errorf("overflow parsing integer: %s", s)
- }
- return uint64(fi)
-}
-
-func (d *jsonDecDriver) DecodeFloat64() (f float64) {
- bs := d.decNumBytes()
- f, err := strconv.ParseFloat(stringView(bs), 64)
- if err != nil {
- d.d.errorv(err)
- }
- return
-}
-
-func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
- if ext == nil {
- re := rv.(*RawExt)
- re.Tag = xtag
- d.d.decode(&re.Value)
- } else {
- var v interface{}
- d.d.decode(&v)
- ext.UpdateExt(rv, v)
- }
- return
-}
-
-func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
- // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
- if d.se.InterfaceExt != nil {
- bsOut = bs
- d.DecodeExt(&bsOut, 0, &d.se)
- return
- }
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- // check if an "array" of uint8's (see ContainerType for how to infer if an array)
- if d.tok == '[' {
- bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
- return
- }
- d.appendStringAsBytes()
- // base64 encodes []byte{} as "", and we encode nil []byte as null.
- // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}.
- // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs.
- // However, it sets a fnull field to true, so we can check if a null was found.
- if len(d.bs) == 0 {
- if d.fnull {
- return nil
- }
- return []byte{}
- }
- bs0 := d.bs
- slen := base64.StdEncoding.DecodedLen(len(bs0))
- if slen <= cap(bs) {
- bsOut = bs[:slen]
- } else if zerocopy && slen <= cap(d.b2) {
- bsOut = d.b2[:slen]
- } else {
- bsOut = make([]byte, slen)
- }
- slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
- if err != nil {
- d.d.errorf("error decoding base64 binary '%s': %v", bs0, err)
- return nil
- }
- if slen != slen2 {
- bsOut = bsOut[:slen2]
- }
- return
-}
-
-func (d *jsonDecDriver) DecodeString() (s string) {
- d.appendStringAsBytes()
- return d.bsToString()
-}
-
-func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
- d.appendStringAsBytes()
- return d.bs
-}
-
-func (d *jsonDecDriver) appendStringAsBytes() {
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
-
- d.fnull = false
- if d.tok != '"' {
- // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok)
- // handle non-string scalar: null, true, false or a number
- switch d.tok {
- case 'n':
- d.readLit(3, jsonLitNull+1) // (n)ull
- d.bs = d.bs[:0]
- d.fnull = true
- case 'f':
- d.readLit(4, jsonLitFalse+1) // (f)alse
- d.bs = d.bs[:5]
- copy(d.bs, "false")
- case 't':
- d.readLit(3, jsonLitTrue+1) // (t)rue
- d.bs = d.bs[:4]
- copy(d.bs, "true")
- default:
- // try to parse a valid number
- bs := d.decNumBytes()
- if len(bs) <= cap(d.bs) {
- d.bs = d.bs[:len(bs)]
- } else {
- d.bs = make([]byte, len(bs))
- }
- copy(d.bs, bs)
- }
- return
- }
-
- d.tok = 0
- r := d.r
- var cs = r.readUntil(d.b2[:0], '"')
- var cslen = len(cs)
- var c uint8
- v := d.bs[:0]
- // append on each byte seen can be expensive, so we just
- // keep track of where we last read a contiguous set of
- // non-special bytes (using cursor variable),
- // and when we see a special byte
- // e.g. end-of-slice, " or \,
- // we will append the full range into the v slice before proceeding
- for i, cursor := 0, 0; ; {
- if i == cslen {
- v = append(v, cs[cursor:]...)
- cs = r.readUntil(d.b2[:0], '"')
- cslen = len(cs)
- i, cursor = 0, 0
- }
- c = cs[i]
- if c == '"' {
- v = append(v, cs[cursor:i]...)
- break
- }
- if c != '\\' {
- i++
- continue
- }
- v = append(v, cs[cursor:i]...)
- i++
- c = cs[i]
- switch c {
- case '"', '\\', '/', '\'':
- v = append(v, c)
- case 'b':
- v = append(v, '\b')
- case 'f':
- v = append(v, '\f')
- case 'n':
- v = append(v, '\n')
- case 'r':
- v = append(v, '\r')
- case 't':
- v = append(v, '\t')
- case 'u':
- var r rune
- var rr uint32
- if len(cs) < i+4 { // may help reduce bounds-checking
- d.d.errorf("need at least 4 more bytes for unicode sequence")
- }
- // c = cs[i+4] // may help reduce bounds-checking
- for j := 1; j < 5; j++ {
- // best to use explicit if-else
- // - not a table, etc which involve memory loads, array lookup with bounds checks, etc
- c = cs[i+j]
- if c >= '0' && c <= '9' {
- rr = rr*16 + uint32(c-jsonU4Chk2)
- } else if c >= 'a' && c <= 'f' {
- rr = rr*16 + uint32(c-jsonU4Chk1)
- } else if c >= 'A' && c <= 'F' {
- rr = rr*16 + uint32(c-jsonU4Chk0)
- } else {
- r = unicode.ReplacementChar
- i += 4
- goto encode_rune
- }
- }
- r = rune(rr)
- i += 4
- if utf16.IsSurrogate(r) {
- if len(cs) >= i+6 && cs[i+2] == 'u' && cs[i+1] == '\\' {
- i += 2
- // c = cs[i+4] // may help reduce bounds-checking
- var rr1 uint32
- for j := 1; j < 5; j++ {
- c = cs[i+j]
- if c >= '0' && c <= '9' {
- rr = rr*16 + uint32(c-jsonU4Chk2)
- } else if c >= 'a' && c <= 'f' {
- rr = rr*16 + uint32(c-jsonU4Chk1)
- } else if c >= 'A' && c <= 'F' {
- rr = rr*16 + uint32(c-jsonU4Chk0)
- } else {
- r = unicode.ReplacementChar
- i += 4
- goto encode_rune
- }
- }
- r = utf16.DecodeRune(r, rune(rr1))
- i += 4
- } else {
- r = unicode.ReplacementChar
- goto encode_rune
- }
- }
- encode_rune:
- w2 := utf8.EncodeRune(d.bstr[:], r)
- v = append(v, d.bstr[:w2]...)
- default:
- d.d.errorf("unsupported escaped value: %c", c)
- }
- i++
- cursor = i
- }
- d.bs = v
-}
-
-func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
- const cutoff = uint64(1 << uint(64-1))
- var n uint64
- var neg, badsyntax, overflow bool
-
- if d.h.PreferFloat {
- goto F
- }
- n, neg, badsyntax, overflow = jsonParseInteger(bs)
- if badsyntax || overflow {
- goto F
- }
- if neg {
- if n > cutoff {
- goto F
- }
- z.v = valueTypeInt
- z.i = -(int64(n))
- } else if d.h.SignedInteger {
- if n >= cutoff {
- goto F
- }
- z.v = valueTypeInt
- z.i = int64(n)
- } else {
- z.v = valueTypeUint
- z.u = n
- }
- return
-F:
- z.v = valueTypeFloat
- z.f, err = strconv.ParseFloat(stringView(bs), 64)
- return
-}
-
-func (d *jsonDecDriver) bsToString() string {
- // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
- if jsonAlwaysReturnInternString || d.c == containerMapKey {
- return d.d.string(d.bs)
- }
- return string(d.bs)
-}
-
-func (d *jsonDecDriver) DecodeNaked() {
- z := d.d.n
- // var decodeFurther bool
-
- if d.tok == 0 {
- d.tok = d.r.skip(&jsonCharWhitespaceSet)
- }
- switch d.tok {
- case 'n':
- d.readLit(3, jsonLitNull+1) // (n)ull
- z.v = valueTypeNil
- case 'f':
- d.readLit(4, jsonLitFalse+1) // (f)alse
- z.v = valueTypeBool
- z.b = false
- case 't':
- d.readLit(3, jsonLitTrue+1) // (t)rue
- z.v = valueTypeBool
- z.b = true
- case '{':
- z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart
- case '[':
- z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
- case '"':
- // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
- d.appendStringAsBytes()
- if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString {
- switch stringView(d.bs) {
- case "null":
- z.v = valueTypeNil
- case "true":
- z.v = valueTypeBool
- z.b = true
- case "false":
- z.v = valueTypeBool
- z.b = false
- default:
- // check if a number: float, int or uint
- if err := d.nakedNum(z, d.bs); err != nil {
- z.v = valueTypeString
- z.s = d.bsToString()
- }
- }
- } else {
- z.v = valueTypeString
- z.s = d.bsToString()
- }
- default: // number
- bs := d.decNumBytes()
- if len(bs) == 0 {
- d.d.errorf("decode number from empty string")
- return
- }
- if err := d.nakedNum(z, bs); err != nil {
- d.d.errorf("decode number from %s: %v", bs, err)
- return
- }
- }
- // if decodeFurther {
- // d.s.sc.retryRead()
- // }
- return
-}
-
-//----------------------
-
-// JsonHandle is a handle for JSON encoding format.
-//
-// Json is comprehensively supported:
-// - decodes numbers into interface{} as int, uint or float64
-// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc.
-// - decode integers from float formatted numbers e.g. 1.27e+8
-// - decode any json value (numbers, bool, etc) from quoted strings
-// - configurable way to encode/decode []byte .
-// by default, encodes and decodes []byte using base64 Std Encoding
-// - UTF-8 support for encoding and decoding
-//
-// It has better performance than the json library in the standard library,
-// by leveraging the performance improvements of the codec library.
-//
-// In addition, it doesn't read more bytes than necessary during a decode, which allows
-// reading multiple values from a stream containing json and non-json content.
-// For example, a user can read a json value, then a cbor value, then a msgpack value,
-// all from the same stream in sequence.
-//
-// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
-// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
-type JsonHandle struct {
- textEncodingType
- BasicHandle
-
- // Indent indicates how a value is encoded.
- // - If positive, indent by that number of spaces.
- // - If negative, indent by that number of tabs.
- Indent int8
-
- // IntegerAsString controls how integers (signed and unsigned) are encoded.
- //
- // Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
- // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
- // This can be mitigated by configuring how to encode integers.
- //
- // IntegerAsString interpretes the following values:
- // - if 'L', then encode integers > 2^53 as a json string.
- // - if 'A', then encode all integers as a json string
- // containing the exact integer representation as a decimal.
- // - else encode all integers as a json number (default)
- IntegerAsString byte
-
- // HTMLCharsAsIs controls how to encode some special characters to html: < > &
- //
- // By default, we encode them as \uXXX
- // to prevent security holes when served from some browsers.
- HTMLCharsAsIs bool
-
- // PreferFloat says that we will default to decoding a number as a float.
- // If not set, we will examine the characters of the number and decode as an
- // integer type if it doesn't have any of the characters [.eE].
- PreferFloat bool
-
- // TermWhitespace says that we add a whitespace character
- // at the end of an encoding.
- //
- // The whitespace is important, especially if using numbers in a context
- // where multiple items are written to a stream.
- TermWhitespace bool
-
- // MapKeyAsString says to encode all map keys as strings.
- //
- // Use this to enforce strict json output.
- // The only caveat is that nil value is ALWAYS written as null (never as "null")
- MapKeyAsString bool
-
- // _ [2]byte // padding
-
- // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver.
-
- // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
- // If not configured, raw bytes are encoded to/from base64 text.
- RawBytesExt InterfaceExt
-
- _ [2]uint64 // padding
-}
-
-// Name returns the name of the handle: json
-func (h *JsonHandle) Name() string { return "json" }
-func (h *JsonHandle) hasElemSeparators() bool { return true }
-func (h *JsonHandle) typical() bool {
- return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
-}
-
-type jsonTypical interface {
- typical()
-}
-
-func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) {
- _, v = ed.(jsonTypical)
- return v != h.typical()
-}
-
-// SetInterfaceExt sets an extension
-func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
- return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
-}
-
-type jsonEncDriverTypicalImpl struct {
- jsonEncDriver
- jsonEncDriverTypical
- _ [1]uint64 // padding
-}
-
-func (x *jsonEncDriverTypicalImpl) reset() {
- x.jsonEncDriver.reset()
- x.jsonEncDriverTypical.reset(&x.jsonEncDriver)
-}
-
-type jsonEncDriverGenericImpl struct {
- jsonEncDriver
- jsonEncDriverGeneric
-}
-
-func (x *jsonEncDriverGenericImpl) reset() {
- x.jsonEncDriver.reset()
- x.jsonEncDriverGeneric.reset(&x.jsonEncDriver)
-}
-
-func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) {
- var hd *jsonEncDriver
- if h.typical() {
- var v jsonEncDriverTypicalImpl
- ee = &v
- hd = &v.jsonEncDriver
- } else {
- var v jsonEncDriverGenericImpl
- ee = &v
- hd = &v.jsonEncDriver
- }
- hd.e, hd.h, hd.bs = e, h, hd.b[:0]
- hd.se.BytesExt = bytesExtFailer{}
- ee.reset()
- return
-}
-
-func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
- // d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
- hd := jsonDecDriver{d: d, h: h}
- hd.se.BytesExt = bytesExtFailer{}
- hd.bs = hd.b[:0]
- hd.reset()
- return &hd
-}
-
-func (e *jsonEncDriver) reset() {
- e.ew = e.e.w // e.e.w // &e.e.encWriterSwitch
- e.se.InterfaceExt = e.h.RawBytesExt
- if e.bs != nil {
- e.bs = e.bs[:0]
- }
-}
-
-func (d *jsonDecDriver) reset() {
- d.r = d.d.r // &d.d.decReaderSwitch // d.d.r
- d.se.InterfaceExt = d.h.RawBytesExt
- if d.bs != nil {
- d.bs = d.bs[:0]
- }
- d.c, d.tok = 0, 0
- // d.n.reset()
-}
-
-func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) {
- prec = -1
- var abs = math.Abs(f)
- if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
- fmt = 'e'
- } else {
- fmt = 'f'
- // set prec to 1 iff mod is 0.
- // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes.
- // this ensures that every float has an e or .0 in it.
- if abs <= 1 {
- if abs == 0 || abs == 1 {
- prec = 1
- }
- } else if _, mod := math.Modf(abs); mod == 0 {
- prec = 1
- }
- }
- return
-}
-
-// custom-fitted version of strconv.Parse(Ui|I)nt.
-// Also ensures we don't have to search for .eE to determine if a float or not.
-func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) {
- const maxUint64 = (1<<64 - 1)
- const cutoff = maxUint64/10 + 1
-
- if len(s) == 0 {
- badSyntax = true
- return
- }
- switch s[0] {
- case '+':
- s = s[1:]
- case '-':
- s = s[1:]
- neg = true
- }
- for _, c := range s {
- if c < '0' || c > '9' {
- badSyntax = true
- return
- }
- // unsigned integers don't overflow well on multiplication, so check cutoff here
- // e.g. (maxUint64-5)*10 doesn't overflow well ...
- if n >= cutoff {
- overflow = true
- return
- }
- n *= 10
- n1 := n + uint64(c-'0')
- if n1 < n || n1 > maxUint64 {
- overflow = true
- return
- }
- n = n1
- }
- return
-}
-
-var _ decDriver = (*jsonDecDriver)(nil)
-var _ encDriver = (*jsonEncDriverGenericImpl)(nil)
-var _ encDriver = (*jsonEncDriverTypicalImpl)(nil)
-var _ jsonTypical = (*jsonEncDriverTypical)(nil)
diff --git a/src/disposa.blue/margo/vendor/github.com/urfave/cli/.travis.yml b/src/disposa.blue/margo/vendor/github.com/urfave/cli/.travis.yml
deleted file mode 100644
index cf8d0980..00000000
--- a/src/disposa.blue/margo/vendor/github.com/urfave/cli/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-language: go
-sudo: false
-dist: trusty
-osx_image: xcode8.3
-go: 1.8.x
-
-os:
-- linux
-- osx
-
-cache:
- directories:
- - node_modules
-
-before_script:
-- go get github.com/urfave/gfmrun/... || true
-- go get golang.org/x/tools/cmd/goimports
-- if [ ! -f node_modules/.bin/markdown-toc ] ; then
- npm install markdown-toc ;
- fi
-
-script:
-- ./runtests gen
-- ./runtests vet
-- ./runtests test
-- ./runtests gfmrun
-- ./runtests toc
diff --git a/src/disposa.blue/margo/vendor/github.com/urfave/cli/appveyor.yml b/src/disposa.blue/margo/vendor/github.com/urfave/cli/appveyor.yml
deleted file mode 100644
index 1e1489c3..00000000
--- a/src/disposa.blue/margo/vendor/github.com/urfave/cli/appveyor.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2016
-
-image: Visual Studio 2017
-
-clone_folder: c:\gopath\src\github.com\urfave\cli
-
-environment:
- GOPATH: C:\gopath
- GOVERSION: 1.8.x
- PYTHON: C:\Python36-x64
- PYTHON_VERSION: 3.6.x
- PYTHON_ARCH: 64
-
-install:
-- set PATH=%GOPATH%\bin;C:\go\bin;%PATH%
-- go version
-- go env
-- go get github.com/urfave/gfmrun/...
-- go get -v -t ./...
-
-build_script:
-- python runtests vet
-- python runtests test
-- python runtests gfmrun
diff --git a/src/disposa.blue/margo/vendor/github.com/urfave/cli/flag-types.json b/src/disposa.blue/margo/vendor/github.com/urfave/cli/flag-types.json
deleted file mode 100644
index 12231078..00000000
--- a/src/disposa.blue/margo/vendor/github.com/urfave/cli/flag-types.json
+++ /dev/null
@@ -1,93 +0,0 @@
-[
- {
- "name": "Bool",
- "type": "bool",
- "value": false,
- "context_default": "false",
- "parser": "strconv.ParseBool(f.Value.String())"
- },
- {
- "name": "BoolT",
- "type": "bool",
- "value": false,
- "doctail": " that is true by default",
- "context_default": "false",
- "parser": "strconv.ParseBool(f.Value.String())"
- },
- {
- "name": "Duration",
- "type": "time.Duration",
- "doctail": " (see https://golang.org/pkg/time/#ParseDuration)",
- "context_default": "0",
- "parser": "time.ParseDuration(f.Value.String())"
- },
- {
- "name": "Float64",
- "type": "float64",
- "context_default": "0",
- "parser": "strconv.ParseFloat(f.Value.String(), 64)"
- },
- {
- "name": "Generic",
- "type": "Generic",
- "dest": false,
- "context_default": "nil",
- "context_type": "interface{}"
- },
- {
- "name": "Int64",
- "type": "int64",
- "context_default": "0",
- "parser": "strconv.ParseInt(f.Value.String(), 0, 64)"
- },
- {
- "name": "Int",
- "type": "int",
- "context_default": "0",
- "parser": "strconv.ParseInt(f.Value.String(), 0, 64)",
- "parser_cast": "int(parsed)"
- },
- {
- "name": "IntSlice",
- "type": "*IntSlice",
- "dest": false,
- "context_default": "nil",
- "context_type": "[]int",
- "parser": "(f.Value.(*IntSlice)).Value(), error(nil)"
- },
- {
- "name": "Int64Slice",
- "type": "*Int64Slice",
- "dest": false,
- "context_default": "nil",
- "context_type": "[]int64",
- "parser": "(f.Value.(*Int64Slice)).Value(), error(nil)"
- },
- {
- "name": "String",
- "type": "string",
- "context_default": "\"\"",
- "parser": "f.Value.String(), error(nil)"
- },
- {
- "name": "StringSlice",
- "type": "*StringSlice",
- "dest": false,
- "context_default": "nil",
- "context_type": "[]string",
- "parser": "(f.Value.(*StringSlice)).Value(), error(nil)"
- },
- {
- "name": "Uint64",
- "type": "uint64",
- "context_default": "0",
- "parser": "strconv.ParseUint(f.Value.String(), 0, 64)"
- },
- {
- "name": "Uint",
- "type": "uint",
- "context_default": "0",
- "parser": "strconv.ParseUint(f.Value.String(), 0, 64)",
- "parser_cast": "uint(parsed)"
- }
-]
diff --git a/src/disposa.blue/margo/vendor/github.com/urfave/cli/generate-flag-types b/src/disposa.blue/margo/vendor/github.com/urfave/cli/generate-flag-types
deleted file mode 100755
index 7147381c..00000000
--- a/src/disposa.blue/margo/vendor/github.com/urfave/cli/generate-flag-types
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/env python
-"""
-The flag types that ship with the cli library have many things in common, and
-so we can take advantage of the `go generate` command to create much of the
-source code from a list of definitions. These definitions attempt to cover
-the parts that vary between flag types, and should evolve as needed.
-
-An example of the minimum definition needed is:
-
- {
- "name": "SomeType",
- "type": "sometype",
- "context_default": "nil"
- }
-
-In this example, the code generated for the `cli` package will include a type
-named `SomeTypeFlag` that is expected to wrap a value of type `sometype`.
-Fetching values by name via `*cli.Context` will default to a value of `nil`.
-
-A more complete, albeit somewhat redundant, example showing all available
-definition keys is:
-
- {
- "name": "VeryMuchType",
- "type": "*VeryMuchType",
- "value": true,
- "dest": false,
- "doctail": " which really only wraps a []float64, oh well!",
- "context_type": "[]float64",
- "context_default": "nil",
- "parser": "parseVeryMuchType(f.Value.String())",
- "parser_cast": "[]float64(parsed)"
- }
-
-The meaning of each field is as follows:
-
- name (string) - The type "name", which will be suffixed with
- `Flag` when generating the type definition
- for `cli` and the wrapper type for `altsrc`
- type (string) - The type that the generated `Flag` type for `cli`
- is expected to "contain" as its `.Value` member
- value (bool) - Should the generated `cli` type have a `Value`
- member?
- dest (bool) - Should the generated `cli` type support a
- destination pointer?
- doctail (string) - Additional docs for the `cli` flag type comment
- context_type (string) - The literal type used in the `*cli.Context`
- reader func signature
- context_default (string) - The literal value used as the default by the
- `*cli.Context` reader funcs when no value is
- present
- parser (string) - Literal code used to parse the flag `f`,
- expected to have a return signature of
- (value, error)
- parser_cast (string) - Literal code used to cast the `parsed` value
- returned from the `parser` code
-"""
-
-from __future__ import print_function, unicode_literals
-
-import argparse
-import json
-import os
-import subprocess
-import sys
-import tempfile
-import textwrap
-
-
-class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter,
- argparse.RawDescriptionHelpFormatter):
- pass
-
-
-def main(sysargs=sys.argv[:]):
- parser = argparse.ArgumentParser(
- description='Generate flag type code!',
- formatter_class=_FancyFormatter)
- parser.add_argument(
- 'package',
- type=str, default='cli', choices=_WRITEFUNCS.keys(),
- help='Package for which flag types will be generated'
- )
- parser.add_argument(
- '-i', '--in-json',
- type=argparse.FileType('r'),
- default=sys.stdin,
- help='Input JSON file which defines each type to be generated'
- )
- parser.add_argument(
- '-o', '--out-go',
- type=argparse.FileType('w'),
- default=sys.stdout,
- help='Output file/stream to which generated source will be written'
- )
- parser.epilog = __doc__
-
- args = parser.parse_args(sysargs[1:])
- _generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json)
- return 0
-
-
-def _generate_flag_types(writefunc, output_go, input_json):
- types = json.load(input_json)
-
- tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False)
- writefunc(tmp, types)
- tmp.close()
-
- new_content = subprocess.check_output(
- ['goimports', tmp.name]
- ).decode('utf-8')
-
- print(new_content, file=output_go, end='')
- output_go.flush()
- os.remove(tmp.name)
-
-
-def _set_typedef_defaults(typedef):
- typedef.setdefault('doctail', '')
- typedef.setdefault('context_type', typedef['type'])
- typedef.setdefault('dest', True)
- typedef.setdefault('value', True)
- typedef.setdefault('parser', 'f.Value, error(nil)')
- typedef.setdefault('parser_cast', 'parsed')
-
-
-def _write_cli_flag_types(outfile, types):
- _fwrite(outfile, """\
- package cli
-
- // WARNING: This file is generated!
-
- """)
-
- for typedef in types:
- _set_typedef_defaults(typedef)
-
- _fwrite(outfile, """\
- // {name}Flag is a flag with type {type}{doctail}
- type {name}Flag struct {{
- Name string
- Usage string
- EnvVar string
- Hidden bool
- """.format(**typedef))
-
- if typedef['value']:
- _fwrite(outfile, """\
- Value {type}
- """.format(**typedef))
-
- if typedef['dest']:
- _fwrite(outfile, """\
- Destination *{type}
- """.format(**typedef))
-
- _fwrite(outfile, "\n}\n\n")
-
- _fwrite(outfile, """\
- // String returns a readable representation of this value
- // (for usage defaults)
- func (f {name}Flag) String() string {{
- return FlagStringer(f)
- }}
-
- // GetName returns the name of the flag
- func (f {name}Flag) GetName() string {{
- return f.Name
- }}
-
- // {name} looks up the value of a local {name}Flag, returns
- // {context_default} if not found
- func (c *Context) {name}(name string) {context_type} {{
- return lookup{name}(name, c.flagSet)
- }}
-
- // Global{name} looks up the value of a global {name}Flag, returns
- // {context_default} if not found
- func (c *Context) Global{name}(name string) {context_type} {{
- if fs := lookupGlobalFlagSet(name, c); fs != nil {{
- return lookup{name}(name, fs)
- }}
- return {context_default}
- }}
-
- func lookup{name}(name string, set *flag.FlagSet) {context_type} {{
- f := set.Lookup(name)
- if f != nil {{
- parsed, err := {parser}
- if err != nil {{
- return {context_default}
- }}
- return {parser_cast}
- }}
- return {context_default}
- }}
- """.format(**typedef))
-
-
-def _write_altsrc_flag_types(outfile, types):
- _fwrite(outfile, """\
- package altsrc
-
- import (
- "gopkg.in/urfave/cli.v1"
- )
-
- // WARNING: This file is generated!
-
- """)
-
- for typedef in types:
- _set_typedef_defaults(typedef)
-
- _fwrite(outfile, """\
- // {name}Flag is the flag type that wraps cli.{name}Flag to allow
- // for other values to be specified
- type {name}Flag struct {{
- cli.{name}Flag
- set *flag.FlagSet
- }}
-
- // New{name}Flag creates a new {name}Flag
- func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{
- return &{name}Flag{{{name}Flag: fl, set: nil}}
- }}
-
- // Apply saves the flagSet for later usage calls, then calls the
- // wrapped {name}Flag.Apply
- func (f *{name}Flag) Apply(set *flag.FlagSet) {{
- f.set = set
- f.{name}Flag.Apply(set)
- }}
-
- // ApplyWithError saves the flagSet for later usage calls, then calls the
- // wrapped {name}Flag.ApplyWithError
- func (f *{name}Flag) ApplyWithError(set *flag.FlagSet) error {{
- f.set = set
- return f.{name}Flag.ApplyWithError(set)
- }}
- """.format(**typedef))
-
-
-def _fwrite(outfile, text):
- print(textwrap.dedent(text), end='', file=outfile)
-
-
-_WRITEFUNCS = {
- 'cli': _write_cli_flag_types,
- 'altsrc': _write_altsrc_flag_types
-}
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/src/margo.sh/.github/workflows/margo-ci.yml b/src/margo.sh/.github/workflows/margo-ci.yml
new file mode 100644
index 00000000..eb025212
--- /dev/null
+++ b/src/margo.sh/.github/workflows/margo-ci.yml
@@ -0,0 +1,22 @@
+on: [push, pull_request]
+name: margo-ci
+jobs:
+ margo-ci:
+ strategy:
+ matrix:
+ go-version: [1.13.x, 1.14.x]
+ platform: [ubuntu-latest, macos-latest, windows-latest]
+ runs-on: ${{ matrix.platform }}
+ steps:
+ - name: Setup
+ uses: actions/setup-go@v1
+ with:
+ go-version: ${{ matrix.go-version }}
+ - name: Checkout
+ uses: actions/checkout@v2
+ with:
+ path: src/margo.sh
+ - name: CI
+ env:
+ GOPATH: ${{ github.workspace }}
+ run: go run margo.sh ci
diff --git a/src/disposa.blue/margo/.gitignore b/src/margo.sh/.gitignore
similarity index 100%
rename from src/disposa.blue/margo/.gitignore
rename to src/margo.sh/.gitignore
diff --git a/src/disposa.blue/margo/AUTHORS b/src/margo.sh/AUTHORS
similarity index 100%
rename from src/disposa.blue/margo/AUTHORS
rename to src/margo.sh/AUTHORS
diff --git a/src/margo.sh/CONTRIBUTING.md b/src/margo.sh/CONTRIBUTING.md
new file mode 100644
index 00000000..f4831e28
--- /dev/null
+++ b/src/margo.sh/CONTRIBUTING.md
@@ -0,0 +1,39 @@
+### Introduction
+
+Thank you for considering contributing to margo!
+
+Although margo is officially a Kuroku Labs product, it's still an open source project, and we welcome all types of contributions - be it bug reports, marketing, code, documentation, etc.
+
+
+### Contributor License Agreement (CLA)
+
+As is the case with many Open Source projects, we can only accept source code contributions from contributors that have signed our CLA. Visit https://cla.kuroku.io/ for more details.
+
+### Dev environment setup
+
+The easiest way to get started with margo development is through GoSublime:
+
+* [install GoSublime](https://github.com/DisposaBoy/GoSublime#installation) with git
+* switch to the `development` branch `git checkout development`
+* while in Sublime Text/GoSublime, press ctrl+. ,ctrl+9 (cmd+. ,cmd+9 on Mac) to open the GoSublime command prompt and run the command `margo.sh dev fork $your-fork` e.g. `margo.sh dev fork git@github.com:DisposaBoy/margo.git`
+
+ this sets the git remote `margo` to the upstream repo from which you will `pull` your updates.
+ the `origin` remote is set to your fork to which you will push your changes.
+
+### Your First Contribution
+
+Working on your first Pull Request? You can learn how from this *free* series, [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github).
+
+### Submitting code
+
+Any code change should be submitted as a pull request. The description should explain what the code does and give steps to execute it. The pull request should ideally also contain tests.
+
+### Code review process
+
+The bigger the pull request, the longer it will take to review and merge. Try to break down large pull requests in smaller chunks that are easier to review and merge.
+It is also always helpful to have some context for your pull request. What was the purpose? Why does it matter to you?
+
+
+
+
+
diff --git a/src/margo.sh/Gopkg.lock b/src/margo.sh/Gopkg.lock
new file mode 100644
index 00000000..58584e42
--- /dev/null
+++ b/src/margo.sh/Gopkg.lock
@@ -0,0 +1,206 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:c7db0d7eb0409eda9ac91b3a9d9ed49a019d0c9c82c6dd4e4bfcf97267c7d435"
+ name = "github.com/coreos/bbolt"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "68cc10a767ea1c6b9e8dcb9847317ff192d6d974"
+ version = "v1.3.4"
+
+[[projects]]
+ digest = "1:7cb4fdca4c251b3ef8027c90ea35f70c7b661a593b9eeae34753c65499098bb1"
+ name = "github.com/cpuguy83/go-md2man"
+ packages = ["md2man"]
+ pruneopts = "UT"
+ revision = "7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19"
+ version = "v1.0.10"
+
+[[projects]]
+ digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
+ name = "github.com/dustin/go-humanize"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:1561d3f382a9923816f744264e5ad200a94486328d39216c981e981ab4aba17f"
+ name = "github.com/karrick/godirwalk"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "99bb3743ba1bdd193cd193bcc0aeced2dd577c4d"
+ version = "v1.15.6"
+
+[[projects]]
+ digest = "1:97ef3ad8a33b72948a86ffc5fffccd713e06dda70649a9a320a21a887a242711"
+ name = "github.com/klauspost/asmfmt"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3da6afce54d96eb4f7ec7ffaa196f53152e9d456"
+ version = "v1.2.1"
+
+[[projects]]
+ digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3"
+ name = "github.com/rogpeppe/go-internal"
+ packages = [
+ "modfile",
+ "module",
+ "semver",
+ ]
+ pruneopts = "UT"
+ revision = "bc89b17ba21ce5b8a495fa55a94e3fe32ecf4ed8"
+ version = "v1.5.2"
+
+[[projects]]
+ digest = "1:b36a0ede02c4c2aef7df7f91cbbb7bb88a98b5d253509d4f997dda526e50c88c"
+ name = "github.com/russross/blackfriday"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "05f3235734ad95d0016f6a23902f06461fcf567a"
+ version = "v1.5.2"
+
+[[projects]]
+ digest = "1:5a1cf4e370bc86137b58da2ae065e76526d32b11f62a7665f36dbd5f41fa95ff"
+ name = "github.com/ugorji/go"
+ packages = ["codec"]
+ pruneopts = "UT"
+ revision = "23ab95ef5dc3b70286760af84ce2327a2b64ed62"
+ version = "v1.1.7"
+
+[[projects]]
+ digest = "1:1d3ef3dd057d2eb1819e945f88cc83835296c9b7fb13ad3194c937c4e2891fee"
+ name = "github.com/urfave/cli"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "bfe2e925cfb6d44b40ad3a779165ea7e8aff9212"
+ version = "v1.22.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:bd74b8f4f9c56983b8628a5bc94a63e1e07dfe69084d0473cfd5f577d2d033ec"
+ name = "golang.org/x/crypto"
+ packages = ["blake2b"]
+ pruneopts = "UT"
+ revision = "4bdfaf469ed5a1194e0ec57ec7b1af73991a7ee3"
+
+[[projects]]
+ digest = "1:467bb8fb8fa786448b8d486cd0bb7c1a5577dcd7310441aa02a20110cd9f727d"
+ name = "golang.org/x/mod"
+ packages = [
+ "module",
+ "semver",
+ ]
+ pruneopts = "UT"
+ revision = "ed3ec21bb8e252814c380df79a80f366440ddb2d"
+ version = "v0.2.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70"
+ name = "golang.org/x/net"
+ packages = ["context"]
+ pruneopts = "UT"
+ revision = "e086a090c8fdb9982880f0fb6e3db47af1856533"
+
+[[projects]]
+ branch = "master"
+ digest = "1:b521f10a2d8fa85c04a8ef4e62f2d1e14d303599a55d64dabf9f5a02f84d35eb"
+ name = "golang.org/x/sync"
+ packages = ["errgroup"]
+ pruneopts = "UT"
+ revision = "43a5402ce75a95522677f77c619865d66b8c57ab"
+
+[[projects]]
+ branch = "master"
+ digest = "1:c73fc47a43a9a69f82c3c708032db58320e03af3fe178ebab8e2045c47c1356a"
+ name = "golang.org/x/sys"
+ packages = [
+ "cpu",
+ "unix",
+ ]
+ pruneopts = "UT"
+ revision = "1957bb5e6d1f523308b49060df02171d06ddfc77"
+
+[[projects]]
+ branch = "master"
+ digest = "1:99e4172e8a303da9f7bc32eb7868108b03ac588768988ac788e5ed5e049e075b"
+ name = "golang.org/x/tools"
+ packages = [
+ "cmd/guru",
+ "cmd/guru/serial",
+ "container/intsets",
+ "go/ast/astutil",
+ "go/buildutil",
+ "go/callgraph",
+ "go/callgraph/static",
+ "go/gcexportdata",
+ "go/internal/cgo",
+ "go/internal/gcimporter",
+ "go/internal/packagesdriver",
+ "go/loader",
+ "go/packages",
+ "go/pointer",
+ "go/ssa",
+ "go/ssa/ssautil",
+ "go/types/typeutil",
+ "imports",
+ "internal/event",
+ "internal/event/core",
+ "internal/event/keys",
+ "internal/event/label",
+ "internal/fastwalk",
+ "internal/gocommand",
+ "internal/gopathwalk",
+ "internal/imports",
+ "internal/packagesinternal",
+ "refactor/importgraph",
+ ]
+ pruneopts = "UT"
+ revision = "8463f397d07cfd2b3f1442fb1daa5e6bc2178a6e"
+
+[[projects]]
+ branch = "master"
+ digest = "1:918a46e4a2fb83df33f668f5a6bd51b2996775d073fce1800d3ec01b0a5ddd2b"
+ name = "golang.org/x/xerrors"
+ packages = [
+ ".",
+ "internal",
+ ]
+ pruneopts = "UT"
+ revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544"
+
+[[projects]]
+ branch = "master"
+ digest = "1:c6125c56c22c64c3fec0f2e3ad37490545b55d7d001b5306b89bf7d0e959a9ba"
+ name = "kuroku.io/margocode"
+ packages = [
+ "lookdot",
+ "suggest",
+ ]
+ pruneopts = "UT"
+ revision = "a1431e3c0752dcbcfa76f3bdd0ceff11c041ff4c"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "github.com/coreos/bbolt",
+ "github.com/dustin/go-humanize",
+ "github.com/karrick/godirwalk",
+ "github.com/klauspost/asmfmt",
+ "github.com/rogpeppe/go-internal/modfile",
+ "github.com/rogpeppe/go-internal/module",
+ "github.com/rogpeppe/go-internal/semver",
+ "github.com/ugorji/go/codec",
+ "github.com/urfave/cli",
+ "golang.org/x/crypto/blake2b",
+ "golang.org/x/net/context",
+ "golang.org/x/sync/errgroup",
+ "golang.org/x/tools/cmd/guru",
+ "golang.org/x/tools/go/gcexportdata",
+ "kuroku.io/margocode/suggest",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/src/margo.sh/Gopkg.toml b/src/margo.sh/Gopkg.toml
new file mode 100644
index 00000000..619fe14f
--- /dev/null
+++ b/src/margo.sh/Gopkg.toml
@@ -0,0 +1,19 @@
+required = [
+ "golang.org/x/tools/cmd/guru"
+]
+
+[prune]
+ go-tests = true
+ unused-packages = true
+
+[[constraint]]
+ branch = "master"
+ name = "kuroku.io/margocode"
+
+[[constraint]]
+ name = "github.com/rogpeppe/go-internal"
+ version = "1.3.0"
+
+[[override]]
+ name = "github.com/russross/blackfriday"
+ version = "1.5.2"
diff --git a/src/margo.sh/LICENSE.md b/src/margo.sh/LICENSE.md
new file mode 100644
index 00000000..411aedb0
--- /dev/null
+++ b/src/margo.sh/LICENSE.md
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 The margo Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/src/disposa.blue/margo/README.md b/src/margo.sh/README.md
similarity index 54%
rename from src/disposa.blue/margo/README.md
rename to src/margo.sh/README.md
index 423b67b9..1d5caf7d 100644
--- a/src/disposa.blue/margo/README.md
+++ b/src/margo.sh/README.md
@@ -1,8 +1,8 @@
-[](https://travis-ci.org/disposablue/margo)
+[](https://travis-ci.org/KurokuLabs/margo)
-# MarGo
+# margo
This project is the next step in the evolution of https://github.com/DisposaBoy/GoSublime...
this time with less Python, less Sublime Text and more Go.
@@ -13,3 +13,10 @@ This repo is made public at this time solely to make it easier to integrate with
It is under very active experimental development and code may be broken or deleted at any time.
+
+## License & Contributing
+
+margo is released under the MIT license. See [LICENSE.md](LICENSE.md)
+
+See [CONTRIBUTING.md](CONTRIBUTING.md) for details about contributing to the project.
+
diff --git a/src/margo.sh/bolt/bolt.go b/src/margo.sh/bolt/bolt.go
new file mode 100644
index 00000000..8e355117
--- /dev/null
+++ b/src/margo.sh/bolt/bolt.go
@@ -0,0 +1,129 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ bolt "github.com/coreos/bbolt"
+ "github.com/ugorji/go/codec"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sync"
+ "time"
+)
+
+var (
+ DS = func() *DataStore {
+ dir := os.Getenv("MARGO_DATA_DIR")
+ if dir == "" {
+ d, err := ioutil.TempDir("", "margo.data~fallback~")
+ if err != nil {
+ panic("MARGO_DATA_DIR is not defined and ioutil.TempDir failed: " + err.Error())
+ }
+ dir = d
+ }
+
+ return &DataStore{
+ Path: filepath.Join(dir, "bolt.ds"),
+ Handle: &codec.MsgpackHandle{},
+ Bucket: []byte("ds"),
+ }
+ }()
+)
+
+type DataStore struct {
+ Bucket []byte
+ Handle codec.Handle
+ Path string
+
+ mu sync.RWMutex
+}
+
+func (ds *DataStore) encodeKey(v interface{}) []byte {
+ pkg := ""
+ if t := reflect.TypeOf(v); t != nil {
+ pkg = t.PkgPath()
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "pkg=%s typ=%T str=%#v", pkg, v, v)
+ return buf.Bytes()
+}
+
+func (ds *DataStore) encodeVal(v interface{}) ([]byte, error) {
+ s := []byte{}
+ err := codec.NewEncoderBytes(&s, ds.Handle).Encode(v)
+ return s, err
+}
+
+func (ds *DataStore) decodeVal(s []byte, p interface{}) error {
+ return codec.NewDecoderBytes(s, ds.Handle).Decode(p)
+}
+
+func (ds *DataStore) view(f func(*bolt.Tx) error) error {
+ ds.mu.RLock()
+ defer ds.mu.RUnlock()
+
+ return ds.tx(true, f)
+}
+
+func (ds *DataStore) update(f func(*bolt.Tx) error) error {
+ ds.mu.Lock()
+ defer ds.mu.Unlock()
+
+ return ds.tx(false, f)
+}
+
+func (ds *DataStore) tx(view bool, f func(*bolt.Tx) error) error {
+ db, err := bolt.Open(ds.Path, 0600, &bolt.Options{
+ Timeout: 5 * time.Second,
+ })
+ if err != nil {
+ return err
+ }
+ defer db.Close()
+
+ if view {
+ return db.View(f)
+ }
+ return db.Update(f)
+}
+
+func (ds *DataStore) Load(key, ptr interface{}) error {
+ k := ds.encodeKey(key)
+ return ds.view(func(tx *bolt.Tx) error {
+ bck := tx.Bucket(ds.Bucket)
+ if bck == nil {
+ return bolt.ErrBucketNotFound
+ }
+ s := bck.Get(k)
+ return ds.decodeVal(s, ptr)
+ })
+}
+
+func (ds *DataStore) Store(key, val interface{}) error {
+ k := ds.encodeKey(key)
+ v, err := ds.encodeVal(val)
+ if err != nil {
+ return err
+ }
+
+ return ds.update(func(tx *bolt.Tx) error {
+ bck, err := tx.CreateBucketIfNotExists(ds.Bucket)
+ if err != nil {
+ return err
+ }
+ return bck.Put(k, v)
+ })
+}
+
+func (ds *DataStore) Delete(key interface{}) error {
+ k := ds.encodeKey(key)
+ return ds.update(func(tx *bolt.Tx) error {
+ bck := tx.Bucket(ds.Bucket)
+ if bck == nil {
+ return nil
+ }
+ return bck.Delete(k)
+ })
+}
diff --git a/src/disposa.blue/margo/cmd/margo.sublime/main.go b/src/margo.sh/cmd/margo.sublime/main.go
similarity index 60%
rename from src/disposa.blue/margo/cmd/margo.sublime/main.go
rename to src/margo.sh/cmd/margo.sublime/main.go
index e7947790..bb1e34f0 100644
--- a/src/disposa.blue/margo/cmd/margo.sublime/main.go
+++ b/src/margo.sh/cmd/margo.sublime/main.go
@@ -1,7 +1,7 @@
package main
import (
- "disposa.blue/margo/cmdpkg/margosublime"
+ "margo.sh/cmdpkg/margosublime"
)
func main() {
diff --git a/src/margo.sh/cmdpkg/margo/ci.go b/src/margo.sh/cmdpkg/margo/ci.go
new file mode 100644
index 00000000..eb147e1e
--- /dev/null
+++ b/src/margo.sh/cmdpkg/margo/ci.go
@@ -0,0 +1,66 @@
+package margo
+
+import (
+ "github.com/urfave/cli"
+ "margo.sh/cmdpkg/margo/cmdrunner"
+ "strings"
+)
+
+var ciCmd = cli.Command{
+ Name: "ci",
+ Description: "ci runs various tests for use in ci environments, etc.",
+ ArgsUsage: "[patterns...] (default 'margo.sh/...')",
+ Flags: []cli.Flag{
+ cli.BoolFlag{
+ Name: "quick",
+ Usage: "Disable '-race' and other things that are known to be slow.",
+ },
+ },
+ Action: func(cx *cli.Context) error {
+ quick := cx.Bool("quick")
+ race := !quick
+ pats := cx.Args()
+ if len(pats) == 0 {
+ pats = []string{"margo.sh/..."}
+ }
+
+ testScript := []string{"go", "test"}
+ if race {
+ testScript = append(testScript, "-race")
+ }
+
+ vetScript := []string{"go", "vet",
+ "-all",
+ "-printfuncs", strings.Join([]string{
+ "Errorf",
+ "Fatal", "Fatalf",
+ "Fprint", "Fprintf", "Fprintln",
+ "Panic", "Panicf", "Panicln",
+ "Print", "Printf", "Println",
+ "Sprint", "Sprintf", "Sprintln",
+
+ "AddErrorf",
+ "AddStatusf",
+ "dbgf",
+ "EmTextf",
+ "Textf",
+ }, ","),
+ }
+
+ scripts := [][]string{
+ vetScript,
+ testScript,
+ }
+ for _, script := range scripts {
+ cmd := cmdrunner.Cmd{
+ Name: script[0],
+ Args: append(script[1:], pats...),
+ OutToErr: true,
+ }
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+}
diff --git a/src/margo.sh/cmdpkg/margo/cmdrunner/cmdrunner.go b/src/margo.sh/cmdpkg/margo/cmdrunner/cmdrunner.go
new file mode 100644
index 00000000..5afee0e5
--- /dev/null
+++ b/src/margo.sh/cmdpkg/margo/cmdrunner/cmdrunner.go
@@ -0,0 +1,48 @@
+package cmdrunner
+
+import (
+ "fmt"
+ "margo.sh/mgutil"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+type Cmd struct {
+ Name string
+ Args []string
+ Env map[string]string
+ Dir string
+ OutToErr bool
+}
+
+func (c Cmd) Run() error {
+ cmd := exec.Command(c.Name, c.Args...)
+ cmd.Stdin = os.Stdin
+ cmd.Stderr = os.Stderr
+ if c.OutToErr {
+ cmd.Stdout = cmd.Stderr
+ } else {
+ cmd.Stdout = os.Stdout
+ }
+ cmd.Dir = c.Dir
+
+ if len(c.Env) != 0 {
+ environ := os.Environ()
+ cmd.Env = make([]string, 0, len(environ)+1)
+ // I don't remember the rules about duplicate env vars...
+ for _, s := range os.Environ() {
+ k := strings.Split(s, "=")[0]
+ if _, exists := c.Env[k]; !exists {
+ cmd.Env = append(cmd.Env, s)
+ }
+ }
+ for k, v := range c.Env {
+ cmd.Env = append(cmd.Env, k+"="+v)
+ }
+ }
+
+ fmt.Fprintf(os.Stderr, "``` %s ```\n", mgutil.QuoteCmd(c.Name, c.Args...))
+
+ return cmd.Run()
+}
diff --git a/src/margo.sh/cmdpkg/margo/dev.go b/src/margo.sh/cmdpkg/margo/dev.go
new file mode 100644
index 00000000..c6be7812
--- /dev/null
+++ b/src/margo.sh/cmdpkg/margo/dev.go
@@ -0,0 +1,90 @@
+package margo
+
+import (
+ "fmt"
+ "github.com/urfave/cli"
+ "go/build"
+ "margo.sh/cmdpkg/margo/cmdrunner"
+)
+
+const (
+ devRemoteFork = "origin"
+ devRemoteUpstream = "margo"
+ devUpstreamURL = "https://margo.sh/"
+)
+
+var (
+ devCmd = cli.Command{
+ Name: "dev",
+ Description: "",
+ Subcommands: cli.Commands{
+ devCmdFork,
+ },
+ }
+
+ devCmdFork = cli.Command{
+ Name: "fork",
+ Description: "set remote `" + devRemoteFork + "` to your fork and `" + devRemoteUpstream + "` to the margo.sh repo",
+ Action: func(cx *cli.Context) error {
+ pkg, err := devCmdFindPkgDir()
+ if err != nil {
+ return err
+ }
+
+ args := cx.Args()
+ if len(args) != 1 {
+ return fmt.Errorf("Please specify the forked repo url")
+ }
+
+ cmds := []cmdrunner.Cmd{
+ cmdrunner.Cmd{
+ Name: "git",
+ Args: []string{"remote", "add", "-f", devRemoteUpstream, devUpstreamURL},
+ Dir: pkg.Dir,
+ OutToErr: true,
+ },
+ cmdrunner.Cmd{
+ Name: "git",
+ Args: []string{"remote", "set-url", "--push", devRemoteUpstream, "NoPushToMargoRepo"},
+ Dir: pkg.Dir,
+ OutToErr: true,
+ },
+ cmdrunner.Cmd{
+ Name: "git",
+ Args: []string{"remote", "set-url", devRemoteFork, args[0]},
+ Dir: pkg.Dir,
+ OutToErr: true,
+ },
+ }
+ for _, cmd := range cmds {
+ e := cmd.Run()
+ if err == nil && e != nil {
+ err = e
+ }
+ }
+ return err
+ },
+ }
+)
+
+func devCmdFindPkgDir() (*build.Package, error) {
+ pkg, err := build.Import("margo.sh", ".", build.FindOnly)
+ if err == nil {
+ return pkg, nil
+ }
+
+ err = cmdrunner.Cmd{
+ Name: "go",
+ Args: []string{"get", "-d", "-v", "margo.sh"},
+ OutToErr: true,
+ }.Run()
+ if err != nil {
+ return nil, fmt.Errorf("Cannot go get margo.sh: %s", err)
+ }
+
+ pkg, err = build.Import("margo.sh", ".", 0)
+ if err != nil {
+ return nil, fmt.Errorf("Cannot find pkg dir: %s", err)
+ }
+ return pkg, nil
+}
diff --git a/src/margo.sh/cmdpkg/margo/main.go b/src/margo.sh/cmdpkg/margo/main.go
new file mode 100644
index 00000000..6160024a
--- /dev/null
+++ b/src/margo.sh/cmdpkg/margo/main.go
@@ -0,0 +1,100 @@
+package margo
+
+import (
+ "flag"
+ "fmt"
+ "github.com/urfave/cli"
+ "margo.sh/mgcli"
+ "margo.sh/sublime"
+ "os"
+)
+
+var (
+ cmdList = []mgcli.Commands{
+ sublime.Commands,
+ }
+
+ cmdNames []string
+
+ cmdMap map[string]mgcli.Commands
+
+ buildCmd = cli.Command{
+ Name: "build",
+ Description: "build the specified agent (see COMMANDS)",
+ }
+
+ runCmd = cli.Command{
+ Name: "run",
+ Description: "run the specified agent (see COMMANDS)",
+ }
+
+ startCmd = cli.Command{
+ Name: "start",
+ Description: "`build` and `run` the specified agent (see COMMANDS)",
+ }
+)
+
+func init() {
+ cmdMap = map[string]mgcli.Commands{}
+ buildNames := []string{}
+ runNames := []string{}
+ for _, mc := range cmdList {
+ cmdNames = append(cmdNames, mc.Name)
+ cmdMap[mc.Name] = mc
+ if mc.Build != nil {
+ buildNames = append(buildNames, mc.Name)
+ appendSubCmd(&buildCmd, mc, *mc.Build)
+ }
+ if mc.Run != nil {
+ runNames = append(runNames, mc.Name)
+ appendSubCmd(&runCmd, mc, *mc.Run)
+ }
+ appendSubCmd(&startCmd, mc, cli.Command{
+ Action: startAction,
+ SkipFlagParsing: true,
+ SkipArgReorder: true,
+ })
+ }
+}
+
+func Main() {
+ app := mgcli.NewApp()
+ app.Commands = []cli.Command{
+ buildCmd,
+ runCmd,
+ startCmd,
+ devCmd,
+ ciCmd,
+ }
+ app.RunAndExitOnError()
+}
+
+func appendSubCmd(cmd *cli.Command, cmds mgcli.Commands, subCmd cli.Command) {
+ if subCmd.Name == "" {
+ subCmd.Name = cmds.Name
+ }
+ cmd.Subcommands = append(cmd.Subcommands, subCmd)
+}
+
+func startAction(cx *cli.Context) error {
+ mc := cmdMap[cx.Command.Name]
+ app := &mgcli.NewApp().App
+ app.Name = mc.Name
+ newCtx := func(args []string) *cli.Context {
+ flags := flag.NewFlagSet(mc.Name, 0)
+ flags.Usage = func() {}
+ flags.Parse(append([]string{mc.Name}, args...))
+ return cli.NewContext(app, flags, cx)
+ }
+ if mc.Build != nil {
+ err := mc.Build.Run(newCtx(nil))
+ if err != nil {
+ e := fmt.Sprintf("%s build failed: %s", mc.Name, err)
+ os.Setenv("MARGO_BUILD_ERROR", e)
+ }
+ }
+ if mc.Run != nil {
+ return mc.Run.Run(newCtx(cx.Args()))
+ }
+ return nil
+}
diff --git a/src/disposa.blue/margo/cmdpkg/margosublime/main-extension.go b/src/margo.sh/cmdpkg/margosublime/main-extension.go
similarity index 81%
rename from src/disposa.blue/margo/cmdpkg/margosublime/main-extension.go
rename to src/margo.sh/cmdpkg/margosublime/main-extension.go
index 178ebf83..7637d00a 100644
--- a/src/disposa.blue/margo/cmdpkg/margosublime/main-extension.go
+++ b/src/margo.sh/cmdpkg/margosublime/main-extension.go
@@ -9,5 +9,4 @@ import (
func init() {
margoExt = margo.Margo
- sublCfg = sublCfg.EnabledForLangs("*")
}
diff --git a/src/disposa.blue/margo/cmdpkg/margosublime/main.go b/src/margo.sh/cmdpkg/margosublime/main.go
similarity index 63%
rename from src/disposa.blue/margo/cmdpkg/margosublime/main.go
rename to src/margo.sh/cmdpkg/margosublime/main.go
index 4c4e13f6..8486e0be 100644
--- a/src/disposa.blue/margo/cmdpkg/margosublime/main.go
+++ b/src/margo.sh/cmdpkg/margosublime/main.go
@@ -1,26 +1,25 @@
package margosublime
import (
- "disposa.blue/margo/mg"
- "disposa.blue/margo/mgcli"
- "disposa.blue/margo/sublime"
"fmt"
"github.com/urfave/cli"
+ "margo.sh/mg"
+ "margo.sh/mgcli"
+ "margo.sh/sublime"
)
var (
- margoExt mg.MargoFunc = sublime.Margo
- sublCfg mg.EditorConfig = sublime.DefaultConfig
+ margoExt mg.MargoFunc = sublime.Margo
+ agentConfig = mg.AgentConfig{AgentName: sublime.AgentName}
)
func Main() {
- cfg := mg.AgentConfig{}
app := mgcli.NewApp()
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "codec",
- Value: cfg.Codec,
- Destination: &cfg.Codec,
+ Value: agentConfig.Codec,
+ Destination: &agentConfig.Codec,
Usage: fmt.Sprintf("The IPC codec: %s (default %s)", mg.CodecNamesStr, mg.DefaultCodec),
},
}
@@ -29,12 +28,12 @@ func Main() {
return cli.ShowAppHelp(ctx)
}
- ag, err := mg.NewAgent(cfg)
+ ag, err := mg.NewAgent(agentConfig)
if err != nil {
return mgcli.Error("agent creation failed:", err)
}
-
- ag.Store.EditorConfig(sublCfg)
+ mg.SetMemoryLimit(ag.Log, mg.DefaultMemoryLimit)
+ ag.Store.SetBaseConfig(sublime.DefaultConfig)
if margoExt != nil {
margoExt(ag.Args())
}
diff --git a/src/margo.sh/cmdpkg/margosublime/main_test.go b/src/margo.sh/cmdpkg/margosublime/main_test.go
new file mode 100644
index 00000000..c5e91a83
--- /dev/null
+++ b/src/margo.sh/cmdpkg/margosublime/main_test.go
@@ -0,0 +1,42 @@
+package margosublime
+
+import (
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "margo.sh/sublime"
+ "testing"
+)
+
+func TestConfig(t *testing.T) {
+ ac := agentConfig
+ ac.Stdin = &mgutil.IOWrapper{}
+ ac.Stdout = &mgutil.IOWrapper{}
+ ac.Stderr = &mgutil.IOWrapper{}
+ ag, err := mg.NewAgent(ac)
+ if err != nil {
+ t.Fatalf("agent creation failed: %v", err)
+ }
+
+ ag.Store.SetBaseConfig(sublime.DefaultConfig)
+ mxc := make(chan *mg.Ctx)
+ ag.Store.Subscribe(func(mx *mg.Ctx) {
+ mxc <- mx
+ })
+ ag.Store.Dispatch(mg.Render)
+ go ag.Run()
+ mx := <-mxc
+
+ if _, ok := mx.Config.(sublime.Config); !ok {
+ t.Fatalf("mx.Config is %T, not %T", mx.Config, sublime.Config{})
+ }
+
+ ec := mx.Config.EditorConfig()
+ cv, ok := ec.(sublime.ConfigValues)
+ if !ok {
+ t.Fatalf("mx.Config.EditorConfig() is %T, not %T", ec, sublime.ConfigValues{})
+ }
+
+ if len(cv.EnabledForLangs) == 0 {
+ t.Fatal("EditorConfig().Values.EnabledForLangs is empty")
+ }
+}
diff --git a/src/margo.sh/extension-example/extension-example.go b/src/margo.sh/extension-example/extension-example.go
new file mode 100644
index 00000000..6f4369bc
--- /dev/null
+++ b/src/margo.sh/extension-example/extension-example.go
@@ -0,0 +1,241 @@
+package margo
+
+import (
+ "margo.sh/golang"
+ "margo.sh/mg"
+ "time"
+)
+
+// Margo is the entry-point to margo
+func Margo(m mg.Args) {
+ // See the documentation for `mg.Reducer`
+ // comments beginning with `gs:` denote features that replace old GoSublime settings
+
+ // add our reducers (margo plugins) to the store
+ // they are run in the specified order
+ // and should ideally not block for more than a couple milliseconds
+ m.Use(
+ // MOTD keeps you updated about new versions and important announcements
+ //
+ // It adds a new command `motd.sync` available via the UserCmd palette as `Sync MOTD (check for updates)`
+ //
+ // Interval can be set in order to enable automatic update fetching.
+ //
+ // When new updates are found, it displays the message in the status bar
+ // e.g. `★ margo.sh/cl/18.09.14 ★` a url where you see the upcoming changes before updating
+ //
+ // It sends the following data to the url https://api.margo.sh/motd.json:
+ // * current editor plugin name e.g. `?client=gosublime`
+ // this tells us which editor plugin's changelog to check
+ // * current editor plugin version e.g. `?tag=r18.09.14-1`
+ // this allows us to determine if there any updates
+ // * whether or not this is the first request of the day e.g. `?firstHit=1`
+ // this allows us to get an estimated count of active users without storing
+ // any personally identifiable data
+ //
+ // No other data is sent. For more info contact privacy at kuroku.io
+ //
+ &mg.MOTD{
+ // Interval, if set, specifies how often to automatically fetch messages from Endpoint
+ // Interval: 3600e9, // automatically fetch updates every hour
+ },
+
+ mg.NewReducer(func(mx *mg.Ctx) *mg.State {
+ // By default, events (e.g. ViewSaved) are triggered in all files.
+ // Replace `mg.AllLangs` with `mg.Go` to restrict events to Go(-lang) files.
+ // Please note, however, that this mode is not tested
+ // and saving a non-go file will not trigger linters, etc. for that go pkg
+ return mx.SetConfig(mx.Config.EnabledForLangs(
+ mg.AllLangs,
+ ))
+ }),
+
+ // Add `go` command integration
+ // this adds a new commands:
+ // gs: these commands are all callable through 9o:
+ // * go: Wrapper around the go command, adding linter support
+ // * go.play: Automatically build and run go commands or run go test for packages
+ // with support for linting and unsaved files
+ // * go.replay: Wrapper around go.play limited to a single instance
+ // by default this command is bound to ctrl+.,ctrl+r or cmd+.,cmd+r
+ //
+ // UserCmds are also added for `Go Play` and `Go RePlay`
+ &golang.GoCmd{
+ // Make the output of `go test -bench...` more readable.
+ // Humanize: true,
+ },
+
+ // add the day and time to the status bar
+ &DayTimeStatus{},
+
+ // both GoFmt and GoImports will automatically disable the GoSublime version
+ // you will need to install the `goimports` tool manually
+ // https://godoc.org/golang.org/x/tools/cmd/goimports
+ //
+ // gs: this replaces settings `fmt_enabled`, `fmt_tab_indent`, `fmt_tab_width`, `fmt_cmd`
+ //
+ // golang.GoFmt,
+ // or
+ // golang.GoImports,
+
+ // Configure general auto-completion behaviour
+ &golang.MarGocodeCtl{
+ // whether or not to include Test*, Benchmark* and Example* functions in the auto-completion list
+ // gs: this replaces the `autocomplete_tests` setting
+ ProposeTests: false,
+
+ // Don't try to automatically import packages when auto-completion fails
+ // e.g. when `json.` is typed, if auto-complete fails
+ // "encoding/json" is imported and auto-complete attempted on that package instead
+ // See AddUnimportedPackages
+ NoUnimportedPackages: false,
+
+ // If a package was imported internally for use in auto-completion,
+ // insert it in the source code
+ // See NoUnimportedPackages
+ // e.g. after `json.` is typed, `import "encoding/json"` added to the code
+ AddUnimportedPackages: false,
+
+ // Don't preload packages to speed up auto-completion, etc.
+ NoPreloading: false,
+
+ // Don't suggest builtin types and functions
+ // gs: this replaces the `autocomplete_builtins` setting
+ NoBuiltins: false,
+ },
+
+ // Enable auto-completion
+ // gs: this replaces the `gscomplete_enabled` setting
+ &golang.Gocode{
+ // show the function parameters. this can take up a lot of space
+ ShowFuncParams: true,
+ },
+
+ // show func arguments/calltips in the status bar
+ // gs: this replaces the `calltips` setting
+ &golang.GocodeCalltips{},
+
+ // use guru for goto-definition
+ // new commands `goto.definition` and `guru.definition` are defined
+ // gs: by default `goto.definition` is bound to ctrl+.,ctrl+g or cmd+.,cmd+g
+ &golang.Guru{},
+
+ // add some default context aware-ish snippets
+ // gs: this replaces the `autocomplete_snippets` and `default_snippets` settings
+ golang.Snippets,
+
+ // add our own snippets
+ // gs: this replaces the `snippets` setting
+ MySnippets,
+
+ // check the file for syntax errors
+ // gs: this and other linters e.g. below,
+ // replaces the settings `gslint_enabled`, `lint_filter`, `comp_lint_enabled`,
+ // `comp_lint_commands`, `gslint_timeout`, `lint_enabled`, `linters`
+ &golang.SyntaxCheck{},
+
+ // Add user commands for running tests and benchmarks
+ // gs: this adds support for the tests command palette `ctrl+.`,`ctrl+t` or `cmd+.`,`cmd+t`
+ &golang.TestCmds{
+ // additional args to add to the command when running tests and examples
+ TestArgs: []string{},
+
+ // additional args to add to the command when running benchmarks
+ BenchArgs: []string{"-benchmem"},
+ },
+
+ // GoGenerate adds a UserCmd that calls `go generate` in go packages and sub-dirs
+ &golang.GoGenerate{Args: []string{"-v", "-x"}},
+
+ // run `go install -i` on save
+ // golang.GoInstall("-i"),
+ // or
+ // golang.GoInstallDiscardBinaries("-i"),
+ //
+ // GoInstallDiscardBinaries will additionally set $GOBIN
+ // to a temp directory so binaries are not installed into your $GOPATH/bin
+ //
+ // the -i flag is used to install imported packages as well
+ // it's only supported in go1.10 or newer
+
+ // run `go vet` on save. go vet is run automatically as part of `go test` in go1.10
+ // golang.GoVet(),
+
+ // run `go test -race` on save
+ // golang.GoTest("-race"),
+
+ // run `golint` on save
+ // &golang.Linter{Name: "golint", Label: "Go/Lint"},
+
+ // run gometalinter on save
+ // &golang.Linter{Name: "gometalinter", Args: []string{
+ // "--disable=gas",
+ // "--fast",
+ // }},
+
+ // AsmFmt is a reducer that does code fmt'ing for `.s` files.
+ // It uses the package https://github.com/klauspost/asmfmt
+ // &golang.AsmFmt{},
+
+ // Prettier is a reducer that does code fmt'ing using https://github.com/prettier/prettier
+ // It fmt's CSS, HTML, JS, JSON, JSX, SVG, TS, TSX and XML files.
+ //
+ // NOTE: as a special-case, files with extensions starting with `.sublime-` are ignored.
+ // NOTE: you will need to install prettier separately
+ //
+ // You will need to `import "margo.sh/web"`
+ // &web.Prettier{
+ // Langs: web.PrettierDefaultLangs,
+ // },
+
+ // PackageScripts adds UserCmd entries for each script defined in package.json
+ //
+ // You will need to `import "margo.sh/web/nodejs"`
+ // &nodejs.PackageScripts{},
+ )
+}
+
+// DayTimeStatus adds the current day and time to the status bar
+type DayTimeStatus struct {
+ mg.ReducerType
+}
+
+func (dts DayTimeStatus) RMount(mx *mg.Ctx) {
+ // kick off the ticker when we start
+ dispatch := mx.Store.Dispatch
+ go func() {
+ ticker := time.NewTicker(1 * time.Second)
+ for range ticker.C {
+ dispatch(mg.Render)
+ }
+ }()
+}
+
+func (dts DayTimeStatus) Reduce(mx *mg.Ctx) *mg.State {
+ // we always want to render the time
+ // otherwise it will sometimes disappear from the status bar
+ now := time.Now()
+ format := "Mon, 15:04"
+ if now.Second()%2 == 0 {
+ format = "Mon, 15 04"
+ }
+ return mx.AddStatus(now.Format(format))
+}
+
+// MySnippets is a slice of functions returning our own snippets
+var MySnippets = golang.SnippetFuncs(
+ func(cx *golang.CompletionCtx) []mg.Completion {
+ // if we're not in a block (i.e. function), do nothing
+ if !cx.Scope.Is(golang.BlockScope) {
+ return nil
+ }
+
+ return []mg.Completion{
+ {
+ Query: "if err",
+ Title: "err != nil { return }",
+ Src: "if ${1:err} != nil {\n\treturn $0\n}",
+ },
+ }
+ },
+)
diff --git a/src/disposa.blue/margo/extension-example/extension_test.go b/src/margo.sh/extension-example/extension_test.go
similarity index 68%
rename from src/disposa.blue/margo/extension-example/extension_test.go
rename to src/margo.sh/extension-example/extension_test.go
index 7e772037..837a3131 100644
--- a/src/disposa.blue/margo/extension-example/extension_test.go
+++ b/src/margo.sh/extension-example/extension_test.go
@@ -1,7 +1,7 @@
package margo
import (
- "disposa.blue/margo/mg"
+ "margo.sh/mg"
)
var _ mg.MargoFunc = Margo
diff --git a/src/margo.sh/format/format.go b/src/margo.sh/format/format.go
new file mode 100644
index 00000000..a4e9ce56
--- /dev/null
+++ b/src/margo.sh/format/format.go
@@ -0,0 +1,101 @@
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "os/exec"
+)
+
+// FmtFunc is a reducer for generic fmt functions
+//
+// it takes care of reading the view src and properly reporting any errors to the editor
+type FmtFunc struct {
+ // Fmt receives a copy of the view src and returns the fmt'ed src.
+ //
+ // Fmt should ideally fail in the face of any uncertainty
+ // e.g. if running a command to do the formatting and it prints anything to stderr;
+ // it should return an error because commands do not reliably return an error status.
+ Fmt func(mx *mg.Ctx, src []byte) ([]byte, error)
+
+ // Langs is the list of languages in which the reducer should run
+ Langs []mg.Lang
+
+ // Actions is a list of actions on which the reducer is allowed to run.
+ // The reducer always runs on the ViewFmt action, even if this list is empty.
+ Actions []mg.Action
+}
+
+// Reduce implements the FmtFunc reducer.
+func (ff FmtFunc) Reduce(mx *mg.Ctx) *mg.State {
+ if !mx.ActionIs(ff.Actions...) || !mx.LangIs(ff.Langs...) {
+ return mx.State
+ }
+
+ fn := mx.View.Filename()
+ src, err := mx.View.ReadAll()
+ if err != nil {
+ return mx.AddErrorf("failed to read %s: %s\n", fn, err)
+ }
+ if len(src) == 0 {
+ return mx.State
+ }
+
+ src, err = ff.Fmt(mx, src)
+ if err != nil {
+ return mx.AddErrorf("failed to fmt %s: %s\n", fn, err)
+ }
+ return mx.SetViewSrc(src)
+}
+
+// FmtCmd is wrapper around FmtFunc for generic fmt commands.
+//
+// The view src is passed to the command's stdin.
+// It takes care of handling command failure e.g. output on stderr or no output on stdout.
+type FmtCmd struct {
+ // Name is the command name or path
+ Name string
+
+ // Args is a list of args to pass to the command.
+ Args []string
+
+ // Env is a map of additional env vars to pass to the command.
+ Env mg.EnvMap
+
+ // Langs is the list of languages in which the reducer should run
+ Langs []mg.Lang
+
+ // Actions is a list of actions on which the reducer is allowed to run.
+ // The reducer always runs on the ViewFmt action, even if this list is empty.
+ Actions []mg.Action
+}
+
+// Reduce implements the FmtCmd reducer.
+func (fc FmtCmd) Reduce(mx *mg.Ctx) *mg.State {
+ return FmtFunc{Fmt: fc.fmt, Langs: fc.Langs, Actions: fc.Actions}.Reduce(mx)
+}
+
+func (fc FmtCmd) fmt(mx *mg.Ctx, src []byte) ([]byte, error) {
+ stdin := bytes.NewReader(src)
+ stdout := bytes.NewBuffer(nil)
+ stderr := bytes.NewBuffer(nil)
+ cmd := exec.Command(fc.Name, fc.Args...)
+ cmd.Env = mx.Env.Merge(fc.Env).Environ()
+ cmd.Stdin = stdin
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ if err := cmd.Run(); err != nil {
+ if stderr.Len() != 0 {
+ return nil, fmt.Errorf("`%s`: %s\nStderr: %s", mgutil.QuoteCmd(fc.Name, fc.Args...), err, stderr.Bytes())
+ }
+ return nil, fmt.Errorf("`%s`: %s", mgutil.QuoteCmd(fc.Name, fc.Args...), err)
+ }
+ if stderr.Len() != 0 {
+ return nil, fmt.Errorf("fmt completed successfully, but has output on stderr: %s", stderr.Bytes())
+ }
+ if stdout.Len() == 0 {
+ return nil, fmt.Errorf("fmt completed successfully, but has no output on stdout")
+ }
+ return stdout.Bytes(), nil
+}
diff --git a/src/margo.sh/golang/asmfmt.go b/src/margo.sh/golang/asmfmt.go
new file mode 100644
index 00000000..cb07c951
--- /dev/null
+++ b/src/margo.sh/golang/asmfmt.go
@@ -0,0 +1,25 @@
+package golang
+
+import (
+ "bytes"
+ "github.com/klauspost/asmfmt"
+ "margo.sh/format"
+ "margo.sh/mg"
+)
+
+// AsmFmt is a reducer that does code fmt'ing for `.s` files.
+// It uses the package https://github.com/klauspost/asmfmt
+type AsmFmt struct{ mg.ReducerType }
+
+func (AsmFmt) Reduce(mx *mg.Ctx) *mg.State {
+ if mx.View.Ext != ".s" {
+ return mx.State
+ }
+ return format.FmtFunc{
+ Langs: nil, // we only want to check the extension
+ Actions: commonFmtActions,
+ Fmt: func(mx *mg.Ctx, src []byte) ([]byte, error) {
+ return asmfmt.Format(bytes.NewReader(src))
+ },
+ }.Reduce(mx)
+}
diff --git a/src/margo.sh/golang/common.go b/src/margo.sh/golang/common.go
new file mode 100644
index 00000000..24fcb513
--- /dev/null
+++ b/src/margo.sh/golang/common.go
@@ -0,0 +1,44 @@
+package golang
+
+import (
+ "go/ast"
+ "go/build"
+ "go/token"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "regexp"
+)
+
+func init() {
+ pats := []*regexp.Regexp{
+ regexp.MustCompile(`^\s*(?P.+?\.\w+):(?P\d+:)(?P\d+:?)?(?:(?Pwarning|error)[:])?(?P.+?)(?: [(](?P[-\w]+)[)])?$`),
+ regexp.MustCompile(`(?Pcan't load package: package .+: found packages .+ \((?P.+?\.go)\).+)`),
+ }
+ for _, lang := range goutil.Langs {
+ mg.AddCommonPatterns(lang, pats...)
+ }
+}
+
+// BuildContext is an alias of goutil.BuildContext
+func BuildContext(mx *mg.Ctx) *build.Context { return goutil.BuildContext(mx) }
+
+// PathList is an alias of goutil.PathList
+func PathList(p string) []string { return goutil.PathList(p) }
+
+// NodeEnclosesPos is an alias of goutil.NodeEnclosesPos
+func NodeEnclosesPos(node ast.Node, pos token.Pos) bool { return goutil.NodeEnclosesPos(node, pos) }
+
+// PosEnd is an alias of goutil.PosEnd
+type PosEnd = goutil.PosEnd
+
+// IsLetter is an alias of goutil.IsLetter
+func IsLetter(ch rune) bool { return goutil.IsLetter(ch) }
+
+// IsPkgDir is an alias of goutil.IsPkgDir
+func IsPkgDir(dir string) bool { return goutil.IsPkgDir(dir) }
+
+// DedentCompletion is an alias of goutil.DedentCompletion
+func DedentCompletion(s string) string { return goutil.DedentCompletion(s) }
+
+// Dedent is an alias of goutil.Dedent
+func Dedent(s string) string { return goutil.Dedent(s) }
diff --git a/src/margo.sh/golang/cursor.go b/src/margo.sh/golang/cursor.go
new file mode 100644
index 00000000..ef714f5d
--- /dev/null
+++ b/src/margo.sh/golang/cursor.go
@@ -0,0 +1,46 @@
+package golang
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+const (
+ AssignmentScope = cursor.AssignmentScope
+ BlockScope = cursor.BlockScope
+ CommentScope = cursor.CommentScope
+ ConstScope = cursor.ConstScope
+ DeferScope = cursor.DeferScope
+ DocScope = cursor.DocScope
+ ExprScope = cursor.ExprScope
+ FileScope = cursor.FileScope
+ FuncDeclScope = cursor.FuncDeclScope
+ IdentScope = cursor.IdentScope
+ ImportPathScope = cursor.ImportPathScope
+ ImportScope = cursor.ImportScope
+ PackageScope = cursor.PackageScope
+ ReturnScope = cursor.ReturnScope
+ SelectorScope = cursor.SelectorScope
+ StringScope = cursor.StringScope
+ TypeDeclScope = cursor.TypeDeclScope
+ VarScope = cursor.VarScope
+)
+
+type CursorScope = cursor.CurScope
+type CompletionScope = CursorScope
+
+type DocNode = cursor.DocNode
+
+type CompletionCtx = CursorCtx
+type CursorCtx = cursor.CurCtx
+
+// NewCompletionCtx is an alias of cursor.NewCurCtx
+func NewCompletionCtx(mx *mg.Ctx, src []byte, pos int) *CompletionCtx {
+ return cursor.NewCurCtx(mx, src, pos)
+}
+
+// NewViewCursorCtx is an alias of cursor.NewViewCurCtx
+func NewViewCursorCtx(mx *mg.Ctx) *CursorCtx { return cursor.NewViewCurCtx(mx) }
+
+// NewCursorCtx is an alias of cursor.NewCurCtx
+func NewCursorCtx(mx *mg.Ctx, src []byte, pos int) *CursorCtx { return cursor.NewCurCtx(mx, src, pos) }
diff --git a/src/margo.sh/golang/cursor/curctx.go b/src/margo.sh/golang/cursor/curctx.go
new file mode 100644
index 00000000..d88da637
--- /dev/null
+++ b/src/margo.sh/golang/cursor/curctx.go
@@ -0,0 +1,459 @@
+package cursor
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ yotsuba "margo.sh/why_would_you_make_yotsuba_cry"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+var (
+ _ ast.Node = (*DocNode)(nil)
+)
+
+type DocNode struct {
+ Node ast.Node
+ ast.CommentGroup
+}
+
+type CurCtx struct {
+ Ctx *mg.Ctx
+ View *mg.View
+ Scope CurScope
+ PkgName string
+ IsTestFile bool
+ Line []byte
+ Src []byte
+ Pos int
+ TokenPos token.Pos
+ AstFile *ast.File
+ TokenFile *token.File
+ Doc *DocNode
+
+ GenDecl *ast.GenDecl
+ ImportSpec *ast.ImportSpec
+ Comment *ast.Comment
+ BlockStmt *ast.BlockStmt
+ CallExpr *ast.CallExpr
+ BasicLit *ast.BasicLit
+ Nodes []ast.Node
+ Node ast.Node
+
+ printer struct {
+ *sync.Mutex
+ printer.Config
+ fset *token.FileSet
+ buf *bytes.Buffer
+ }
+}
+
+func NewViewCurCtx(mx *mg.Ctx) *CurCtx {
+ type Key struct{ *mg.View }
+ k := Key{mx.View}
+ if cx := cachedCx(mx, k); cx != nil {
+ return cx
+ }
+
+ src, pos := k.SrcPos()
+ cx := NewCurCtx(mx, src, pos)
+ mx.Put(k, cx)
+ return cx
+}
+
+func NewCurCtx(mx *mg.Ctx, src []byte, pos int) *CurCtx { // NewCurCtx returns a (possibly cached) cursor context for src at byte offset pos
+ type Key struct {
+ hash string
+ pos int
+ }
+ key := Key{mg.SrcHash(src), pos} // cache key: content hash + cursor position
+ if cx := cachedCx(mx, key); cx != nil {
+ return cx
+ }
+
+ cx := newCurCtx(mx, src, pos)
+ mx.Put(key, cx)
+ return cx
+}
+
+func cachedCx(mx *mg.Ctx, k interface{}) *CurCtx { // cachedCx returns a copy of the CurCtx cached under k, or nil on a miss
+ cx, _ := mx.Get(k).(*CurCtx)
+ if cx == nil {
+ return nil
+ }
+ // make sure not to re-use old State and other fields of Ctx that might've changed
+ x := *cx // shallow copy, so the cached value itself is never mutated
+ x.Ctx = mx
+ return &x
+}
+
+func fixSrcPos(mx *mg.Ctx, src []byte, pos int) ([]byte, int) { // fixSrcPos clamps pos into src and patches a dangling `.` at end-of-line so the parser can recover
+ pos = mgutil.ClampPos(src, pos)
+ if len(src) == 0 {
+ return src, pos
+ }
+
+ if src[pos] == '\n' && src[pos-1] == '.' { // NOTE(review): assumes ClampPos yields 0 < pos < len(src); pos == 0 or pos == len(src) would panic here — verify ClampPos's range
+ p := make([]byte, len(src)+1)
+ copy(p, src[:pos]) // cursor sits right after `expr.`: inject a `;` so `expr.;` parses as a recoverable selector
+ p[pos] = ';'
+ copy(p[pos+1:], src[pos:])
+ src = p
+ }
+
+ return src, pos
+}
+
+func newCurCtx(mx *mg.Ctx, src []byte, pos int) *CurCtx {
+ defer mx.Profile.Push("NewCurCtx").Pop()
+
+ src, pos = fixSrcPos(mx, src, pos)
+
+ // if we're at the end of the line, move the cursor onto the last thing on the line
+ space := func(r rune) bool { return r == ' ' || r == '\t' }
+ if i := mgutil.RepositionRight(src, pos, space); i < len(src) && src[i] == '\n' {
+ pos = mgutil.RepositionLeft(src, pos, space)
+ if j := pos - 1; j >= 0 && src[j] != '\n' && src[j] != '}' {
+ pos = j
+ }
+ }
+
+ ll := mgutil.RepositionLeft(src, pos, func(r rune) bool { return r != '\n' })
+ lr := mgutil.RepositionRight(src, pos, func(r rune) bool { return r != '\n' })
+ cx := &CurCtx{
+ Ctx: mx,
+ View: mx.View,
+ Line: bytes.TrimSpace(src[ll:lr]),
+ Src: src,
+ Pos: pos,
+ }
+ cx.printer.Mutex = &sync.Mutex{}
+ cx.printer.fset = token.NewFileSet()
+ cx.printer.buf = &bytes.Buffer{}
+ cx.init(mx)
+
+ af := cx.AstFile
+ if af == nil {
+ af = goutil.NilAstFile
+ }
+ cx.PkgName = af.Name.String()
+
+ cx.IsTestFile = strings.HasSuffix(mx.View.Filename(), "_test.go") ||
+ strings.HasSuffix(cx.PkgName, "_test")
+
+ if cx.Comment != nil {
+ cx.Scope |= CommentScope
+ }
+ if cx.Doc != nil {
+ cx.Scope |= DocScope
+ cx.Scope |= CommentScope
+ }
+
+ if cx.PkgName == goutil.NilPkgName || cx.PkgName == "" {
+ cx.PkgName = goutil.NilPkgName
+ cx.Scope |= PackageScope
+ return cx
+ }
+
+ switch x := cx.Node.(type) {
+ case nil:
+ cx.Scope |= PackageScope
+ case *ast.File:
+ cx.Scope |= FileScope
+ case *ast.BlockStmt:
+ cx.Scope |= BlockScope
+ case *ast.CaseClause:
+ if goutil.NodeEnclosesPos(goutil.PosEnd{P: x.Colon, E: x.End()}, cx.TokenPos) {
+ cx.Scope |= BlockScope
+ }
+ case *ast.Ident:
+ cx.Scope |= IdentScope
+ }
+
+ cx.Each(func(n ast.Node) {
+ switch n.(type) {
+ case *ast.AssignStmt:
+ cx.Scope |= AssignmentScope
+ case *ast.SelectorExpr:
+ cx.Scope |= SelectorScope
+ case *ast.ReturnStmt:
+ cx.Scope |= ReturnScope
+ case *ast.DeferStmt:
+ cx.Scope |= DeferScope
+ }
+ })
+
+ if gd := cx.GenDecl; gd != nil {
+ switch gd.Tok {
+ case token.IMPORT:
+ cx.Scope |= ImportScope
+ case token.CONST:
+ cx.Scope |= ConstScope
+ case token.VAR:
+ cx.Scope |= VarScope
+ }
+ }
+
+ if lit := cx.BasicLit; lit != nil && lit.Kind == token.STRING {
+ cx.Scope |= StringScope
+ if cx.ImportSpec != nil {
+ cx.Scope |= ImportPathScope
+ }
+ }
+
+ // we want to allow `kw`, `kw name`, `kw (\n|\n)`
+ punct := func(r rune) bool { return r != ' ' && r != '\t' && !goutil.IsLetter(r) }
+ if cx.Scope == 0 && bytes.IndexFunc(cx.Line, punct) < 0 {
+ switch x := cx.Node.(type) {
+ case *ast.FuncType:
+ cx.Scope |= FuncDeclScope
+ case *ast.GenDecl:
+ if x.Tok == token.TYPE {
+ cx.Scope |= TypeDeclScope
+ }
+ }
+ }
+
+ exprOk := cx.Scope.Is(
+ AssignmentScope,
+ BlockScope,
+ ConstScope,
+ DeferScope,
+ ReturnScope,
+ VarScope,
+ ) && !cx.Scope.Is(
+ SelectorScope,
+ StringScope,
+ CommentScope,
+ )
+ if x := (*ast.TypeAssertExpr)(nil); exprOk && cx.Set(&x) {
+ exprOk = false
+ }
+ if asn := (*ast.AssignStmt)(nil); exprOk && cx.Set(&asn) {
+ exprOk = pos >= cx.TokenFile.Offset(asn.TokPos)+len(asn.Tok.String())
+ }
+ if exprOk {
+ cx.Scope |= ExprScope
+ }
+
+ return cx
+}
+
+// FuncDeclName returns the name of the FuncDecl iff the cursor is on a func declaration's name.
+// isMethod is true if the declaration is a method.
+func (cx *CurCtx) FuncDeclName() (name string, isMethod bool) {
+ var fd *ast.FuncDecl
+ if !cx.Set(&fd) { // no enclosing *ast.FuncDecl at the cursor
+ return "", false
+ }
+ if fd.Name == nil || !goutil.NodeEnclosesPos(fd.Name, cx.TokenPos) { // cursor must be on the name itself, not elsewhere in the decl
+ return "", false
+ }
+ return fd.Name.Name, fd.Recv != nil // a receiver marks it as a method
+}
+
+// FuncName returns the name of the function iff the cursor is on a (non-method) func declaration's name
+func (cx *CurCtx) FuncName() string {
+ if nm, isMeth := cx.FuncDeclName(); !isMeth {
+ return nm
+ }
+ return ""
+}
+
+// MethodName returns the name of the method iff the cursor is on a method declaration's name
+func (cx *CurCtx) MethodName() string {
+ if nm, isMeth := cx.FuncDeclName(); isMeth {
+ return nm
+ }
+ return ""
+}
+
+func (cx *CurCtx) Set(destPtr interface{}) bool { // Set stores into *destPtr the innermost enclosing node whose type matches destPtr's element type; it reports whether one was found
+ v := reflect.ValueOf(destPtr).Elem()
+ if !v.CanSet() { // destPtr must be a non-nil pointer to an addressable value
+ return false
+ }
+ for i := len(cx.Nodes) - 1; i >= 0; i-- { // walk from the innermost (last) node outward
+ x := reflect.ValueOf(cx.Nodes[i])
+ if x.Type() == v.Type() {
+ v.Set(x)
+ return true
+ }
+ }
+ return false
+}
+
+func (cx *CurCtx) Each(f func(ast.Node)) {
+ for i := len(cx.Nodes) - 1; i >= 0; i-- {
+ f(cx.Nodes[i])
+ }
+}
+
+func (cx *CurCtx) Some(f func(ast.Node) bool) bool {
+ for i := len(cx.Nodes) - 1; i >= 0; i-- {
+ if f(cx.Nodes[i]) {
+ return true
+ }
+ }
+ return false
+}
+
+func (cx *CurCtx) Contains(typ ast.Node) bool {
+ t := reflect.TypeOf(typ)
+ return cx.Some(func(n ast.Node) bool {
+ return reflect.TypeOf(n) == t
+ })
+}
+
+func (cx *CurCtx) ImportsMatch(match func(importPath string) bool) bool { // ImportsMatch reports whether any import path in the file satisfies match
+ for _, spec := range cx.AstFile.Imports {
+ p := spec.Path.Value // raw literal, including the surrounding quotes
+ if len(p) < 3 { // shortest useful literal is `"x"` (3 bytes); skip anything shorter
+ continue
+ }
+ if c := p[0]; c == '"' || c == '`' { // strip the opening quote/backtick
+ p = p[1:]
+ }
+ if c := p[len(p)-1]; c == '"' || c == '`' { // strip the closing quote/backtick
+ p = p[:len(p)-1]
+ }
+ if match(p) {
+ return true
+ }
+ }
+ return false
+}
+
+func (cx *CurCtx) Print(x ast.Node) (string, error) {
+ p := &cx.printer
+ p.Lock()
+ defer p.Unlock()
+
+ p.buf.Reset()
+ err := p.Fprint(p.buf, p.fset, x)
+ return p.buf.String(), err
+}
+
+func (cx *CurCtx) append(n ast.Node) { // append records n as an enclosing node, skipping bad nodes and duplicates
+ // ignore bad nodes, they usually just make scope detection fail with no obvious benefit
+ switch n.(type) {
+ case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
+ return
+ }
+
+ for _, x := range cx.Nodes { // dedupe: e.g. the *ast.File is appended explicitly in init() and then visited again by ast.Inspect
+ if n == x {
+ return
+ }
+ }
+ cx.Nodes = append(cx.Nodes, n)
+}
+
+func (cx *CurCtx) init(mx *mg.Ctx) {
+ defer mx.Profile.Push("CurCtx.init").Pop()
+
+ src, pos := cx.Src, cx.Pos
+ astFileIsValid := func(af *ast.File) bool {
+ return af.Package.IsValid() &&
+ af.Name != nil &&
+ af.Name.End().IsValid() &&
+ af.Name.Name != ""
+ }
+ srcHasComments := func() bool {
+ return bytes.Contains(src, []byte("//")) || bytes.Contains(src, []byte("/*"))
+ }
+
+ pf := goutil.ParseFile(mx, "", src)
+ if !astFileIsValid(pf.AstFile) && srcHasComments() {
+ // we don't want any declaration errors esp. about the package name `_`
+ // we don't parse with this mode by default to increase the chance of caching
+ s := append(src[:len(src):len(src)], goutil.NilPkgSrc...)
+ pf = goutil.ParseFileWithMode(mx, "", s, parser.ParseComments)
+ }
+
+ af := pf.AstFile
+ cx.AstFile = af
+ cx.TokenFile = pf.TokenFile
+ cx.TokenPos = token.Pos(pf.TokenFile.Base() + pos)
+
+ cx.initDocNode(af)
+ if astFileIsValid(af) && cx.TokenPos > af.Name.End() {
+ cx.append(af)
+ ast.Inspect(af, func(n ast.Node) bool {
+ if goutil.NodeEnclosesPos(n, cx.TokenPos) {
+ cx.append(n)
+ }
+ cx.initDocNode(n)
+ return true
+ })
+ }
+
+ for _, cg := range af.Comments {
+ for _, c := range cg.List {
+ if goutil.NodeEnclosesPos(c, cx.TokenPos) {
+ cx.append(c)
+ }
+ }
+ }
+
+ if len(cx.Nodes) == 0 {
+ return
+ }
+ cx.Node = cx.Nodes[len(cx.Nodes)-1]
+ cx.Each(func(n ast.Node) {
+ switch x := n.(type) {
+ case *ast.GenDecl:
+ cx.GenDecl = x
+ case *ast.BlockStmt:
+ cx.BlockStmt = x
+ case *ast.BasicLit:
+ cx.BasicLit = x
+ case *ast.CallExpr:
+ cx.CallExpr = x
+ case *ast.Comment:
+ cx.Comment = x
+ case *ast.ImportSpec:
+ cx.ImportSpec = x
+ }
+ })
+}
+
+func (cx *CurCtx) initDocNode(n ast.Node) { // initDocNode records the first doc comment group enclosing the cursor, paired with its owner node
+ if cx.Doc != nil || yotsuba.IsNil(n) { // first match wins; IsNil presumably also guards typed-nil interface values — see that package
+ return
+ }
+
+ setCg := func(cg *ast.CommentGroup) {
+ if cx.Doc != nil || cg == nil || !goutil.NodeEnclosesPos(cg, cx.TokenPos) { // only accept the group if the cursor is inside it
+ return
+ }
+ cx.Doc = &DocNode{
+ Node: n,
+ CommentGroup: *cg,
+ }
+ }
+
+ switch x := n.(type) { // only these node kinds carry a .Doc group of interest
+ case *ast.File:
+ setCg(x.Doc)
+ case *ast.Field:
+ setCg(x.Doc)
+ case *ast.GenDecl:
+ setCg(x.Doc)
+ case *ast.TypeSpec:
+ setCg(x.Doc)
+ case *ast.FuncDecl:
+ setCg(x.Doc)
+ case *ast.ValueSpec:
+ setCg(x.Doc)
+ case *ast.ImportSpec:
+ setCg(x.Doc)
+ }
+}
diff --git a/src/margo.sh/golang/cursor/curscope.go b/src/margo.sh/golang/cursor/curscope.go
new file mode 100644
index 00000000..20500737
--- /dev/null
+++ b/src/margo.sh/golang/cursor/curscope.go
@@ -0,0 +1,77 @@
+package cursor
+
+import (
+ "sort"
+ "strings"
+)
+
+const (
+ curScopesStart CurScope = 1 << iota
+ AssignmentScope
+ BlockScope
+ CommentScope
+ ConstScope
+ DeferScope
+ DocScope
+ ExprScope
+ FileScope
+ FuncDeclScope
+ IdentScope
+ ImportPathScope
+ ImportScope
+ PackageScope
+ ReturnScope
+ SelectorScope
+ StringScope
+ TypeDeclScope
+ VarScope
+ curScopesEnd
+)
+
+var (
+ scopeNames = map[CurScope]string{
+ AssignmentScope: "AssignmentScope",
+ BlockScope: "BlockScope",
+ CommentScope: "CommentScope",
+ ConstScope: "ConstScope",
+ DeferScope: "DeferScope",
+ DocScope: "DocScope",
+ ExprScope: "ExprScope",
+ FileScope: "FileScope",
+ FuncDeclScope: "FuncDeclScope",
+ IdentScope: "IdentScope",
+ ImportPathScope: "ImportPathScope",
+ ImportScope: "ImportScope",
+ PackageScope: "PackageScope",
+ ReturnScope: "ReturnScope",
+ SelectorScope: "SelectorScope",
+ StringScope: "StringScope",
+ TypeDeclScope: "TypeDeclScope",
+ VarScope: "VarScope",
+ }
+)
+
+type CurScope uint64
+
+func (cs CurScope) String() string { // String returns a sorted, |-joined list of the scope names set in cs
+ if cs <= curScopesStart || cs >= curScopesEnd { // sentinel or out-of-range values have no name
+ return "UnknownCursorScope"
+ }
+ l := []string{}
+ for scope, name := range scopeNames {
+ if cs.Is(scope) {
+ l = append(l, name)
+ }
+ }
+ sort.Strings(l) // map iteration order is random; sort for a stable result
+ return strings.Join(l, "|")
+}
+
+func (cs CurScope) Is(scopes ...CurScope) bool { // Is reports whether cs shares at least one bit with any of scopes
+ for _, s := range scopes {
+ if s&cs != 0 {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/margo.sh/golang/cursor/cursor_test.go b/src/margo.sh/golang/cursor/cursor_test.go
new file mode 100644
index 00000000..e71e3268
--- /dev/null
+++ b/src/margo.sh/golang/cursor/cursor_test.go
@@ -0,0 +1,16 @@
+package cursor
+
+import (
+ "testing"
+)
+
+func TestCurScopeStringer(t *testing.T) { // every scope bit (and the zero value) must have a non-empty String()
+ if cs := CurScope(0); cs.String() == "" {
+ t.Errorf("%#v doesn't have a String() value", cs)
+ }
+ for cs := curScopesStart; cs <= curScopesEnd; cs <<= 1 { // the sentinels stringify as "UnknownCursorScope", which is still non-empty
+ if cs.String() == "" {
+ t.Errorf("%#v doesn't have a String() value", cs)
+ }
+ }
+}
diff --git a/src/margo.sh/golang/gocmd.go b/src/margo.sh/golang/gocmd.go
new file mode 100644
index 00000000..9714df72
--- /dev/null
+++ b/src/margo.sh/golang/gocmd.go
@@ -0,0 +1,391 @@
+package golang
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/dustin/go-humanize"
+ "go/ast"
+ "go/build"
+ "io/ioutil"
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type GoCmd struct {
+ mg.ReducerType
+
+ Humanize bool
+}
+
+func (gc *GoCmd) Reduce(mx *mg.Ctx) *mg.State {
+ switch act := mx.Action.(type) {
+ case mg.QueryUserCmds:
+ return gc.userCmds(mx)
+ case mg.RunCmd:
+ return gc.runCmd(mx, act)
+ }
+ return mx.State
+}
+
+func (gc *GoCmd) userCmds(mx *mg.Ctx) *mg.State {
+ return mx.AddUserCmds(
+ mg.UserCmd{
+ Name: "go.play",
+ Title: "Go Play",
+ },
+ mg.UserCmd{
+ Name: "go.replay",
+ Title: "Go RePlay (single instance)",
+ },
+ )
+}
+
+func (gc *GoCmd) runCmd(mx *mg.Ctx, rc mg.RunCmd) *mg.State {
+ return mx.State.AddBuiltinCmds(
+ mg.BuiltinCmd{
+ Run: gc.goBuiltin,
+ Name: "go",
+ Desc: "Wrapper around the go command, adding linter support",
+ },
+ mg.BuiltinCmd{
+ Run: gc.playBuiltin,
+ Name: "go.play",
+ Desc: "Automatically build and run go commands or run go test for packages with support for linting and unsaved files",
+ },
+ mg.BuiltinCmd{
+ Run: gc.replayBuiltin,
+ Name: "go.replay",
+ Desc: "Wrapper around go.play limited to a single instance",
+ },
+ )
+}
+
+func (gc *GoCmd) goBuiltin(bx *mg.CmdCtx) *mg.State {
+ go gc.goTool(bx)
+ return bx.State
+}
+
+func (gc *GoCmd) playBuiltin(bx *mg.CmdCtx) *mg.State {
+ go gc.playTool(bx, "")
+ return bx.State
+}
+
+func (gc *GoCmd) replayBuiltin(bx *mg.CmdCtx) *mg.State {
+ v := bx.View
+ cid := ""
+ if v.Path == "" {
+ cid = v.Name
+ } else {
+ cid = v.Dir()
+ }
+ go gc.playTool(bx, "go.replay`"+cid+"`")
+ return bx.State
+}
+
+func (gc *GoCmd) goTool(bx *mg.CmdCtx) {
+ gx := newGoCmdCtx(gc, bx, "go.builtin", "", "", "", bx.View, len(bx.Args) > 0 && bx.Args[0] == "test")
+ defer gx.Output.Close()
+ gx.run(gx.View)
+}
+
+func (gc *GoCmd) playTool(bx *mg.CmdCtx, cancelID string) { // playTool implements go.play/go.replay: run tests for packages/_test files, otherwise build and run the command
+ bld := BuildContext(bx.Ctx)
+ pkg, err := bld.ImportDir(bx.View.Dir(), 0)
+ if err != nil {
+ fmt.Fprintln(bx.Output, "Error: cannot import package:", err) // NOTE(review): execution continues; pkg.IsCommand() below relies on ImportDir returning a partial, non-nil *Package on error — verify
+ }
+
+ testMode := !pkg.IsCommand() ||
+ strings.HasSuffix(bx.View.Filename(), "_test.go")
+
+ origView := bx.View
+ bx, tDir, tFn, err := gc.playTempDir(bx)
+ if err != nil {
+ fmt.Fprintf(bx.Output, "Error: %s\n", err) // best-effort: report, and keep going if we at least got a temp dir
+ }
+ defer os.RemoveAll(tDir)
+ if tDir == "" {
+ return
+ }
+ gx := newGoCmdCtx(gc, bx, "go.play", cancelID, tDir, tFn, origView, testMode)
+ defer gx.Output.Close()
+
+ gx.Verbose = true
+
+ switch {
+ case testMode:
+ gc.playToolTest(gx, bld, origView)
+ default:
+ gc.playToolRun(gx, bld, origView)
+ }
+}
+
+func (gc *GoCmd) playTempDir(bx *mg.CmdCtx) (newBx *mg.CmdCtx, tDir string, tFn string, err error) {
+ tDir, err = mg.MkTempDir("go.play")
+ if err != nil {
+ return bx, "", "", fmt.Errorf("cannot MkTempDir: %s", err)
+ }
+
+ if !bx.LangIs(mg.Go) {
+ return bx, tDir, "", nil
+ }
+
+ v := bx.View
+ if v.Path != "" {
+ return bx, tDir, tFn, nil
+ }
+
+ tFn = filepath.Join(tDir, v.Name)
+ src, err := v.ReadAll()
+ if err == nil {
+ err = ioutil.WriteFile(tFn, src, 0600)
+ }
+ if err != nil {
+ return bx, tDir, "", fmt.Errorf("cannot create temp file: %s", err)
+ }
+
+ bx = bx.Copy(func(bx *mg.CmdCtx) {
+ bx.Ctx = bx.Ctx.Copy(func(mx *mg.Ctx) {
+ mx.State = mx.State.Copy(func(st *mg.State) {
+ st.View = st.View.Copy(func(v *mg.View) {
+ v.Path = tFn
+ })
+ })
+ })
+ })
+
+ return bx, tDir, tFn, nil
+}
+
+func (gc *GoCmd) playToolTest(gx *goCmdCtx, bld *build.Context, origView *mg.View) {
+ argsPfx := []string{"test", "-test.run=."}
+ cx := cursor.NewViewCurCtx(gx.Ctx)
+ for _, n := range cx.Nodes {
+ x, ok := n.(*ast.FuncDecl)
+ if !ok || x.Name == nil {
+ continue
+ }
+ nm := x.Name.String()
+ if strings.HasPrefix(nm, "Benchmark") {
+ argsPfx = append(argsPfx, "-test.bench=^"+nm+"$")
+ }
+ }
+ gx.Args = append(argsPfx, gx.Args...)
+ if origView.Path == "" {
+ gx.Args = append(gx.Args, gx.tFn)
+ }
+ gx.run(origView)
+}
+
+func (gc *GoCmd) playToolRun(gx *goCmdCtx, bld *build.Context, origView *mg.View) {
+ nm := filepath.Base(origView.Name)
+ if origView.Path != "" {
+ nm = filepath.Base(origView.Dir())
+ }
+
+ args := gx.Args
+ exe := filepath.Join(gx.tDir, nm+".exe")
+ gx.CmdCtx = gx.CmdCtx.Copy(func(bx *mg.CmdCtx) {
+ bx.Name = "go"
+ bx.Args = []string{"build", "-o", exe}
+ bx.Ctx = bx.Ctx.Copy(func(mx *mg.Ctx) {
+ mx.State = mx.State.Copy(func(st *mg.State) {
+ st.View = st.View.Copy(func(v *mg.View) {
+ v.Wd = v.Dir()
+ })
+ })
+ })
+ })
+ if err := gx.run(origView); err != nil {
+ return
+ }
+
+ gx.CmdCtx = gx.CmdCtx.Copy(func(bx *mg.CmdCtx) {
+ bx.Name = exe
+ bx.Args = args
+ bx.Ctx = bx.Ctx.Copy(func(mx *mg.Ctx) {
+ mx.State = mx.State.Copy(func(st *mg.State) {
+ st.View = origView
+ })
+ })
+ })
+ gx.RunProc()
+}
+
+type goCmdCtx struct {
+ *mg.CmdCtx
+ pkgDir string
+ key interface{}
+ iw *mg.IssueOut
+ tDir string
+ tFn string
+}
+
+func newGoCmdCtx(gc *GoCmd, bx *mg.CmdCtx, label, cancelID string, tDir, tFn string, origView *mg.View, testMode bool) *goCmdCtx {
+ gx := &goCmdCtx{
+ pkgDir: bx.View.Dir(),
+ tDir: tDir,
+ tFn: tFn,
+ }
+
+ output := bx.Output
+ if gc.Humanize && testMode {
+ output = &humanizeWriter{output}
+ }
+ if gx.tFn != "" {
+ dir := filepath.Dir(gx.tFn)
+ qDir := regexp.QuoteMeta(dir)
+ qDirBase := regexp.QuoteMeta(filepath.Base(dir))
+ qNm := regexp.QuoteMeta(filepath.Base(gx.tFn))
+ output = &replWriter{
+ OutputStream: output,
+ old: []*regexp.Regexp{
+ regexp.MustCompile(`(?:` + qDir + `|` + qDirBase + `)?[\\/.]+` + qNm),
+ regexp.MustCompile(qDir),
+ },
+ new: [][]byte{
+ []byte(origView.Name),
+ []byte(`tmp~`),
+ },
+ }
+ }
+ output = mgutil.NewSplitStream(mgutil.SplitLineOrCR, output)
+
+ type Key struct{ label string }
+ gx.key = Key{label}
+
+ gx.iw = &mg.IssueOut{
+ Base: mg.Issue{Label: label},
+ Patterns: bx.CommonPatterns(),
+ Dir: gx.pkgDir,
+ }
+
+ gx.CmdCtx = bx.Copy(func(bx *mg.CmdCtx) {
+ bx.Name = "go"
+ bx.CancelID = cancelID
+ bx.Output = mg.OutputStreams{
+ output,
+ gx.iw,
+ }
+ })
+
+ return gx
+}
+
+func (gx *goCmdCtx) run(origView *mg.View) error { // run executes the configured command, flushes collected issues, and publishes them keyed to the original view
+ defer func() {
+ gx.VFS.Invalidate(origView.Filename()) // the command may have (re)written files; drop stale VFS entries
+ gx.VFS.Invalidate(origView.Dir())
+ }()
+
+ p, err := gx.StartProc()
+ if err == nil {
+ err = p.Wait()
+ }
+ gx.iw.Flush()
+
+ issues := gx.iw.Issues()
+
+ for i, isu := range issues {
+ if isu.Path == "" || (gx.tFn != "" && filepath.Base(isu.Path) == origView.Name) { // remap issues reported against the temp file back onto the unsaved view
+ isu.Name = origView.Name
+ isu.Path = origView.Path
+ }
+ issues[i] = isu
+ }
+
+ ik := mg.IssueKey{Key: gx.key}
+ if origView.Path == "" { // unsaved buffers are keyed by name, saved files by directory
+ ik.Name = origView.Name
+ } else {
+ ik.Dir = origView.Dir()
+ }
+
+ gx.Store.Dispatch(mg.StoreIssues{IssueKey: ik, Issues: issues})
+ return err
+}
+
+func isWhiteSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func humanizeMetric(met string) string { // humanizeMetric rewrites the integer in a benchmark metric cell (e.g. `123456 ns/op`) into a human-readable form, preserving surrounding whitespace
+ i := 0
+ for i < len(met) && isWhiteSpace(met[i]) { // i: start of the numeric value
+ i++
+ }
+ j := i
+ for j < len(met) && !isWhiteSpace(met[j]) { // j: end of the numeric value
+ j++
+ }
+ k := len(met)
+ for k > j && isWhiteSpace(met[k-1]) { // k: end of the unit, excluding trailing whitespace
+ k--
+ }
+ pfx := met[:i]
+ val := met[i:j]
+ unit := met[j:k] // keeps its leading whitespace; trimmed only for the switch below
+ sfx := met[k:]
+
+ num, err := strconv.ParseInt(val, 10, 64)
+ if err != nil { // not an integer cell (e.g. the benchmark name); leave untouched
+ return met
+ }
+ switch strings.TrimSpace(unit) {
+ case "ns/op":
+ s := time.Duration(num).String() // e.g. "1.5ms"; split leading digits from the unit suffix below
+ i := 0
+ for i < len(s) {
+ c := s[i]
+ if (c >= '0' && c <= '9') || c == '.' {
+ i++
+ } else {
+ break
+ }
+ }
+ return pfx + s[:i] + " " + s[i:] + "/op" + sfx
+ case "B/op":
+ return pfx + humanize.IBytes(uint64(num)) + "/op" + sfx
+ default:
+ return pfx + humanize.Comma(num) + unit + sfx
+ }
+}
+
+type humanizeWriter struct {
+ mg.OutputStream
+}
+
+func (w *humanizeWriter) Write(ln []byte) (int, error) {
+ s := make([]byte, 0, len(ln)+42)
+ for len(ln) != 0 {
+ i := bytes.IndexByte(ln, '\t')
+ if i < 0 {
+ s = append(s, humanizeMetric(string(ln))...)
+ break
+ }
+ i++
+ s = append(s, humanizeMetric(string(ln[:i]))...)
+ ln = ln[i:]
+ }
+ return w.OutputStream.Write(s)
+}
+
+type replWriter struct {
+ mg.OutputStream
+ old []*regexp.Regexp
+ new [][]byte
+}
+
+func (w *replWriter) Write(ln []byte) (int, error) {
+ for i, pat := range w.old {
+ ln = pat.ReplaceAll(ln, w.new[i])
+ }
+ return w.OutputStream.Write(ln)
+}
diff --git a/src/margo.sh/golang/gocode.go b/src/margo.sh/golang/gocode.go
new file mode 100644
index 00000000..e3645147
--- /dev/null
+++ b/src/margo.sh/golang/gocode.go
@@ -0,0 +1,406 @@
+package golang
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "kuroku.io/margocode/suggest"
+ "margo.sh/mg"
+ "margo.sh/mgpf"
+ "margo.sh/sublime"
+ "os"
+ "path"
+ "strings"
+ "time"
+ "unicode"
+)
+
+var (
+ gocodeClassTags = map[string]mg.CompletionTag{
+ "const": mg.ConstantTag,
+ "func": mg.FunctionTag,
+ "package": mg.PackageTag,
+ "import": mg.PackageTag,
+ "type": mg.TypeTag,
+ "var": mg.VariableTag,
+ }
+)
+
+type gocodeReq struct {
+ g *Gocode
+ mx *mg.Ctx
+ st *mg.State
+ gx *gocodeCtx
+ res chan *mg.State
+}
+
+type suggestions struct {
+ candidates []suggest.Candidate
+ unimported impSpec
+}
+
+func (gr *gocodeReq) addUnimportedPkg(st *mg.State, p impSpec) *mg.State {
+ if !gr.gx.gsu.cfg.AddUnimportedPackages {
+ return st
+ }
+ if p.Path == "" {
+ return st
+ }
+
+ src, _ := st.View.ReadAll()
+ if len(src) == 0 {
+ return st
+ }
+
+ if p.Name == path.Base(p.Path) {
+ p.Name = ""
+ }
+
+ s, merged, _ := impSpecList{p}.mergeWithSrc(st.View.Filename(), src)
+ if len(merged) != 0 {
+ st = st.SetViewSrc(s)
+ }
+
+ return st
+}
+
+func (gr *gocodeReq) reduce() *mg.State {
+ sugg := gr.gx.suggestions()
+ completions := make([]mg.Completion, 0, len(sugg.candidates))
+
+ st := gr.st
+ if len(sugg.candidates) != 0 {
+ st = gr.addUnimportedPkg(st, sugg.unimported)
+ }
+
+ gr.mx.Profile.Push("gocodeReq.finalize").Pop()
+ for _, v := range sugg.candidates {
+ if c, ok := gr.g.completion(gr.mx, gr.gx, v); ok {
+ completions = append(completions, c)
+ }
+ }
+
+ return st.AddCompletions(completions...)
+}
+
+type Gocode struct {
+ mg.ReducerType
+
+ AllowExplicitCompletions bool
+ AllowWordCompletions bool
+ ShowFuncParams bool
+ ShowFuncResultNames bool
+
+ // The following fields are deprecated
+
+ // Consider using MarGocodeCtl.Debug instead, it has more useful output
+ Debug bool
+ // This field is ignored, see MarGocodeCtl.ImporterMode
+ Source bool
+ // This field is ignored, see MarGocodeCtl.NoBuiltins
+ ProposeBuiltins bool
+ // This field is ignored, see MarGocodeCtl.ProposeTests
+ ProposeTests bool
+ // This field is ignored, see MarGocodeCtl.ImporterMode
+ Autobuild bool
+ // This field is ignored, See MarGocodeCtl.NoUnimportedPackages
+ UnimportedPackages bool
+
+ reqs chan gocodeReq
+}
+
+func (g *Gocode) RConfig(mx *mg.Ctx) mg.EditorConfig {
+ cfg, ok := mx.Config.(sublime.Config)
+ if !ok {
+ return nil
+ }
+
+ // ST might query the GoSublime plugin first, so we must always disable it
+ cfg = cfg.DisableGsComplete()
+ // but we don't want to affect editor completions in non-go files
+ if !g.RCond(mx) {
+ return cfg
+ }
+
+ if !g.AllowExplicitCompletions {
+ cfg = cfg.InhibitExplicitCompletions()
+ }
+ if !g.AllowWordCompletions {
+ cfg = cfg.InhibitWordCompletions()
+ }
+ return cfg
+}
+
+func (g *Gocode) RCond(mx *mg.Ctx) bool {
+ return mx.ActionIs(mg.QueryCompletions{}) && mx.LangIs(mg.Go)
+}
+
+func (g *Gocode) RMount(mx *mg.Ctx) {
+ g.reqs = make(chan gocodeReq)
+ go func() {
+ for gr := range g.reqs {
+ gr.res <- gr.reduce()
+ }
+ }()
+}
+
+func (g *Gocode) RUnmount(mx *mg.Ctx) {
+ close(g.reqs)
+}
+
+func (g *Gocode) Reduce(mx *mg.Ctx) *mg.State { // Reduce hands the completion request to the worker goroutine and waits for the result, bounded by timeouts
+ start := time.Now()
+
+ gx := initGocodeReducer(mx, *g)
+ st := mx.State
+ if gx == nil { // empty buffer, or a cursor scope where completion makes no sense
+ return st
+ }
+
+ qTimeout := 100 * time.Millisecond // max time to wait for the worker to accept the request
+ gr := gocodeReq{
+ g: g,
+ mx: mx,
+ st: st,
+ gx: gx,
+ res: make(chan *mg.State, 1), // buffered so the worker's send never blocks, even if we time out below
+ }
+ select {
+ case g.reqs <- gr:
+ case <-time.After(qTimeout):
+ mx.Log.Println("gocode didn't accept the request after", mgpf.D(time.Since(start)))
+ return st
+ }
+
+ pTimeout := 150 * time.Millisecond // result budget, extended by whatever queue budget was left unused
+ if d := qTimeout - time.Since(start); d > 0 {
+ pTimeout += d
+ }
+
+ select {
+ case st := <-gr.res:
+ return st
+ case <-time.After(pTimeout):
+ go func() { // drain the eventual (too-late) result and log how late it was
+ <-gr.res
+
+ mx.Log.Println("gocode eventually responded after", mgpf.Since(start))
+
+ if g.Debug {
+ opts := mgpf.DefaultPrintOpts
+ opts.MinDuration = 3 * time.Millisecond
+ mx.Profile.Fprint(os.Stderr, &opts)
+ }
+ }()
+
+ mx.Log.Println("gocode didn't respond after", mgpf.D(pTimeout), "taking", mgpf.Since(start))
+ return st
+ }
+}
+
+func (g Gocode) funcTitle(fx *ast.FuncType, buf *bytes.Buffer, decl string) string {
+ // TODO: caching
+
+ buf.Reset()
+ fset := token.NewFileSet()
+
+ buf.WriteString("func(")
+ if fx.Params != nil {
+ switch {
+ case g.ShowFuncParams:
+ printFields(buf, fset, fx.Params.List, true)
+ case fx.Params.NumFields() != 0:
+ buf.WriteString("…")
+ }
+ }
+ buf.WriteString(")")
+
+ if fl := fx.Results; fl != nil {
+ buf.WriteString(" ")
+ hasNames := g.ShowFuncResultNames && len(fl.List) != 0 && len(fl.List[0].Names) != 0
+ if hasNames {
+ buf.WriteString("(")
+ }
+ printFields(buf, fset, fl.List, g.ShowFuncResultNames)
+ if hasNames {
+ buf.WriteString(")")
+ }
+ }
+
+ return buf.String()
+}
+
+func (g Gocode) funcSrc(fx *ast.FuncType, buf *bytes.Buffer, v suggest.Candidate, gx *gocodeCtx) string {
+ // TODO: caching
+ // TODO: only output the name, if we're in a call, assignment, etc. that takes a func
+
+ outputArgs := true
+ for _, c := range gx.src[gx.pos:] {
+ if c == '(' {
+ outputArgs = false
+ break
+ }
+ r := rune(c)
+ if !IsLetter(r) && !unicode.IsSpace(r) {
+ break
+ }
+ }
+
+ buf.Reset()
+ buf.WriteString(v.Name)
+ if outputArgs {
+ buf.WriteString("(")
+ pos := 0
+ for _, field := range fx.Params.List {
+ for _, name := range field.Names {
+ pos++
+ if pos > 1 {
+ buf.WriteString(", ")
+ }
+ fmt.Fprintf(buf, "${%d:%s}", pos, name)
+ }
+ }
+ buf.WriteString(")")
+ }
+ buf.WriteString("${0}")
+ return buf.String()
+}
+
+func printFields(w io.Writer, fset *token.FileSet, list []*ast.Field, printNames bool) {
+ for i, field := range list {
+ if i > 0 {
+ fmt.Fprint(w, ", ")
+ }
+ if printNames {
+ for j, name := range field.Names {
+ if j > 0 {
+ fmt.Fprint(w, ", ")
+ }
+ fmt.Fprint(w, name.String())
+ }
+ if len(field.Names) != 0 {
+ fmt.Fprint(w, " ")
+ }
+ }
+ printer.Fprint(w, fset, field.Type)
+ }
+}
+
+func (g Gocode) completion(mx *mg.Ctx, gx *gocodeCtx, v suggest.Candidate) (c mg.Completion, ok bool) {
+ buf := bytes.NewBuffer(nil)
+ if v.Class == "PANIC" {
+ mx.Log.Printf("gocode panicked in '%s' at pos '%d'\n", gx.fn, gx.pos)
+ return c, false
+ }
+ if !gx.gsu.cfg.ProposeTests && g.matchTests(v) {
+ return c, false
+ }
+
+ var fx *ast.FuncType
+ if strings.HasPrefix(v.Type, "func(") {
+ x, _ := parser.ParseExpr(v.Type)
+ fx, _ = x.(*ast.FuncType)
+ }
+
+ c = mg.Completion{
+ Query: g.compQuery(v),
+ Tag: g.compTag(v),
+ Src: g.compSrc(fx, buf, v, gx),
+ Title: g.compTitle(fx, buf, v),
+ }
+ return c, true
+}
+
+func (g Gocode) compQuery(v suggest.Candidate) string {
+ return v.Name
+}
+
+func (g Gocode) compSrc(fx *ast.FuncType, buf *bytes.Buffer, v suggest.Candidate, gx *gocodeCtx) string {
+ if fx == nil {
+ return v.Name
+ }
+ return g.funcSrc(fx, buf, v, gx)
+}
+
+func (g Gocode) compTag(v suggest.Candidate) mg.CompletionTag {
+ if tag, ok := gocodeClassTags[v.Class]; ok {
+ return tag
+ }
+ return mg.UnknownTag
+}
+
+func (g Gocode) compTitle(fx *ast.FuncType, buf *bytes.Buffer, v suggest.Candidate) string {
+ if fx != nil {
+ return g.funcTitle(fx, buf, v.Type)
+ }
+ if v.Type == "" {
+ return v.Class
+ }
+ return v.Type
+}
+
+func (g Gocode) matchTests(c suggest.Candidate) bool {
+ if !strings.HasPrefix(c.Type, "func(") {
+ return false
+ }
+ return strings.HasPrefix(c.Name, "Test") ||
+ strings.HasPrefix(c.Name, "Benchmark") ||
+ strings.HasPrefix(c.Name, "Example")
+}
+
+type gocodeCtx struct {
+ Gocode
+ *CursorCtx
+ gsu *gcSuggest
+ mx *mg.Ctx
+ fn string
+ src []byte
+ pos int
+ bctx *build.Context
+}
+
+func initGocodeReducer(mx *mg.Ctx, g Gocode) *gocodeCtx {
+ // TODO: simplify and get rid of this func, it's only used once
+
+ src, pos := mx.View.SrcPos()
+ if len(src) == 0 {
+ return nil
+ }
+
+ cx := NewCursorCtx(mx, src, pos)
+ if cx.Scope.Is(
+ PackageScope,
+ FileScope,
+ ImportScope,
+ StringScope,
+ CommentScope,
+ FuncDeclScope,
+ TypeDeclScope,
+ ) {
+ return nil
+ }
+
+ gsu := mctl.newGcSuggest(mx)
+ gsu.suggestDebug = g.Debug
+ return &gocodeCtx{
+ mx: mx,
+ CursorCtx: cx,
+ gsu: gsu,
+ fn: mx.View.Filename(),
+ pos: pos,
+ src: src,
+ bctx: BuildContext(mx),
+ }
+}
+
+func (gx *gocodeCtx) suggestions() suggestions {
+ if len(gx.src) == 0 {
+ return suggestions{}
+ }
+ return gx.gsu.suggestions(gx.mx, gx.src, gx.pos)
+}
diff --git a/src/margo.sh/golang/gocode_calltips.go b/src/margo.sh/golang/gocode_calltips.go
new file mode 100644
index 00000000..839948b2
--- /dev/null
+++ b/src/margo.sh/golang/gocode_calltips.go
@@ -0,0 +1,370 @@
+package golang
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "kuroku.io/margocode/suggest"
+ "margo.sh/htm"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "margo.sh/sublime"
+ "strings"
+ "unicode"
+)
+
const (
	// calltipOpenTag and calltipCloseTag wrap the active parameter/result in
	// the rendered calltip so the HUD can highlight it.
	calltipOpenTag  = "⎨"
	calltipCloseTag = "⎬"
)

// gocodeCtAct is dispatched back into the store after a calltip has been
// (re)computed; status holds the rendered calltip text.
type gocodeCtAct struct {
	mg.ActionType
	mx     *mg.Ctx
	status string
}

// GocodeCalltips is a reducer that shows the signature of the function
// being called at the cursor, in the status bar and the HUD.
type GocodeCalltips struct {
	mg.ReducerType

	// The following fields are deprecated

	// This field is ignored, see MarGocodeCtl.ImporterMode
	Source bool
	// Consider using MarGocodeCtl.Debug instead, it has more useful output
	Debug bool

	q      *mgutil.ChanQ // single-slot queue feeding processer()
	status string        // last rendered calltip text
	hud    htm.Element   // last rendered HUD content
}
+
// RCond implements mg.Reducer; GocodeCalltips only runs on Go views.
func (gc *GocodeCalltips) RCond(mx *mg.Ctx) bool {
	return mx.LangIs(mg.Go)
}

// RMount implements mg.Reducer; it starts the background goroutine that
// computes calltips off the main reduction path.
func (gc *GocodeCalltips) RMount(mx *mg.Ctx) {
	gc.q = mgutil.NewChanQ(1)
	go gc.processer()
}

// RUnmount implements mg.Reducer; closing the queue stops processer().
func (gc *GocodeCalltips) RUnmount(mx *mg.Ctx) {
	gc.q.Close()
}
+
+func (gc *GocodeCalltips) Reduce(mx *mg.Ctx) *mg.State {
+ st := mx.State
+ if cfg, ok := st.Config.(sublime.Config); ok {
+ st = st.SetConfig(cfg.DisableCalltips())
+ }
+
+ switch act := mx.Action.(type) {
+ case mg.ViewPosChanged, mg.ViewActivated:
+ gc.q.Put(gocodeCtAct{mx: mx, status: gc.status})
+ case gocodeCtAct:
+ s := act.status
+ gc.status = s
+ i := strings.Index(s, calltipOpenTag)
+ j := strings.Index(s, calltipCloseTag)
+ switch {
+ case i >= 0 && j > i:
+ gc.hud = htm.Span(nil,
+ htm.Text(s[:i]),
+ htm.HighlightText(s[i+len(calltipOpenTag):j]),
+ htm.Text(s[j+len(calltipOpenTag):]),
+ )
+ case gc.status == "":
+ gc.hud = nil
+ default:
+ gc.hud = htm.Text(s)
+ }
+ }
+
+ if gc.status != "" {
+ st = st.AddStatus(gc.status)
+ }
+ if gc.hud != nil {
+ st = st.AddHUD(htm.Text("Calltips"), gc.hud)
+ }
+ return st
+}
+
// processer drains the queue, computing a calltip for each queued action
// until the queue is closed by RUnmount.
func (gc *GocodeCalltips) processer() {
	for a := range gc.q.C() {
		gc.process(a.(gocodeCtAct))
	}
}

// process computes the calltip for act and, if it changed, dispatches it
// back into the store so Reduce can render it.
func (gc *GocodeCalltips) process(act gocodeCtAct) {
	// NOTE(review): the blanket recover keeps a panic in the AST/suggestion
	// code from killing the goroutine, but it also hides bugs silently.
	defer func() { recover() }()

	if s := gc.processStatus(act); s != act.status {
		act.mx.Store.Dispatch(gocodeCtAct{status: s})
	}
}
+
// processStatus computes the calltip text for the view described by act.
// It returns "" when there's no call at the cursor, or the called function
// can't be resolved to a func-typed completion candidate.
func (gc *GocodeCalltips) processStatus(act gocodeCtAct) string {
	mx := act.mx
	src, srcPos := mx.View.SrcPos()
	if len(src) == 0 {
		return ""
	}

	cx := NewCursorCtx(mx, src, srcPos)
	tf := cx.TokenFile
	tokPos := tf.Pos(srcPos)
	call, assign := gc.findCallExpr(cx.Nodes, tokPos)
	if call == nil {
		return ""
	}

	// resolve the called function's name; gives up on anything that isn't a
	// plain identifier or selector (e.g. a call on a func-typed expression)
	ident := gc.exprIdent(call.Fun)
	if ident == nil {
		return ""
	}

	fxName := ident.String()
	candidate, ok := gc.candidate(mx, src, tf.Position(ident.End()).Offset, fxName)
	if !ok {
		return ""
	}

	// the candidate reports its signature as a string; parse it back into an
	// AST so individual parameters/results can be highlighted
	expr, _ := parser.ParseExpr(candidate.Type)
	fx, _ := expr.(*ast.FuncType)
	if fx == nil {
		return ""
	}

	// pick the node to highlight: the active argument when the cursor is
	// inside the call's parens, or the assigned-to result when the cursor is
	// on the LHS of `a, b := f()`
	var highlight ast.Node
	switch {
	case call.Lparen < tokPos && tokPos <= call.Rparen:
		i := gc.selectedFieldExpr(tf.Offset, src, srcPos, call.Args)
		highlight = gc.selectedFieldName(fx.Params, i)
	case assign != nil:
		i := gc.selectedFieldExpr(tf.Offset, src, srcPos, assign.Lhs)
		highlight = gc.selectedFieldName(fx.Results, i)
	}

	return gc.funcSrc(fx, fxName, highlight)
}
+
// findCallExpr scans nodes (the path from the AST root to the cursor,
// innermost last) for the CallExpr the cursor is operating in.
// The AssignStmt is non-nil only when the call was found via the RHS of an
// assignment whose LHS holds the cursor (`a, b| := f()`); the search never
// escapes the enclosing block.
func (gc *GocodeCalltips) findCallExpr(nodes []ast.Node, pos token.Pos) (*ast.CallExpr, *ast.AssignStmt) {
	var assign *ast.AssignStmt
	var call, callCandidate *ast.CallExpr
out:
	for i := len(nodes) - 1; i >= 0; i-- {
		switch x := nodes[i].(type) {
		case *ast.BlockStmt:
			// don't look outside the enclosing block
			break out
		case *ast.AssignStmt:
			assign = x
		case *ast.CallExpr:
			// we found a CallExpr, but it's not necessarily the right one.
			// in `funcF(fun|cG())` this will match funcG, but we want funcF
			// so we keep track of the first CallExpr but keep searching until
			// we find one whose left paren is before the cursor
			if callCandidate == nil {
				callCandidate = x
			}
			if x.Lparen < pos {
				call = x
				break out
			}
		}
	}

	switch {
	case call != nil:
		return call, nil
	case callCandidate != nil:
		return callCandidate, nil
	case assign != nil && len(assign.Rhs) == 1:
		// cursor on the LHS of `a, b := f()`: report the RHS call along with
		// the assignment so results can be highlighted
		if call, ok := assign.Rhs[0].(*ast.CallExpr); ok {
			return call, assign
		}
	}
	return nil, nil
}
+
+func (gc *GocodeCalltips) funcSrc(fx *ast.FuncType, funcName string, highlight ast.Node) string {
+ fset := token.NewFileSet()
+ buf := &bytes.Buffer{}
+
+ if funcName == "" {
+ buf.WriteString("func")
+ } else {
+ buf.WriteString(funcName)
+ }
+
+ var params []*ast.Field
+ if p := fx.Params; p != nil {
+ params = p.List
+ }
+ fieldPrinter{
+ fset: fset,
+ fields: params,
+ buf: buf,
+ parens: true,
+ names: true,
+ types: true,
+ highlight: highlight,
+ }.print()
+
+ if p := fx.Results; p != nil {
+ buf.WriteByte(' ')
+ fieldPrinter{
+ fset: fset,
+ fields: p.List,
+ buf: buf,
+ parens: len(p.List) != 0 && len(p.List[0].Names) != 0,
+ names: true,
+ types: true,
+ highlight: highlight,
+ }.print()
+ }
+
+ return buf.String()
+}
+
// selectedFieldName returns the node to highlight for the fieldIndex-th
// parameter/result in fl, counting each name in multi-name fields
// (`a, b int` is two entries) and each unnamed field as one entry.
// When fieldIndex is past the end and the last field is a single-name
// variadic (`xs ...T`), that name soaks up the extra index; otherwise nil.
func (gc *GocodeCalltips) selectedFieldName(fl *ast.FieldList, fieldIndex int) ast.Node {
	if fl == nil || len(fl.List) == 0 {
		return nil
	}

	index := 0
	for _, field := range fl.List {
		if len(field.Names) == 0 {
			if index == fieldIndex {
				return field
			}
			index++
			continue
		}

		for _, id := range field.Names {
			if index == fieldIndex {
				return id
			}
			index++
		}
	}

	// fieldIndex is beyond the declared fields; a variadic final param
	// matches any number of trailing arguments
	f := fl.List[len(fl.List)-1]
	if _, ok := f.Type.(*ast.Ellipsis); ok && len(f.Names) == 1 {
		return f.Names[0]
	}

	return nil
}

// selectedFieldExpr returns the index of the expression in fields whose
// span (expanded across surrounding whitespace) contains the byte offset
// pos. When the cursor is in none of them — typically just after a trailing
// comma — it returns len(fields), i.e. the next, yet-to-be-written field.
func (gc *GocodeCalltips) selectedFieldExpr(offset func(token.Pos) int, src []byte, pos int, fields []ast.Expr) int {
	for i, a := range fields {
		np := mgutil.RepositionLeft(src, offset(a.Pos()), unicode.IsSpace)
		ne := mgutil.RepositionRight(src, offset(a.End()), unicode.IsSpace)
		if np <= pos && pos <= ne {
			return i
		}
	}
	// in most cases we're after a comma,
	// so choose the next field (that doesn't exist yet)
	return len(fields)
}
+
// candidate looks up the completion candidate for the function named
// funcName by asking the suggestion engine at pos, the byte offset just
// after the function name. Only func-typed candidates are considered; an
// exact name match returns immediately, otherwise the last case-insensitive
// match (the engine matches case-insensitively) is used as a fallback.
func (gc *GocodeCalltips) candidate(mx *mg.Ctx, src []byte, pos int, funcName string) (candidate suggest.Candidate, ok bool) {
	if pos < 0 || pos >= len(src) {
		return candidate, false
	}

	gsu := mctl.newGcSuggest(mx)
	gsu.suggestDebug = gc.Debug
	sugg := gsu.suggestions(mx, src, pos)
	for _, c := range sugg.candidates {
		if !strings.HasPrefix(c.Type, "func(") {
			continue
		}
		switch {
		case funcName == c.Name:
			return c, true
		case strings.EqualFold(funcName, c.Name):
			candidate = c
		}
	}
	// ok iff the fallback loop found at least one (case-insensitive) match
	return candidate, candidate != suggest.Candidate{}
}
+
+func (gc *GocodeCalltips) exprIdent(x ast.Expr) *ast.Ident {
+ switch x := x.(type) {
+ case *ast.Ident:
+ return x
+ case *ast.SelectorExpr:
+ return x.Sel
+ }
+ return nil
+}
+
// fieldPrinter renders an *ast.FieldList (params or results) into buf.
type fieldPrinter struct {
	fset   *token.FileSet
	fields []*ast.Field
	buf    *bytes.Buffer
	names  bool // print field names
	types  bool // print field types
	parens bool // wrap the whole list in parentheses
	// highlight, when it matches one of the printed *ast.Ident or
	// *ast.Field nodes, is wrapped in calltipOpenTag/calltipCloseTag.
	highlight ast.Node
}

// print writes the field list to p.buf.
func (p fieldPrinter) print() {
	w := p.buf
	if p.parens {
		w.WriteByte('(')
	}

	// at most one of these is non-nil for any given highlight node
	hlId, _ := p.highlight.(*ast.Ident)
	hlField, _ := p.highlight.(*ast.Field)
	hlWriteOpen := func() { w.WriteString(calltipOpenTag) }
	hlWriteClose := func() { w.WriteString(calltipCloseTag) }

	for i, f := range p.fields {
		if i > 0 {
			w.WriteString(", ")
		}

		// highlighting a whole field wraps its names and type together
		if f == hlField {
			hlWriteOpen()
		}

		var names []*ast.Ident
		if p.names {
			names = f.Names
		}
		for j, id := range names {
			if j > 0 {
				w.WriteString(", ")
			}
			if hlId == id {
				hlWriteOpen()
			}
			w.WriteString(id.String())

			// close now unless this is the last name; the last name's tag
			// is closed after the type is printed below
			if hlId == id && j < len(names)-1 {
				hlWriteClose()
			}
		}

		if p.types {
			if len(names) != 0 {
				w.WriteByte(' ')
			}
			printer.Fprint(w, p.fset, f.Type)
		}

		// close a whole-field highlight, or a highlight on the last name
		if l := names; f == hlField || (len(l) > 0 && l[len(l)-1] == hlId) {
			hlWriteClose()
		}
	}

	if p.parens {
		w.WriteByte(')')
	}
}
diff --git a/src/margo.sh/golang/gocode_suggest.go b/src/margo.sh/golang/gocode_suggest.go
new file mode 100644
index 00000000..2efe66e3
--- /dev/null
+++ b/src/margo.sh/golang/gocode_suggest.go
@@ -0,0 +1,285 @@
+package golang
+
+import (
+ "errors"
+ "go/build"
+ "go/types"
+ "kuroku.io/margocode/suggest"
+ "margo.sh/golang/gopkg"
+ "margo.sh/kimporter"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+)
+
var (
	// errImportCycleDetected is pre-seeded into the importer's result map
	// before an import starts, so a re-entrant import of the same key fails
	// fast instead of recursing.
	errImportCycleDetected = errors.New("import cycle detected")
)

// gsuImpRes is a cached importer result.
type gsuImpRes struct {
	pkg *types.Package
	err error
}

// gcSuggest wraps the gocode suggestion engine.
// The embedded mutex serialises suggestion requests.
type gcSuggest struct {
	suggestDebug bool
	partials     bool

	cfg MarGocodeCtl

	sync.Mutex
	imp *gsuImporter
}
+
// newGsuImporter creates a fresh importer with an empty per-session result
// cache for the current reduction context.
func (gsu *gcSuggest) newGsuImporter(mx *mg.Ctx) *gsuImporter {
	gi := &gsuImporter{
		mx:  mx,
		bld: BuildContext(mx),
	}
	gi.res.m = map[mgcCacheKey]gsuImpRes{}
	return gi
}

// noPartialsPos moves pos off the identifier under the cursor.
// move the cursor off the word.
// xxx.yyy| ~> xxx.|
// xxx| ~> |xxx
// this results in fetching all possible results
// which is desirable because the editor is usually better at filtering the list
func (gsu *gcSuggest) noPartialsPos(src []byte, pos int) int {
	return mgutil.RepositionLeft(src, pos, IsLetter)
}
+
// suggestions asks the suggestion engine for completion candidates in src
// at byte offset pos. Requests are serialised via the mutex, and panics in
// the engine are recovered and logged rather than propagated.
func (gsu *gcSuggest) suggestions(mx *mg.Ctx, src []byte, pos int) suggestions {
	defer mx.Profile.Push("suggestions").Pop()

	sugg := suggestions{}

	if len(src) == 0 {
		return sugg
	}

	gsu.Lock()
	defer gsu.Unlock()

	defer func() {
		if e := recover(); e != nil {
			mx.Log.Printf("gocode/suggest panic: %s\n%s\n", e, debug.Stack())
		}
	}()

	cfg := suggest.Config{
		// we no longer support contextual build env :(
		// GoSublime works around this for other packages by restarting the agent
		// if GOPATH changes, so we should be ok
		Importer:   gsu.imp,
		Builtin:    !gsu.cfg.NoBuiltins,
		IgnoreCase: true,
	}
	if !gsu.cfg.NoUnimportedPackages {
		srcDir := mx.View.Dir()
		// allow completing `pkgname.` even when pkgname isn't imported yet;
		// record which package was pulled in so callers can add the import
		cfg.UnimportedPackage = func(nm string) *types.Package {
			pkg, pth := gsu.imp.importFromName(nm, srcDir)
			if pkg != nil {
				sugg.unimported.Name = nm
				sugg.unimported.Path = pth
			}
			return pkg
		}
	}
	if gsu.suggestDebug {
		cfg.Logf = func(f string, a ...interface{}) {
			f = "Gocode: " + f
			if !strings.HasSuffix(f, "\n") {
				f += "\n"
			}
			mx.Log.Dbg.Printf(f, a...)
		}
	}

	if !gsu.partials {
		pos = gsu.noPartialsPos(src, pos)
	}
	sugg.candidates, _ = cfg.Suggest(mx.View.Filename(), src, pos)
	return sugg
}
+
// gsuPkgInfo identifies a package for the purposes of caching.
type gsuPkgInfo struct {
	// the import path
	Path string

	// the abs path to the package directory
	Dir string

	// whether or not this is a stdlib package
	Std bool
}

// cacheKey returns the cache key for this package, imported in either
// source or binary mode.
func (p gsuPkgInfo) cacheKey(source bool) mgcCacheKey {
	return mgcCacheKey{gsuPkgInfo: p, Source: source}
}

// gsuImporter is a types importer whose per-session result map guards
// against import cycles and repeated failing imports.
type gsuImporter struct {
	mx  *mg.Ctx
	bld *build.Context

	res struct {
		sync.Mutex
		m map[mgcCacheKey]gsuImpRes
	}
}
+
// Import implements types.Importer, importing relative to the current dir.
func (gi *gsuImporter) Import(path string) (*types.Package, error) {
	return gi.ImportFrom(path, ".", 0)
}

// importFromName imports the package whose *name* (not path) is pkgName, as
// seen from srcDir, by first mapping the name to an import path.
// It backs completion of not-yet-imported packages.
func (gi *gsuImporter) importFromName(pkgName, srcDir string) (pkg *types.Package, impPath string) {
	impPath = mctl.importPathByName(pkgName, srcDir)
	if impPath == "" {
		return nil, ""
	}
	pkg, _ = gi.ImportFrom(impPath, srcDir, 0)
	return pkg, impPath
}
+
// ImportFrom implements types.ImporterFrom.
// In KimPorter mode it delegates entirely to the kimporter; otherwise it
// imports via the configured default importer, with a per-session result
// cache (doubling as import-cycle detection) and an optional fallback
// importer for failed/incomplete imports.
func (gi *gsuImporter) ImportFrom(impPath, srcDir string, mode types.ImportMode) (pkg *types.Package, err error) {
	if mctl.cfg().ImporterMode == KimPorter {
		return kimporter.New(gi.mx, nil).ImportFrom(impPath, srcDir, mode)
	}

	// TODO: add mode to the key somehow?
	// mode is reserved, but currently not used so it's not a problem
	// but if it's used in the future, the importer result could depend on it
	//
	// adding it to the key might complicate the pkginfo api because it's called
	// by code that doesn't know anything about mode
	pkgInf, err := mctl.pkgInfo(gi.mx, impPath, srcDir)
	if err != nil {
		mctl.dbgf("pkgInfo(%q, %q): %s\n", impPath, srcDir, err)
		return nil, err
	}
	newDefImpr, newFbkImpr, srcMode := mctl.importerFactories()
	k := pkgInf.cacheKey(srcMode)

	// pre-seed the key with an import-cycle error: if this import re-enters
	// itself before completing, the seeded entry is returned instead
	gi.res.Lock()
	res, seen := gi.res.m[k]
	if !seen {
		gi.res.m[k] = gsuImpRes{err: errImportCycleDetected}
	}
	gi.res.Unlock()

	// we cache the results of the underlying importer for this *session*
	// because if it fails, or there's an import cycle, we could potentially end up in a loop
	// trying to import the package again.
	if seen {
		return res.pkg, res.err
	}
	defer func() {
		gi.res.Lock()
		defer gi.res.Unlock()

		gi.res.m[k] = gsuImpRes{pkg: pkg, err: err}
	}()

	defImpr := newDefImpr(gi.mx, gi)
	pkg, err = gi.importFrom(defImpr, k, srcDir, mode)
	complete := err == nil && pkg.Complete()
	if complete {
		return pkg, nil
	}

	mctl.dbgf("importFrom(%q, %q): default=%T: complete=%v, err=%v\n",
		k.Path, k.Dir, defImpr, complete, err,
	)

	// no fallback allowed
	if newFbkImpr == nil {
		return pkg, err
	}

	// problem 1:
	// if the pkg import fails we will offer no completion
	//
	// problem 2:
	// if it succeeds, but is incomplete we offer completion with `invalid-type` failures
	// i.e. completion stops working at random points for no obvious reason
	//
	// assumption:
	// it's better to risk using stale data (bin imports)
	// as opposed to offering no completion at all
	//
	// risks:
	// we will end up caching the result, but that shouldn't be a big deal
	// because if the pkg is edited, thus (possibly) making it importable,
	// we will remove it from the cache anyway.
	// there is the issue about mixing binary (potentially incomplete) pkgs with src pkgs
	// but we were already not going to return anything, so it *shouldn't* apply here

	fbkImpr := newFbkImpr(gi.mx, gi)
	fbkPkg, fbkErr := gi.importFrom(fbkImpr, k.fallback(), srcDir, mode)
	fbkComplete := fbkErr == nil && fbkPkg.Complete()
	switch {
	case fbkComplete:
		pkg, err = fbkPkg, nil
	case fbkPkg != nil && pkg == nil:
		pkg, err = fbkPkg, fbkErr
	}

	mctl.dbgf("importFrom(%q, %q): fallback=%T: complete=%v, err=%v\n",
		k.Path, k.Dir, fbkImpr, fbkComplete, fbkErr,
	)

	return pkg, err
}
+
// importFrom wraps doImportFrom with a memo on the package directory's VFS
// node, so repeat imports are free until that directory changes.
func (gi *gsuImporter) importFrom(underlying types.ImporterFrom, ck mgcCacheKey, srcDir string, mode types.ImportMode) (*types.Package, error) {
	_, memo, err := gi.mx.VFS.Memo(ck.Dir)
	if err != nil {
		return nil, err
	}
	// locally-declared key/value types keep this memo entry distinct from
	// any other user of the same node
	type K struct{ mgcCacheKey }
	type V struct {
		p *types.Package
		e error
	}
	k := K{ck}
	v := memo.Read(k, func() interface{} {
		p, err := gi.doImportFrom(underlying, ck, srcDir, mode)
		return V{p: p, e: err}
	}).(V)
	return v.p, v.e
}

// doImportFrom performs the actual import via the underlying importer.
// `unsafe` is special-cased, the shared package cache is consulted first,
// and successful imports are recorded in the cache and the package list.
func (gi *gsuImporter) doImportFrom(underlying types.ImporterFrom, k mgcCacheKey, srcDir string, mode types.ImportMode) (*types.Package, error) {
	defer gi.mx.Profile.Push("gsuImport: " + k.Path).Pop()

	if k.Std && k.Path == "unsafe" {
		return types.Unsafe, nil
	}

	if e, ok := mctl.pkgs.get(k); ok {
		return e.Pkg, nil
	}

	impStart := time.Now()
	pkg, err := underlying.ImportFrom(k.Path, srcDir, mode)
	impDur := time.Since(impStart)

	if err == nil {
		mctl.pkgs.put(mgcCacheEnt{Key: k, Pkg: pkg, Dur: impDur})

		// make sure the package list knows about this dir/path too
		if _, ok := mctl.plst.View().ByDir[k.Dir]; !ok {
			mctl.plst.Add(gopkg.Pkg{
				Dir:        k.Dir,
				ImportPath: k.Path,
				Name:       pkg.Name(),
			})
		}
	} else {
		mctl.dbgf("%T.ImportFrom(%q, %q): %s\n", underlying, k.Path, k.Dir, err)
	}

	return pkg, err
}
diff --git a/src/margo.sh/golang/gofmt.go b/src/margo.sh/golang/gofmt.go
new file mode 100644
index 00000000..6558f981
--- /dev/null
+++ b/src/margo.sh/golang/gofmt.go
@@ -0,0 +1,51 @@
+package golang
+
+import (
+ "go/format"
+ mgformat "margo.sh/format"
+ "margo.sh/mg"
+ "margo.sh/sublime"
+)
+
var (
	// GoFmt formats the view with go/format (gofmt).
	GoFmt mg.Reducer = mg.NewReducer(goFmt)
	// GoImports formats the view with the external goimports command.
	GoImports mg.Reducer = mg.NewReducer(goImports)

	// shared by the fmt reducers: run on Go files, on explicit fmt and
	// on pre-save
	commonFmtLangs   = []mg.Lang{mg.Go}
	commonFmtActions = []mg.Action{
		mg.ViewFmt{},
		mg.ViewPreSave{},
	}
)

// disableGsFmt turns off GoSublime's own (legacy) fmt-on-save so it doesn't
// fight with the margo fmt reducers.
func disableGsFmt(st *mg.State) *mg.State {
	if cfg, ok := st.Config.(sublime.Config); ok {
		return st.SetConfig(cfg.DisableGsFmt())
	}
	return st
}
+
// FmtFunc is a reducer that formats the view with an arbitrary function.
type FmtFunc func(mx *mg.Ctx, src []byte) ([]byte, error)

// Reduce implements mg.Reducer by delegating to the generic format reducer
// for the common Go langs/actions, with legacy GoSublime fmt disabled.
func (ff FmtFunc) Reduce(mx *mg.Ctx) *mg.State {
	return disableGsFmt(mgformat.FmtFunc{
		Fmt:     ff,
		Langs:   commonFmtLangs,
		Actions: commonFmtActions,
	}.Reduce(mx))
}

// goFmt formats the view using go/format (equivalent to gofmt).
func goFmt(mx *mg.Ctx) *mg.State {
	return FmtFunc(func(_ *mg.Ctx, src []byte) ([]byte, error) {
		return format.Source(src)
	}).Reduce(mx)
}

// goImports formats the view with the goimports command, passing -srcdir so
// imports resolve relative to the file being edited.
func goImports(mx *mg.Ctx) *mg.State {
	return disableGsFmt(mgformat.FmtCmd{
		Name:    "goimports",
		Args:    []string{"-srcdir", mx.View.Filename()},
		Langs:   commonFmtLangs,
		Actions: commonFmtActions,
	}.Reduce(mx))
}
diff --git a/src/margo.sh/golang/gogenerate.go b/src/margo.sh/golang/gogenerate.go
new file mode 100644
index 00000000..566d68ef
--- /dev/null
+++ b/src/margo.sh/golang/gogenerate.go
@@ -0,0 +1,33 @@
+package golang
+
+import (
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+)
+
+// GoGenerate adds a UserCmd that calls `go generate` in go packages and sub-dirs
+type GoGenerate struct {
+ mg.ReducerType
+
+ // Args are extra arguments to pass to `go generate`
+ Args []string
+}
+
+// RCond implements mg.Reducer
+func (gg *GoGenerate) RCond(mx *mg.Ctx) bool {
+ return mx.ActionIs(mg.QueryUserCmds{})
+}
+
+// RCond implements mg.Reducer
+func (gg *GoGenerate) Reduce(mx *mg.Ctx) *mg.State {
+ dir := goutil.ClosestPkgDir(mx.View.Dir())
+ if dir == nil {
+ return mx.State
+ }
+ return mx.State.AddUserCmds(mg.UserCmd{
+ Title: "Go Generate",
+ Name: "go",
+ Args: append([]string{"generate"}, gg.Args...),
+ Dir: dir.Path(),
+ })
+}
diff --git a/src/disposa.blue/margo/golang/golang.go b/src/margo.sh/golang/golang.go
similarity index 69%
rename from src/disposa.blue/margo/golang/golang.go
rename to src/margo.sh/golang/golang.go
index 35245caa..600ee48f 100644
--- a/src/disposa.blue/margo/golang/golang.go
+++ b/src/margo.sh/golang/golang.go
@@ -1,7 +1,7 @@
package golang
import (
- "disposa.blue/margo/mg"
+ "margo.sh/mg"
)
var Reducers = []mg.Reducer{}
diff --git a/src/margo.sh/golang/gopkg/import.go b/src/margo.sh/golang/gopkg/import.go
new file mode 100644
index 00000000..9839e652
--- /dev/null
+++ b/src/margo.sh/golang/gopkg/import.go
@@ -0,0 +1,470 @@
+package gopkg
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "github.com/rogpeppe/go-internal/modfile"
+ "github.com/rogpeppe/go-internal/module"
+ "github.com/rogpeppe/go-internal/semver"
+ "go/build"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "margo.sh/vfs"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
var (
	// pkgModFilepath is the OS-specific `/pkg/mod/` fragment used to detect
	// directories inside the module cache.
	pkgModFilepath     = string(filepath.Separator) + "pkg" + string(filepath.Separator) + "mod" + string(filepath.Separator)
	errPkgPathNotFound = errors.New("pkg path not found")
)

// ScanFilter reports whether de is worth scanning for Go packages:
// directories and .go files, skipping hidden/underscore names, testdata and
// node_modules. NOTE(review): assumes de.Name() is never empty.
func ScanFilter(de *vfs.Dirent) bool {
	nm := de.Name()
	if nm[0] == '.' || nm[0] == '_' || nm == "testdata" || nm == "node_modules" {
		return false
	}
	return de.IsDir() || strings.HasSuffix(nm, ".go")
}
+
// ImportDir imports the Go package in the absolute directory dir.
func ImportDir(mx *mg.Ctx, dir string) (*Pkg, error) {
	if !filepath.IsAbs(dir) {
		return nil, fmt.Errorf("ImportDir: %s is not an absolute path", dir)
	}
	return ImportDirNd(mx, mx.VFS.Poke(dir))
}

// ImportDirNd imports the Go package rooted at the VFS node dir,
// (re)listing the directory and memoising the result.
func ImportDirNd(mx *mg.Ctx, dir *vfs.Node) (*Pkg, error) {
	return importDirNd(mx, dir, true)
}

// importDirNd imports the package in nd.
// With poke set, the directory is (re)listed and the result computed and
// cached; without it, only already-cached children and memo entries are
// consulted, so it never does new work.
func importDirNd(mx *mg.Ctx, nd *vfs.Node, poke bool) (*Pkg, error) {
	var cl *vfs.NodeList
	if poke {
		cl = nd.Ls()
	} else {
		cl = nd.Children()
	}
	ls := cl.Filter(goutil.PkgNdFilter).Nodes()
	if len(ls) == 0 {
		if poke {
			return nil, &build.NoGoError{Dir: nd.Path()}
		}
		return nil, nil
	}
	bctx := goutil.BuildContext(mx)
	// the memo is keyed on the build env because GOROOT/GOPATH affect the
	// computed import path
	type K struct{ GOROOT, GOPATH string }
	type V struct {
		p *Pkg
		e error
	}
	k := K{GOROOT: bctx.GOROOT, GOPATH: bctx.GOPATH}
	if !poke {
		v, _ := nd.PeekMemo(k).(V)
		return v.p, v.e
	}
	v := nd.ReadMemo(k, func() interface{} {
		p, err := importDir(mx, nd, bctx, ls)
		return V{p: p, e: err}
	}).(V)
	return v.p, v.e
}

// PeekDir is like ImportDir, but only consults cached data.
func PeekDir(mx *mg.Ctx, dir string) *Pkg {
	return PeekDirNd(mx, mx.VFS.Peek(dir))
}

// PeekDirNd is like ImportDirNd, but only consults cached data and returns
// nil instead of an error.
func PeekDirNd(mx *mg.Ctx, dir *vfs.Node) *Pkg {
	p, _ := importDirNd(mx, dir, false)
	return p
}
+
// importDir builds a *Pkg for directory nd using bctx, trying each
// candidate .go node in ls in turn until one yields a valid package.
// bctx's IsDir/ReadDir are overridden so go/build reads through the VFS
// and, for the package directory itself, sees exactly one candidate file
// per attempt.
func importDir(mx *mg.Ctx, nd *vfs.Node, bctx *build.Context, ls []*vfs.Node) (*Pkg, error) {
	dir := nd.Path()
	var errNoGo error = &build.NoGoError{Dir: dir}
	bctx.IsDir = func(p string) bool {
		if p == dir {
			return true
		}
		return mx.VFS.IsDir(p)
	}
	bctx.ReadDir = func(p string) ([]os.FileInfo, error) {
		if p != dir {
			return mx.VFS.ReadDir(p)
		}
		// expose only the current candidate; ls is shifted below after each
		// failed attempt
		if len(ls) == 0 {
			return nil, errNoGo
		}
		fi, err := ls[0].Stat()
		if err == nil {
			return []os.FileInfo{fi}, nil
		}
		return nil, err
	}
	resErr := errNoGo
	for len(ls) != 0 {
		bp, err := bctx.ImportDir(dir, 0)
		ls = ls[1:]
		if err != nil {
			// remember the last failure; keep trying the remaining files
			resErr = err
			continue
		}
		p := &Pkg{
			Dir:        bp.Dir,
			Name:       bp.Name,
			ImportPath: bp.ImportPath,
			Goroot:     bp.Goroot,
		}
		p.Finalize()
		return p, nil
	}
	return nil, resErr
}
+
// FindPkg locates the directory providing importPath as imported from
// srcDir. GOROOT wins outright; in module mode the go.mod-based resolver is
// used; otherwise the module cache is tried before classic GOPATH lookup.
func FindPkg(mx *mg.Ctx, importPath, srcDir string) (*PkgPath, error) {
	bctx := goutil.BuildContext(mx)
	grDir := filepath.Join(bctx.GOROOT, "src", importPath)
	grNd := mx.VFS.Poke(grDir).Ls()
	if grNd.Some(goutil.PkgNdFilter) {
		return &PkgPath{Dir: grDir, ImportPath: importPath, Goroot: true}, nil
	}
	if goutil.ModEnabled(mx, srcDir) {
		return findPkgGm(mx, importPath, srcDir, nil)
	}
	if p, err := findPkgPm(mx, importPath, srcDir); err == nil {
		return p, nil
	}
	return findPkgGp(mx, bctx, importPath, srcDir)
}
+
// findPkgGp resolves importPath via the classic GOPATH mechanism
// (bctx.Import with FindOnly). The lookup is memoised on srcDir's VFS node,
// keyed on the build env since GOROOT/GOPATH affect the result.
func findPkgGp(mx *mg.Ctx, bctx *build.Context, importPath, srcDir string) (*PkgPath, error) {
	_, memo, err := mx.VFS.Memo(srcDir)
	if err != nil {
		return nil, err
	}
	type K struct {
		goutil.SrcDirKey
		importPath string
	}
	type V struct {
		p *PkgPath
		e error
	}
	k := K{goutil.MakeSrcDirKey(bctx, srcDir), importPath}
	v := memo.Read(k, func() interface{} {
		bpkg, err := bctx.Import(importPath, k.SrcDir, build.FindOnly)
		v := V{e: err}
		if err == nil {
			v.p = &PkgPath{
				Dir:        bpkg.Dir,
				ImportPath: bpkg.ImportPath,
				Goroot:     bpkg.Goroot,
			}
		}
		return v
	}).(V)
	return v.p, v.e
}
+
// findPkgPm resolves importPath when srcDir itself lives inside the module
// cache (GOPATH/pkg/mod). It extracts the `module@version` root from srcDir
// and looks for the package inside that module, returning
// errPkgPathNotFound when srcDir isn't in the cache, the version is
// invalid, or the package directory has no Go files.
func findPkgPm(mx *mg.Ctx, importPath, srcDir string) (*PkgPath, error) {
	srcDir = filepath.Clean(srcDir)
	pmPos := strings.Index(srcDir, pkgModFilepath)
	if pmPos < 0 {
		return nil, errPkgPathNotFound
	}
	vPos := strings.Index(srcDir[pmPos:], "@v")
	if vPos < 0 {
		return nil, errPkgPathNotFound
	}
	vPos += pmPos
	// modDir becomes `.../pkg/mod/<module>@<version>`
	modDir := srcDir
	if i := strings.IndexByte(srcDir[vPos:], filepath.Separator); i >= 0 {
		modDir = srcDir[:vPos+i]
	}
	mod := filepath.ToSlash(modDir[pmPos+len(pkgModFilepath) : vPos])
	// importPath must be the module itself or a package under it
	sfx := strings.TrimPrefix(importPath, mod)
	if sfx != "" && sfx[0] != '/' {
		return nil, errPkgPathNotFound
	}
	ver := modDir[vPos+1:]
	if !semver.IsValid(ver) {
		return nil, errPkgPathNotFound
	}
	// NOTE(review): ToSlash here is presumably meant to be FromSlash; it's
	// harmless since filepath.Join normalises separators — confirm.
	dir := filepath.Join(modDir, filepath.ToSlash(sfx))
	if !mx.VFS.Poke(dir).Ls().Some(goutil.PkgNdFilter) {
		return nil, errPkgPathNotFound
	}
	return &PkgPath{
		Dir:        dir,
		ImportPath: importPath,
	}, nil
}
+
// findPkgGm resolves importPath in module mode: it finds the go.mod
// governing srcDir and delegates to a ModPath rooted at its directory,
// linking mp (the chain searched so far, if any) as the parent.
func findPkgGm(mx *mg.Ctx, importPath, srcDir string, mp *ModPath) (*PkgPath, error) {
	fileNd := goutil.ModFileNd(mx, srcDir)
	if fileNd == nil {
		return nil, os.ErrNotExist
	}
	dir := fileNd.Parent().Path()
	if mp != nil && mp.Dir == dir {
		return mp.FindPkg(mx, importPath, srcDir)
	}
	return (&ModPath{Parent: mp, Dir: dir}).FindPkg(mx, importPath, srcDir)
}

// ModPath is a link in a chain of module directories, innermost first.
type ModPath struct {
	Parent *ModPath
	Dir    string
}

// FindPkg resolves importPath starting from this module and walking up the
// parent chain, then falling back to the module cache and finally GOPATH.
// A nil receiver falls through to the package-level FindPkg.
func (mp *ModPath) FindPkg(mx *mg.Ctx, importPath, srcDir string) (*PkgPath, error) {
	if mp == nil {
		return FindPkg(mx, importPath, srcDir)
	}
	for ; mp != nil; mp = mp.Parent {
		if p, err := mp.findPkg(mx, importPath, srcDir); err == nil {
			return p, nil
		}
	}
	if p, err := findPkgPm(mx, importPath, srcDir); err == nil {
		return p, nil
	}
	return findPkgGp(mx, goutil.BuildContext(mx), importPath, srcDir)
}

// findPkg resolves importPath using this module's go.mod/go.sum alone.
func (mp *ModPath) findPkg(mx *mg.Ctx, importPath, srcDir string) (*PkgPath, error) {
	dirNd := mx.VFS.Poke(mp.Dir)
	bctx := goutil.BuildContext(mx)
	mf, err := loadModSumNd(mx, dirNd)
	if err != nil {
		return nil, err
	}
	return mf.find(mx, bctx, importPath, mp)
}
+
// modFile is a parsed go.mod (plus go.sum) with its requirements indexed by
// module path.
type modFile struct {
	Dir  string // directory containing go.mod
	Path string // path of the go.mod file itself
	Deps map[string]modDep
	File *modfile.File
}

// modDep is a single requirement (or replacement) from go.mod/go.sum.
type modDep struct {
	Dir     string // non-empty for self/local-replacement modules: the module dir on disk
	ModPath string
	SubPkg  string // package path relative to the module; set by require()
	Version string

	// oldPath is the original path of a replaced module
	oldPath string
}

// requireMD finds the requirement covering modPath, trying the path itself
// and then each parent path, so `a/b/c` matches a requirement on `a/b`.
func (mf *modFile) requireMD(modPath string) (_ modDep, found bool) {
	if md, ok := mf.Deps[modPath]; ok {
		return md, true
	}
	if p := mgutil.PathParent(modPath); p != "" {
		return mf.requireMD(p)
	}
	return modDep{}, false
}

// require resolves importPath to the requirement providing it and fills in
// SubPkg, the package's path inside that module.
func (mf *modFile) require(importPath string) (modDep, error) {
	md, found := mf.requireMD(importPath)
	if !found {
		return modDep{}, fmt.Errorf("require(%s) not found in %s", importPath, mf.Path)
	}
	// for replacements, the import path is written in terms of the old
	// (replaced) module path
	modPath := md.ModPath
	if md.oldPath != "" {
		modPath = md.oldPath
	}
	md.SubPkg = strings.TrimPrefix(importPath, modPath)
	md.SubPkg = strings.TrimLeft(md.SubPkg, "/")
	return md, nil
}
+
// find locates the directory providing importPath according to this
// go.mod's requirements. Self/local-replacement modules are resolved only
// in their own dir; everything else searches, in order: the module's vendor
// dir, the module cache, parent dirs' vendor dirs, and GOROOT/src/vendor —
// with GOROOT/src/vendor tried first for dot-less (stdlib-reserved) paths.
// Any hit is tagged with a ModPath linking back to this module.
// TODO: support `std`. stdlib pkgs are vendored, so AFAIK, it's not used yet.
func (mf *modFile) find(mx *mg.Ctx, bctx *build.Context, importPath string, mp *ModPath) (pp *PkgPath, err error) {
	defer func() {
		if pp != nil {
			pp.Mod = &ModPath{Dir: mf.Dir, Parent: mp}
		}
	}()

	md, err := mf.require(importPath)
	if err != nil {
		return nil, err
	}
	// lsPkg returns pfx/sfx as the result iff that dir contains Go files
	lsPkg := func(pfx, sfx string) *PkgPath {
		dir := filepath.Join(pfx, filepath.FromSlash(sfx))
		ok := mx.VFS.Poke(dir).Ls().Some(goutil.PkgNdFilter)
		if ok {
			return &PkgPath{Dir: dir, ImportPath: importPath}
		}
		return nil
	}

	// if we're importing a self/sub-module or local replacement package don't search anywhere else
	if md.Dir != "" {
		if p := lsPkg(md.Dir, md.SubPkg); p != nil {
			return p, nil
		}
		return nil, fmt.Errorf("cannot find local/replacement package `%s` in `%s`", importPath, md.Dir)
	}

	// local vendor first to support un-imaginable use-cases like editing third-party packages.
	// we don't care about BS like `-mod=vendor`
	searchLocalVendor := func() *PkgPath {
		return lsPkg(filepath.Join(mf.Dir, "vendor"), importPath)
	}
	// the module cache escapes upper-case letters; encode the path first
	mpath, err := module.EncodePath(md.ModPath)
	if err != nil {
		return nil, err
	}
	grSrc := mx.VFS.Poke(bctx.GOROOT).Poke("src")
	roots := map[string]bool{grSrc.Path(): true}
	searchPkgMod := func() *PkgPath {
		gopath := mgutil.PathList(bctx.GOPATH)
		pkgMod := filepath.FromSlash("pkg/mod/" + mpath + "@" + md.Version)
		for _, gp := range gopath {
			// also record each GOPATH src root so searchOtherVendors stops there
			roots[mx.VFS.Poke(gp).Poke("src").Path()] = true
			if p := lsPkg(filepath.Join(gp, pkgMod), md.SubPkg); p != nil {
				return p
			}
		}
		return nil
	}
	// check all the parent vendor dirs. we check mf.Dir separately
	searchOtherVendors := func() *PkgPath {
		for sd := mx.VFS.Poke(mf.Dir).Parent(); !sd.IsRoot(); sd = sd.Parent() {
			dir := sd.Path()
			if roots[dir] {
				break
			}
			if p := lsPkg(filepath.Join(dir, "vendor"), importPath); p != nil {
				return p
			}
		}
		return nil
	}
	// check GOROOT/vendor to support the `std` module
	searchGrVendor := func() *PkgPath {
		return lsPkg(filepath.Join(bctx.GOROOT, "src", "vendor"), importPath)
	}
	search := []func() *PkgPath{
		searchLocalVendor,
		searchPkgMod,
		searchOtherVendors,
		searchGrVendor,
	}
	if !strings.Contains(strings.SplitN(importPath, "/", 2)[0], ".") {
		// apparently import paths without dots are reserved for the stdlib
		// checking first also avoids the many misses for each stdlib pkg
		search = []func() *PkgPath{
			searchGrVendor,
			searchLocalVendor,
			searchPkgMod,
			searchOtherVendors,
		}
	}
	for _, f := range search {
		if p := f(); p != nil {
			return p, nil
		}
	}
	if md.oldPath != "" {
		return nil, fmt.Errorf("cannot find `%s` replacement `%s` using `%s`", importPath, md.ModPath, mf.Path)
	}
	return nil, fmt.Errorf("cannot find `%s` using `%s`", importPath, mf.Path)
}
+
// loadModSumNd memoises loadModSum on the module directory's VFS node.
func loadModSumNd(mx *mg.Ctx, dirNd *vfs.Node) (*modFile, error) {
	type K struct{}
	type V struct {
		mf *modFile
		e  error
	}
	v := dirNd.ReadMemo(K{}, func() interface{} {
		v := V{}
		v.mf, v.e = loadModSum(mx, dirNd.Path())
		return v
	}).(V)
	return v.mf, v.e
}

// loadModSum parses dir/go.mod (required) and dir/go.sum (optional) into a
// modFile. Deps is built from the require and replace directives, the
// module's own path (mapped to dir itself so self-imports resolve locally),
// and finally any go.sum entry not already present; go.sum `/go.mod` hash
// lines fail the semver check and are skipped.
func loadModSum(mx *mg.Ctx, dir string) (*modFile, error) {
	gomod := filepath.Join(dir, "go.mod")
	modSrc, err := mx.VFS.ReadBlob(gomod).ReadFile()
	if err != nil {
		return nil, err
	}
	mf := &modFile{
		Dir:  dir,
		Path: gomod,
		Deps: map[string]modDep{},
	}
	mf.File, err = modfile.Parse(gomod, modSrc, nil)
	if err != nil {
		return nil, err
	}

	for _, r := range mf.File.Require {
		mf.Deps[r.Mod.Path] = modDep{
			ModPath: r.Mod.Path,
			Version: r.Mod.Version,
		}
	}

	for _, r := range mf.File.Replace {
		md := modDep{
			oldPath: r.Old.Path,
			ModPath: r.New.Path,
			Version: r.New.Version,
		}
		if dir := r.New.Path; modfile.IsDirectoryPath(dir) {
			if !filepath.IsAbs(dir) {
				dir = filepath.Join(mf.Dir, dir)
			}
			nd := mx.VFS.Poke(dir)
			// replacement isn't valid unless the go.mod file exists
			// TODO: should we ignore this rule? I don't know what problem it solves
			// but it makes it more annoying to just point a module at a local directory
			if nd.Poke("go.mod").IsFile() {
				md.Dir = nd.Path()
				// the path is a filesystem path, not an import path
				md.ModPath = r.Old.Path
			}
		}
		mf.Deps[r.Old.Path] = md
	}

	// the module itself: imports of its own path resolve to dir
	self := mf.File.Module.Mod
	mf.Deps[self.Path] = modDep{
		Dir:     mf.Dir,
		ModPath: self.Path,
		Version: self.Version,
	}

	// go.sum is optional; ignore it if unreadable
	gosum := filepath.Join(dir, "go.sum")
	sumSrc, err := mx.VFS.ReadBlob(gosum).ReadFile()
	if err != nil {
		return mf, nil
	}
	for _, ln := range bytes.Split(sumSrc, []byte{'\n'}) {
		fields := bytes.Fields(ln)
		if len(fields) != 3 {
			continue
		}
		md := modDep{ModPath: string(fields[0]), Version: string(fields[1])}
		if !semver.IsValid(md.Version) {
			continue
		}
		if _, exists := mf.Deps[md.ModPath]; exists {
			continue
		}
		mf.Deps[md.ModPath] = md
	}
	return mf, nil
}
diff --git a/src/margo.sh/golang/gopkg/pkg.go b/src/margo.sh/golang/gopkg/pkg.go
new file mode 100644
index 00000000..879c307c
--- /dev/null
+++ b/src/margo.sh/golang/gopkg/pkg.go
@@ -0,0 +1,71 @@
+package gopkg
+
+import (
+ "path/filepath"
+ "strings"
+)
+
// Pkg describes a Go package found on disk.
type Pkg struct {
	// ImportablePfx, when non-empty, is the directory prefix (with trailing
	// separator) from which this internal/vendored package may be imported;
	// computed by Finalize.
	ImportablePfx string

	// The following fields are a subset of build.Package
	Dir        string
	Name       string
	ImportPath string
	Goroot     bool
}

// PkgPath is the location of a package found by FindPkg.
type PkgPath struct {
	Dir        string
	ImportPath string
	Goroot     bool
	Mod        *ModPath // the module that provided the package, if any
}

var (
	// OS-specific forms of the path elements that restrict importability
	internalSepDir = filepath.FromSlash("/internal/")
	vendorSepDir   = filepath.FromSlash("/vendor/")
)
+
// IsCommand reports whether the package is a main package, i.e. a command.
func (p *Pkg) IsCommand() bool { return p.Name == "main" }
+
+func (p *Pkg) Importable(srcDir string) bool {
+ if p.ImportPath == "." || p.IsCommand() {
+ return false
+ }
+ if s := p.ImportablePfx; s != "" {
+ return strings.HasPrefix(srcDir, s) || srcDir == s[:len(s)-1]
+ }
+ if p.Dir == srcDir {
+ return false
+ }
+ return true
+}
+
// dirPfx returns the importable prefix (with trailing separator) for a
// package dir containing the path element in slash (e.g. `/internal/` in OS
// form), or "" when the element doesn't appear in dir.
func (p *Pkg) dirPfx(dir, slash string) string {
	// element in the middle: `a/b/internal/c` -> parent of `internal` -> `a/b/`
	if i := strings.LastIndex(dir, slash); i >= 0 {
		return filepath.Dir(dir[:i+len(slash)-1]) + string(filepath.Separator)
	}
	// dir itself ends with the element: trim it, then take the parent.
	// NOTE(review): this yields the grandparent of the element — confirm
	// that's intended for a package dir literally named internal/vendor.
	if d := strings.TrimSuffix(dir, slash[:len(slash)-1]); d != dir {
		return filepath.Dir(d) + string(filepath.Separator)
	}
	return ""
}
+
// Finalize cleans p.Dir, computes the importable prefix for internal and
// vendored packages, and strips any `vendor/` prefix from the import path
// so it matches what callers actually write in their import statements.
func (p *Pkg) Finalize() {
	p.Dir = filepath.Clean(p.Dir)
	// internal and vendor dirs are treated the same for the purpose of
	// deciding where the package may be imported from
	p.ImportablePfx = p.dirPfx(p.Dir, internalSepDir)
	if p.ImportablePfx == "" {
		p.ImportablePfx = p.dirPfx(p.Dir, vendorSepDir)
	}

	s := p.ImportPath
	switch i := strings.LastIndex(s, "/vendor/"); {
	case i >= 0:
		p.ImportPath = s[i+len("/vendor/"):]
	case strings.HasPrefix(s, "vendor/"):
		p.ImportPath = s[len("vendor/"):]
	}
}
diff --git a/src/margo.sh/golang/gotest.go b/src/margo.sh/golang/gotest.go
new file mode 100644
index 00000000..f0e4fac2
--- /dev/null
+++ b/src/margo.sh/golang/gotest.go
@@ -0,0 +1,178 @@
+package golang
+
+import (
+ "go/ast"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "path/filepath"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type TestCmds struct {
+ mg.ReducerType
+
+ // BenchArgs is a list of extra arguments to pass to `go test` for benchmarks
+ // these are in addition to the usual `-test.run` and `-test.bench` args
+ BenchArgs []string
+
+ // TestArgs is a list of extra arguments to pass to `go test` for tests and examples
+ // these are in addition to the usual `-test.run` arg
+ TestArgs []string
+}
+
+func (tc *TestCmds) RCond(mx *mg.Ctx) bool {
+ return mx.LangIs(goutil.Langs...)
+}
+
+func (tc *TestCmds) Reduce(mx *mg.Ctx) *mg.State {
+ switch act := mx.Action.(type) {
+ case mg.QueryTestCmds:
+ return tc.queryTestCmds(mx)
+ case mg.RunCmd:
+ return tc.actuateCmd(mx, act)
+ default:
+ return mx.State
+ }
+}
+
+func (tc *TestCmds) actuateCmd(mx *mg.Ctx, rc mg.RunCmd) *mg.State {
+ if rc.Name != mg.RcActuate {
+ return mx.State
+ }
+
+ cx := NewViewCursorCtx(mx)
+ if !cx.IsTestFile {
+ return mx.State
+ }
+
+ name, pfx, _, ok := tc.splitName(cx.FuncName())
+ if !ok {
+ return mx.State
+ }
+
+ pat := "^" + name
+ switch rc.StringFlag("button", "left") {
+ case "left":
+ pat += "$"
+ case "right":
+ pat += ".*"
+ default:
+ return mx.State
+ }
+
+ args := tc.pfxArgs(pfx, pat)
+ return mx.AddBuiltinCmds(mg.BuiltinCmd{
+ Name: mg.RcActuate,
+ Run: func(cx *mg.CmdCtx) *mg.State {
+ return cx.WithCmd("go", args...).Run()
+ },
+ })
+}
+
+func (tc *TestCmds) queryTestCmds(mx *mg.Ctx) *mg.State {
+	dir := mx.View.Dir()
+	bld := BuildContext(mx)
+	pkg, err := bld.ImportDir(dir, 0)
+	if pkg == nil {
+		mx.Log.Println("TestCmds:", err)
+		return mx.State
+	}
+
+	cmds := map[string]mg.UserCmdList{}
+	for _, names := range [][]string{pkg.TestGoFiles, pkg.XTestGoFiles} {
+		for _, nm := range names {
+			tc.process(mx, cmds, filepath.Join(dir, nm))
+		}
+	}
+
+	numCmds := len(cmds["Test"]) + len(cmds["Benchmark"]) + len(cmds["Example"]) // was "Exampe": Example-only pkgs were miscounted as empty
+	if numCmds == 0 {
+		mx.Log.Println("TestCmds: no Test, Benchmarks or Examples found")
+		return mx.State
+	}
+
+	cl := make(mg.UserCmdList, 0, 4+numCmds)
+	cl = append(cl, mg.UserCmd{
+		Name:  "go",
+		Args:  tc.testArgs("."),
+		Title: "Run all Tests and Examples",
+	})
+	for _, pfx := range []string{"Test", "Benchmark", "Example"} {
+		if len(cmds[pfx]) == 0 {
+			continue
+		}
+
+		cmd := mg.UserCmd{
+			Name:  "go",
+			Title: "Run all " + pfx + "s",
+		}
+		if pfx == "Benchmark" {
+			cmd.Args = tc.benchArgs(".")
+		} else {
+			cmd.Args = tc.testArgs(pfx + ".+")
+		}
+		cl = append(cl, cmd)
+	}
+	for _, pfx := range []string{"Test", "Benchmark", "Example"} {
+		l := cmds[pfx]
+		sort.Sort(l)
+		cl = append(cl, l...)
+	}
+	return mx.AddUserCmds(cl...)
+}
+
+func (tc *TestCmds) benchArgs(pat string) []string {
+ return append([]string{"test", "-test.run=none", "-test.bench=" + pat}, tc.BenchArgs...)
+}
+
+func (tc *TestCmds) pfxArgs(pfx, pat string) []string {
+ if pfx == "Benchmark" {
+ return tc.benchArgs(pat)
+ }
+ return tc.testArgs(pat)
+}
+
+func (tc *TestCmds) testArgs(pat string) []string {
+ return append([]string{"test", "-test.run=" + pat}, tc.TestArgs...)
+}
+
+func (tc *TestCmds) process(mx *mg.Ctx, cmds map[string]mg.UserCmdList, fn string) {
+ for _, d := range ParseFile(mx, fn, nil).AstFile.Decls {
+ fun, ok := d.(*ast.FuncDecl)
+ if ok && fun.Name != nil {
+ tc.processIdent(cmds, fun.Name)
+ }
+ }
+}
+
+func (tc *TestCmds) processIdent(cmds map[string]mg.UserCmdList, id *ast.Ident) {
+ name, pfx, sfx, ok := tc.splitName(id.Name)
+ if !ok {
+ return
+ }
+ cmds[pfx] = append(cmds[pfx], mg.UserCmd{
+ Name: "go",
+ Args: tc.pfxArgs(pfx, "^"+name+"$"),
+ Title: pfx + ": " + sfx,
+ })
+}
+
+func (tc *TestCmds) splitName(nm string) (name, pfx, sfx string, ok bool) {
+ if nm == "" {
+ return "", "", "", false
+ }
+ for _, pfx := range []string{"Test", "Benchmark", "Example"} {
+ if nm == pfx {
+ return nm, nm, "", true
+ }
+ sfx := strings.TrimPrefix(nm, pfx)
+ if sfx != nm {
+ r, _ := utf8.DecodeRuneInString(sfx)
+ return nm, pfx, sfx, unicode.IsUpper(r)
+ }
+ }
+ return "", "", "", false
+}
diff --git a/src/margo.sh/golang/goutil/goutil.go b/src/margo.sh/golang/goutil/goutil.go
new file mode 100644
index 00000000..5495c2c0
--- /dev/null
+++ b/src/margo.sh/golang/goutil/goutil.go
@@ -0,0 +1,197 @@
+package goutil
+
+import (
+ "go/ast"
+ "go/build"
+ "go/token"
+ "io"
+ "margo.sh/mg"
+ "margo.sh/vfs"
+ yotsuba "margo.sh/why_would_you_make_yotsuba_cry"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ // Langs is the list of all Go related langs
+ Langs = []mg.Lang{mg.Go, mg.GoMod, mg.GoSum}
+)
+
+type SrcDirKey struct {
+ GOROOT, GOPATH, SrcDir string
+}
+
+func MakeSrcDirKey(bctx *build.Context, srcDir string) SrcDirKey {
+ return SrcDirKey{bctx.GOROOT, bctx.GOPATH, filepath.Clean(srcDir)}
+}
+
+func BuildContextWithoutCallbacks(mx *mg.Ctx) *build.Context {
+ c := build.Default
+ c.GOARCH = mx.Env.Get("GOARCH", c.GOARCH)
+ c.GOOS = mx.Env.Get("GOOS", c.GOOS)
+ // these must be passed by the client
+ // if we leave them unset, there's a risk something will end up using os.Getenv(...)
+ logUndefined := func(k string) string {
+ v := mx.Env[k]
+ if v == "" {
+ v = k + "-is-not-defined"
+ mx.Log.Println(v)
+ }
+ return v
+ }
+ c.GOROOT = logUndefined("GOROOT")
+ c.GOPATH = logUndefined("GOPATH")
+ return &c
+}
+
+func BuildContext(mx *mg.Ctx) *build.Context {
+ c := BuildContextWithoutCallbacks(mx)
+ c.ReadDir = mx.VFS.ReadDir
+ c.IsDir = mx.VFS.IsDir
+ c.HasSubdir = HasImportPath // rage against the ~~machine~~symlinks...
+ c.OpenFile = func(p string) (io.ReadCloser, error) {
+ if v := mx.View; v != nil && p == v.Path {
+ return v.Open()
+ }
+ if v := mx.View; v != nil && v.Path != "" && filepath.Dir(v.Path) == filepath.Dir(p) {
+ if b := mx.VFS.ReadBlob(p); b != nil {
+ return b.OpenFile()
+ }
+ }
+ if b := mx.VFS.PeekBlob(p); b != nil {
+ return b.OpenFile()
+ }
+ return os.Open(p)
+ }
+ return c
+}
+
+// HasImportPath reports whether dir is lexically a subdirectory of root.
+// If so, it sets importPath to a slash-separated path that
+// can be joined to root to produce a path equivalent to dir.
+//
+// HasImportPath is an implementation of go/build.Context.HasSubdir
+func HasImportPath(root, dir string) (importPath string, ok bool) {
+	root = filepath.Clean(root)
+	dir = filepath.Clean(dir)
+	if !strings.HasPrefix(dir, root) || root == dir {
+		return "", false
+	}
+	importPath = filepath.ToSlash(dir[len(root):])
+	if !strings.HasPrefix(importPath, "/") { // importPath is already slash-separated; filepath.Separator would never match on windows
+		return "", false
+	}
+	return importPath[1:], true
+}
+
+func PathList(p string) []string {
+ l := []string{}
+ for _, s := range strings.Split(p, string(filepath.ListSeparator)) {
+ s = filepath.Clean(s)
+ if filepath.IsAbs(s) {
+ l = append(l, s)
+ }
+ }
+ return l
+}
+
+func NodeEnclosesPos(node ast.Node, pos token.Pos) bool {
+ if yotsuba.IsNil(node) {
+ return false
+ }
+ if np := node.Pos(); !np.IsValid() || pos <= np {
+ return false
+ }
+
+ ne := node.End()
+ var cmnt *ast.Comment
+ switch x := node.(type) {
+ case *ast.Comment:
+ cmnt = x
+ case *ast.CommentGroup:
+ if l := x.List; len(l) != 0 {
+ cmnt = l[len(l)-1]
+ }
+ }
+ if cmnt != nil && strings.HasPrefix(cmnt.Text, "//") {
+ // line comments' end don't include the newline
+ ne++
+ }
+ return pos < ne || !ne.IsValid()
+}
+
+type PosEnd struct {
+ P token.Pos
+ E token.Pos
+}
+
+func (pe PosEnd) Pos() token.Pos {
+ return pe.P
+}
+
+func (pe PosEnd) End() token.Pos {
+ return pe.E
+}
+
+func IsLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+func PkgNdFilter(nd *vfs.Node) bool {
+ nm := nd.Name()
+ return nm[0] != '.' && nm[0] != '_' &&
+ strings.HasSuffix(nm, ".go") &&
+ // there's no such thing as a ~~killer videotape~~go package with only test files
+ !strings.HasSuffix(nm, "_test.go")
+}
+
+func IsPkgDirNd(nd *vfs.Node) bool {
+ return nd.Ls().Some(PkgNdFilter)
+}
+
+func IsPkgDir(dir string) bool {
+ return IsPkgDirNd(mg.VFS.Poke(dir))
+}
+
+func ClosestPkgDirNd(nd *vfs.Node) *vfs.Node {
+ return nd.Closest(IsPkgDirNd)
+}
+
+func ClosestPkgDir(dir string) *vfs.Node {
+ return mg.VFS.Closest(dir, IsPkgDirNd)
+}
+
+// DedentCompletion Dedents s then trims preceding and succeeding empty lines.
+func DedentCompletion(s string) string {
+ return strings.TrimFunc(Dedent(s), func(r rune) bool {
+ return r == '\n' || r == '\r'
+ })
+}
+
+// Dedent un-indents tab-indented lines is s.
+func Dedent(s string) string {
+ lines := strings.Split(s, "\n")
+ trim := func(s string) int {
+ i := 0
+ for i < len(s) && s[i] == '\t' {
+ i++
+ }
+ return i
+ }
+ max := 0
+ for i, s := range lines {
+ cut := trim(s)
+ switch {
+ case max == 0:
+ max = cut
+ case cut > max:
+ cut = max
+ }
+ lines[i] = s[cut:]
+ }
+ return strings.Join(lines, "\n")
+
+}
diff --git a/src/margo.sh/golang/goutil/goutil_test.go b/src/margo.sh/golang/goutil/goutil_test.go
new file mode 100644
index 00000000..1edf2c78
--- /dev/null
+++ b/src/margo.sh/golang/goutil/goutil_test.go
@@ -0,0 +1,108 @@
+// +build !windows
+
+package goutil
+
+import (
+ "go/build"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+var (
+ escTbSp = strings.NewReplacer("\t", "", " ", "")
+ unescTbSp = strings.NewReplacer("", "\t", "", " ")
+)
+
+func TestDedent(t *testing.T) {
+ cases := []struct{ src, want string }{
+ {
+ `
+
+ // empty_lines_at_the_start
+
+ type_T_struct_{
+ //_space_alignment
+ S_string
+ }
+
+ //_space_before
+
+ //_line_with_extra_indentation
+
+//_line_with_tab_at_the_end
+//_line_with_space_at_the_end
+
+ //_empty_lines_after
+
+
+`,
+ `
+
+ // empty_lines_at_the_start
+
+type_T_struct_{
+ //_space_alignment
+ S_string
+}
+
+ //_space_before
+
+ //_line_with_extra_indentation
+
+//_line_with_tab_at_the_end
+//_line_with_space_at_the_end
+
+//_empty_lines_after
+
+
+`,
+ },
+ }
+ for _, c := range cases {
+ got := Dedent(unescTbSp.Replace(c.src))
+ if got != unescTbSp.Replace(c.want) {
+ t.Errorf("got `%s`, want `%s`", escTbSp.Replace(got), escTbSp.Replace(c.want))
+ }
+ }
+}
+
+func TestDedentCompletion(t *testing.T) {
+ cases := []struct{ src, want string }{
+ {
+ `
+ hello world
+
+ `,
+ ` hello world`,
+ },
+ }
+ for _, c := range cases {
+ got := DedentCompletion(c.src)
+ if got != c.want {
+ t.Errorf("got `%s`, want `%s`", got, c.want)
+ }
+ }
+}
+
+func TestHasImportPath(t *testing.T) {
+ root := build.Default.GOROOT
+ src := filepath.Join(root, "src")
+ cmd := filepath.Join(root, "cmd")
+ if p, _ := HasImportPath(src, filepath.Join(src, "p", "k", "g")); p != "p/k/g" {
+ t.Fatalf("Expected `%s`, got `%s`\n", "p/k/g", p)
+ }
+ if p, _ := HasImportPath(src, cmd); p != "" {
+ t.Fatalf("Expected `%s`, got `%s`\n", "", p)
+ }
+}
+
+func BenchmarkHasImportPath(b *testing.B) {
+ root := build.Default.GOROOT
+ src := filepath.Join(root, "src")
+ cmd := filepath.Join(root, "cmd")
+ for i := 0; i < b.N; i++ {
+ HasImportPath(root, src)
+ HasImportPath(src, cmd)
+ }
+}
diff --git a/src/margo.sh/golang/goutil/mod.go b/src/margo.sh/golang/goutil/mod.go
new file mode 100644
index 00000000..b042c28b
--- /dev/null
+++ b/src/margo.sh/golang/goutil/mod.go
@@ -0,0 +1,62 @@
+package goutil
+
+import (
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "margo.sh/vfs"
+ "path/filepath"
+)
+
+const (
+ ModEnvVar = "GO111MODULE"
+)
+
+// ModEnabled reports whether Go modules are enabled in srcDir
+func ModEnabled(mx *mg.Ctx, srcDir string) bool {
+ // - If on Go <= go1.12 and inside GOPATH — defaults to old 1.10 behavior (ignoring modules)
+ // - Outside GOPATH while inside a file tree with a go.mod — defaults to modules behavior
+ // - GO111MODULE environment variable:
+ // unset or auto — default behavior above
+ // on — force module support on regardless of directory location
+ // off — force module support off regardless of directory location
+
+ switch mx.Env.Getenv(ModEnvVar, "") {
+ case "on":
+ return true
+ case "off":
+ return false
+ }
+
+ bctx := BuildContext(mx)
+ type K struct{ SrcDirKey }
+ k := K{MakeSrcDirKey(bctx, srcDir)}
+ if v, ok := mx.Get(k).(bool); ok {
+ return v
+ }
+
+ if v := Version; v.Major <= 1 && v.Minor <= 12 {
+ for _, gp := range PathList(bctx.GOPATH) {
+ p := filepath.Join(gp, "src")
+ if mgutil.IsParentDir(p, k.SrcDir) || k.SrcDir == p {
+ mx.Put(k, false)
+ return false
+ }
+ }
+ }
+
+ modFileExists := ModFileNd(mx, k.SrcDir) != nil
+ mx.Put(k, modFileExists)
+ return modFileExists
+}
+
+func ModFileNd(mx *mg.Ctx, srcDir string) *vfs.Node {
+ bctx := BuildContext(mx)
+ type K struct{ SrcDirKey }
+ k := K{MakeSrcDirKey(bctx, srcDir)}
+ if v, ok := mx.Get(k).(*vfs.Node); ok {
+ return v
+ }
+ nd, _, _ := mx.VFS.Poke(k.SrcDir).Locate("go.mod")
+ mx.Put(k, nd)
+ return nd
+}
diff --git a/src/margo.sh/golang/goutil/parse.go b/src/margo.sh/golang/goutil/parse.go
new file mode 100644
index 00000000..72228871
--- /dev/null
+++ b/src/margo.sh/golang/goutil/parse.go
@@ -0,0 +1,78 @@
+package goutil
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "margo.sh/mg"
+)
+
+const (
+ ParseFileMode = parser.ParseComments | parser.DeclarationErrors | parser.AllErrors
+)
+
+var (
+ NilPkgName = "_"
+ NilFset = token.NewFileSet()
+ NilPkgSrc = "\n\npackage " + NilPkgName + "\n"
+ NilAstFile, _ = parser.ParseFile(NilFset, "", NilPkgSrc, 0)
+ NilTokenFile = NilFset.File(NilAstFile.Pos())
+)
+
+type ParsedFile struct {
+ Fset *token.FileSet
+ AstFile *ast.File
+ TokenFile *token.File
+ Error error
+ ErrorList scanner.ErrorList
+}
+
+func ParseFile(mx *mg.Ctx, fn string, src []byte) *ParsedFile {
+ return ParseFileWithMode(mx, fn, src, ParseFileMode)
+}
+
+func ParseFileWithMode(mx *mg.Ctx, fn string, src []byte, mode parser.Mode) *ParsedFile {
+ mx.Profile.Push("ParseFileWithMode").Pop()
+
+ if len(src) == 0 {
+ var err error
+ if fn != "" {
+ src, err = mx.VFS.ReadBlob(fn).ReadFile()
+ }
+ if len(src) == 0 {
+ return &ParsedFile{
+ Fset: NilFset,
+ AstFile: NilAstFile,
+ TokenFile: NilTokenFile,
+ Error: err,
+ }
+ }
+ }
+
+ type key struct {
+ hash string
+ mode parser.Mode
+ }
+ k := key{hash: mg.SrcHash(src), mode: mode}
+ if pf, ok := mx.Get(k).(*ParsedFile); ok {
+ return pf
+ }
+
+ _, memo, _ := mx.VFS.Memo(fn)
+ pf := memo.Read(k, func() interface{} {
+ pf := &ParsedFile{Fset: token.NewFileSet()}
+ pf.AstFile, pf.Error = parser.ParseFile(pf.Fset, fn, src, mode)
+ if pf.AstFile == nil {
+ pf.AstFile = NilAstFile
+ }
+ pf.TokenFile = pf.Fset.File(pf.AstFile.Pos())
+ if pf.TokenFile == nil {
+ pf.TokenFile = NilTokenFile
+ }
+ pf.ErrorList, _ = pf.Error.(scanner.ErrorList)
+ return pf
+ }).(*ParsedFile)
+ mx.Put(k, pf)
+ return pf
+}
diff --git a/src/disposa.blue/margo/golang/version.go b/src/margo.sh/golang/goutil/version.go
similarity index 96%
rename from src/disposa.blue/margo/golang/version.go
rename to src/margo.sh/golang/goutil/version.go
index 264ff447..a70c732f 100644
--- a/src/disposa.blue/margo/golang/version.go
+++ b/src/margo.sh/golang/goutil/version.go
@@ -1,4 +1,4 @@
-package golang
+package goutil
import (
"go/build"
diff --git a/src/margo.sh/golang/guru.go b/src/margo.sh/golang/guru.go
new file mode 100644
index 00000000..510323eb
--- /dev/null
+++ b/src/margo.sh/golang/guru.go
@@ -0,0 +1,199 @@
+package golang
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "io"
+ "io/ioutil"
+ "margo.sh/cmdpkg/margo/cmdrunner"
+ "margo.sh/mg"
+ yotsuba "margo.sh/why_would_you_make_yotsuba_cry"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+)
+
+var (
+ fnPosPat = regexp.MustCompile(`^(.+):(\d+):(\d+)$`)
+)
+
+type Guru struct {
+ mg.ReducerType
+}
+
+func (g *Guru) RCond(mx *mg.Ctx) bool {
+ return mx.LangIs(mg.Go)
+}
+
+func (g *Guru) RMount(mx *mg.Ctx) {
+ go cmdrunner.Cmd{
+ Name: "go",
+ Args: []string{"install", "margo.sh/vendor/golang.org/x/tools/cmd/guru"},
+ Env: yotsuba.AgentBuildEnv,
+ OutToErr: true,
+ }.Run()
+}
+
+func (g *Guru) Reduce(mx *mg.Ctx) *mg.State {
+ switch act := mx.Action.(type) {
+ case mg.QueryUserCmds:
+ return mx.AddUserCmds(
+ mg.UserCmd{
+ Title: "Guru Definition",
+ Name: "guru.definition",
+ Desc: "show declaration of selected identifier",
+ },
+ )
+ case mg.RunCmd:
+ return g.runCmd(mx, act)
+ default:
+ return mx.State
+ }
+}
+
+func (g *Guru) runCmd(mx *mg.Ctx, rc mg.RunCmd) *mg.State {
+ if rc.Name == "goto.definition" || rc.Name == "guru.definition" {
+ return mx.AddBuiltinCmds(mg.BuiltinCmd{Name: rc.Name, Run: g.runDef})
+ }
+
+ if rc.Name != mg.RcActuate || rc.StringFlag("button", "left") != "left" {
+ return mx.State
+ }
+
+ cx := NewViewCursorCtx(mx)
+ var onId *ast.Ident
+ var onSel *ast.SelectorExpr
+ if !cx.Set(&onId) && !cx.Set(&onSel) {
+ // we're not on a name, nothing to look for
+ return mx.State
+ }
+ // we're on a func decl name, we're already at the definition
+ if nm, _ := cx.FuncDeclName(); nm != "" {
+ return mx.State
+ }
+
+ return mx.AddBuiltinCmds(mg.BuiltinCmd{Name: rc.Name, Run: g.runDef})
+}
+
+func (g *Guru) runDef(cx *mg.CmdCtx) *mg.State {
+ go g.definition(cx)
+ return cx.State
+}
+
+func (g *Guru) definition(bx *mg.CmdCtx) {
+	defer bx.Output.Close()
+	defer bx.Begin(mg.Task{Title: "guru definition", ShowNow: true}).Done()
+
+	v := bx.View
+	dir := v.Dir()
+	fn := v.Filename()
+
+	if v.Path == "" {
+		tmpDir, err := ioutil.TempDir("", "guru")
+		if err == nil {
+			defer os.RemoveAll(tmpDir)
+			fn = filepath.Join(tmpDir, v.Name)
+			src, _ := v.ReadAll()
+			ioutil.WriteFile(fn, src, 0600)
+		}
+	}
+
+	cmd := exec.Command(
+		"guru",
+		"-json",
+		"-tags", g.wasmTags(bx.Ctx),
+		"-modified",
+		"definition",
+		fmt.Sprintf("%s:#%d", fn, v.Pos),
+	)
+	cmd.Dir = dir
+	buf := &bytes.Buffer{}
+	cmd.Stdout = buf
+	cmd.Stderr = bx.Output
+	cmd.Env = bx.Env.Environ()
+	if v.Dirty {
+		src, _ := v.ReadAll()
+		hdr := &bytes.Buffer{}
+		fmt.Fprintf(hdr, "%s\n%d\n", fn, len(src))
+		cmd.Stdin = io.MultiReader(hdr, bytes.NewReader(src))
+	}
+
+	if err := cmd.Run(); err != nil {
+		fmt.Fprintln(bx.Output, "Error:", err)
+		return
+	}
+
+	res := struct{ ObjPos string `json:"objpos"` }{}
+	if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
+		fmt.Fprintln(bx.Output, "cannot decode guru output:", err)
+		// bail out: without a decoded objpos the parse below can only fail with a second, misleading error
+		return
+	}
+
+	m := fnPosPat.FindStringSubmatch(res.ObjPos)
+	if len(m) != 4 {
+		fmt.Fprintln(bx.Output, "cannot parse guru objpos:", res.ObjPos)
+		return
+	}
+
+	n := func(s string) int {
+		n, _ := strconv.Atoi(s)
+		if n > 0 {
+			return n - 1
+		}
+		return 0
+	}
+
+	fn = m[1]
+	if v.Path == "" && filepath.Base(fn) == v.Name {
+		fn = v.Name
+	}
+	bx.Store.Dispatch(mg.Activate{
+		Path: fn,
+		Row:  n(m[2]),
+		Col:  n(m[3]),
+	})
+}
+
+func (g *Guru) wasmTags(mx *mg.Ctx) string {
+ tags := "js wasm"
+ sysjs := "syscall/js"
+ v := mx.View
+ src, _ := v.ReadAll()
+ if len(src) == 0 {
+ return ""
+ }
+
+ pf := ParseFile(mx, v.Filename(), src)
+ for _, spec := range pf.AstFile.Imports {
+ p := spec.Path
+ if p == nil {
+ continue
+ }
+ if s, _ := strconv.Unquote(p.Value); s == sysjs {
+ return tags
+ }
+ }
+ if v.Path == "" {
+ // file doesn't exist, so there's no package
+ return ""
+ }
+
+ pkg, _ := BuildContext(mx).ImportDir(mx.View.Dir(), 0)
+ if pkg == nil {
+ return ""
+ }
+ for _, l := range [][]string{pkg.Imports, pkg.TestImports} {
+ for _, s := range l {
+ if s == sysjs {
+ return tags
+ }
+ }
+ }
+
+ return ""
+}
diff --git a/src/margo.sh/golang/imports.go b/src/margo.sh/golang/imports.go
new file mode 100644
index 00000000..1c96043a
--- /dev/null
+++ b/src/margo.sh/golang/imports.go
@@ -0,0 +1,245 @@
+package golang
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "strconv"
+ "strings"
+)
+
+type impSpec struct {
+ Name string
+ Path string
+}
+
+type impSpecList []impSpec
+
+func (l impSpecList) contains(p impSpec) bool {
+ for _, q := range l {
+ if p == q {
+ return true
+ }
+ }
+ return false
+}
+
+func (l impSpecList) mergeWithSrc(fn string, src []byte) (updatedSrc []byte, mergedImports impSpecList, err error) {
+ // modifying the AST in areas near comments is a losing battle
+ // so we're trying a different strategy:
+ // * `import "C"` is ignored as usual
+ // * if there are no other imports:
+ // insert `import ("P")\n` below the `package` line
+ // * if there is an `import ("X")`:
+ // insert `import ("P";X")`
+ // * if there is an `import "X"`:
+ // insert `import ("P";"X"\n)\n`
+
+ eol := func(src []byte, pos int) int {
+ if i := bytes.IndexByte(src[pos:], '\n'); i >= 0 {
+ return pos + i + 1
+ }
+ return len(src)
+ }
+
+ fset, af, err := parseImportsOnly(fn, src)
+ if err != nil {
+ return nil, nil, err
+ }
+ tf := fset.File(af.Pos())
+ tailPos := eol(src, tf.Offset(af.End()))
+
+ var target *ast.GenDecl
+ skip := map[impSpec]bool{}
+ for _, decl := range af.Decls {
+ decl, ok := decl.(*ast.GenDecl)
+ if !ok || decl.Tok != token.IMPORT {
+ continue
+ }
+ for _, spec := range decl.Specs {
+ spec, ok := spec.(*ast.ImportSpec)
+ if !ok || spec.Path == nil {
+ continue
+ }
+ imp := impSpec{}
+ imp.Path, _ = strconv.Unquote(spec.Path.Value)
+ if spec.Name != nil {
+ imp.Name = spec.Name.Name
+ }
+ skip[imp] = true
+ if imp.Path != "C" && target == nil {
+ target = decl
+ }
+ }
+ }
+
+ out := &bytes.Buffer{}
+ merge := func() {
+ for _, imp := range l {
+ if skip[imp] {
+ continue
+ }
+ skip[imp] = true
+
+ if imp.Name != "" {
+ out.WriteString(imp.Name)
+ }
+ out.WriteString(strconv.Quote(imp.Path))
+ out.WriteByte(';')
+ mergedImports = append(mergedImports, imp)
+ }
+ }
+
+ switch {
+ case target == nil:
+ i := eol(src, tf.Offset(af.Name.End()))
+ out.Write(src[:i])
+ out.WriteString("\nimport (")
+ merge()
+ out.WriteString(")\n")
+ out.Write(src[i:])
+ case target.Lparen > target.TokPos:
+ i := tf.Offset(target.Lparen) + 1
+ out.Write(src[:i])
+ merge()
+ out.Write(src[i:])
+ default:
+ i := tf.Offset(target.TokPos) + len("import")
+ j := eol(src, i)
+ out.Write(src[:i])
+ out.WriteString("(")
+ merge()
+ out.Write(src[i:j])
+ out.WriteString("\n)\n")
+ out.Write(src[j:])
+ }
+
+ fset, af, err = parseImportsOnly(fn, out.Bytes())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ out.Reset()
+ pr := &printer.Config{
+ Tabwidth: 4,
+ Mode: printer.TabIndent | printer.UseSpaces,
+ }
+ if err := pr.Fprint(out, fset, af); err != nil {
+ return nil, nil, err
+ }
+ out.Write(src[tailPos:])
+ return out.Bytes(), mergedImports, nil
+}
+
+func unquote(s string) string {
+ return strings.Trim(s, "\"`")
+}
+
+func quote(s string) string {
+ return `"` + unquote(s) + `"`
+}
+
+func updateImports(fn string, src []byte, add, rem impSpecList) (_ []byte, updated bool) {
+ fset := token.NewFileSet()
+ af, err := parser.ParseFile(fset, fn, src, parser.ImportsOnly|parser.ParseComments)
+ if err != nil || af.Name == nil || !af.End().IsValid() {
+ return src, false
+ }
+ tf := fset.File(af.Pos())
+ ep := tf.Offset(af.End())
+ if i := bytes.IndexByte(src[ep:], '\n'); i >= 0 {
+ // make sure to include the ImportComment
+ ep += i + 1
+ } else {
+ ep = tf.Size()
+ }
+ updateImpSpecs(fset, af, ep, add, rem)
+ buf := &bytes.Buffer{}
+ pr := &printer.Config{Tabwidth: 4, Mode: printer.TabIndent | printer.UseSpaces}
+ if pr.Fprint(buf, fset, af) != nil {
+ return src, false
+ }
+ p, s := buf.Bytes(), src[ep:]
+ if len(s) >= 2 && s[0] == '\n' && s[1] == '\n' {
+ p = bytes.TrimRight(p, "\n")
+ }
+ return append(p, s...), true
+}
+
+func updateImpSpecs(fset *token.FileSet, af *ast.File, ep int, add, rem impSpecList) {
+ var firstImpDecl *ast.GenDecl
+ imports := map[impSpec]bool{}
+ for _, decl := range af.Decls {
+ gdecl, ok := decl.(*ast.GenDecl)
+ if !ok || gdecl.Tok != token.IMPORT {
+ continue
+ }
+ hasC := false
+ i := 0
+ for _, spec := range gdecl.Specs {
+ ispec, ok := spec.(*ast.ImportSpec)
+ if !ok {
+ continue
+ }
+
+ sd := impSpec{Path: unquote(ispec.Path.Value)}
+ if ispec.Name != nil {
+ sd.Name = ispec.Name.String()
+ }
+
+ switch {
+ case sd.Path == "C":
+ hasC = true
+ case rem.contains(sd):
+ if i > 0 {
+ if lspec, ok := gdecl.Specs[i-1].(*ast.ImportSpec); ok {
+ lspec.EndPos = ispec.Pos()
+ }
+ }
+ continue
+ default:
+ imports[sd] = true
+ }
+
+ gdecl.Specs[i] = spec
+ i += 1
+ }
+ gdecl.Specs = gdecl.Specs[:i]
+
+ if !hasC && firstImpDecl == nil {
+ firstImpDecl = gdecl
+ }
+ }
+
+ if len(add) > 0 {
+ if firstImpDecl == nil {
+ tf := fset.File(af.Pos())
+ firstImpDecl = &ast.GenDecl{TokPos: tf.Pos(ep), Tok: token.IMPORT, Lparen: 1}
+ af.Decls = append(af.Decls, firstImpDecl)
+ }
+
+ addSpecs := make([]ast.Spec, 0, len(firstImpDecl.Specs)+len(add))
+ for _, sd := range add {
+ if imports[sd] {
+ continue
+ }
+ imports[sd] = true
+ ispec := &ast.ImportSpec{
+ Path: &ast.BasicLit{Value: quote(sd.Path), Kind: token.STRING},
+ }
+ if sd.Name != "" {
+ ispec.Name = &ast.Ident{Name: sd.Name}
+ }
+ addSpecs = append(addSpecs, ispec)
+ }
+ firstImpDecl.Specs = append(addSpecs, firstImpDecl.Specs...)
+ }
+}
+
+func parseImportsOnly(fn string, src []byte) (*token.FileSet, *ast.File, error) {
+ fset := token.NewFileSet()
+ af, err := parser.ParseFile(fset, fn, src, parser.ParseComments|parser.ImportsOnly)
+ return fset, af, err
+}
diff --git a/src/margo.sh/golang/internal/pkglst/mod.go b/src/margo.sh/golang/internal/pkglst/mod.go
new file mode 100644
index 00000000..49694b8e
--- /dev/null
+++ b/src/margo.sh/golang/internal/pkglst/mod.go
@@ -0,0 +1 @@
+package pkglst
diff --git a/src/margo.sh/golang/internal/pkglst/pkglst.go b/src/margo.sh/golang/internal/pkglst/pkglst.go
new file mode 100644
index 00000000..3ebb0d51
--- /dev/null
+++ b/src/margo.sh/golang/internal/pkglst/pkglst.go
@@ -0,0 +1,148 @@
+package pkglst
+
+import (
+ "margo.sh/golang/gopkg"
+ "margo.sh/mg"
+ "margo.sh/vfs"
+ "path/filepath"
+ "sort"
+ "sync"
+)
+
+type View struct {
+ List []*gopkg.Pkg
+ ByDir map[string]*gopkg.Pkg
+ ByImportPath map[string][]*gopkg.Pkg
+ ByName map[string][]*gopkg.Pkg
+}
+
+func (vu View) shallowClone(lstLen int) View {
+ x := View{
+ ByDir: make(map[string]*gopkg.Pkg, len(vu.ByDir)+lstLen),
+ ByImportPath: make(map[string][]*gopkg.Pkg, len(vu.ByImportPath)+lstLen),
+ ByName: make(map[string][]*gopkg.Pkg, len(vu.ByName)+lstLen),
+ }
+ for k, p := range vu.ByDir {
+ x.ByDir[k] = p
+ }
+ for k, l := range vu.ByImportPath {
+ x.ByImportPath[k] = l[:len(l):len(l)]
+ }
+ for k, l := range vu.ByName {
+ x.ByName[k] = l[:len(l):len(l)]
+ }
+ return x
+}
+
+func (vu View) PruneDir(dir string) View {
+ dir = filepath.Clean(dir)
+ p, exists := vu.ByDir[dir]
+ if !exists {
+ return vu
+ }
+
+ delpkg := func(m map[string][]*gopkg.Pkg, k string) {
+ l := m[k]
+ if len(l) == 0 {
+ return
+ }
+
+ x := make([]*gopkg.Pkg, 0, len(l)-1)
+ for _, p := range l {
+ if p.Dir != dir {
+ x = append(x, p)
+ }
+ }
+
+ if len(x) == 0 {
+ delete(m, k)
+ } else {
+ m[k] = x
+ }
+ }
+ x := vu.shallowClone(0)
+ delete(x.ByDir, p.Dir)
+ delpkg(x.ByImportPath, p.ImportPath)
+ delpkg(x.ByName, p.Name)
+ return x
+}
+
+func (vu View) Add(lst ...*gopkg.Pkg) View {
+ x := vu.shallowClone(len(lst))
+
+ for _, p := range lst {
+ x.ByDir[p.Dir] = p
+ x.ByImportPath[p.ImportPath] = append(x.ByImportPath[p.ImportPath], p)
+ x.ByName[p.Name] = append(x.ByName[p.Name], p)
+ }
+
+ x.List = make([]*gopkg.Pkg, 0, len(x.ByDir))
+ for _, p := range x.ByDir {
+ x.List = append(x.List, p)
+ }
+ sort.Slice(x.List, func(i, j int) bool {
+ a, b := x.List[i], x.List[j]
+ switch {
+ case a.Name != b.Name:
+ return a.Name < b.Name
+ case a.ImportPath != b.ImportPath:
+ return a.ImportPath < b.ImportPath
+ default:
+ return a.Dir < b.Dir
+ }
+ })
+
+ return x
+}
+
+type Cache struct {
+ mu sync.RWMutex
+ view View
+}
+
+func (cc *Cache) Scan(mx *mg.Ctx, dir string) (output []byte, _ error) {
+ lst, out, err := cc.vfsList(mx, dir)
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ cc.view = cc.view.Add(lst...)
+
+ return out, err
+}
+
+func (cc *Cache) PruneDir(dir string) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ cc.view = cc.view.PruneDir(dir)
+}
+
+func (cc *Cache) Add(l ...gopkg.Pkg) {
+	x := make([]*gopkg.Pkg, len(l))
+	for i := range l {
+		p := l[i] // fresh copy per iteration: &p of a range variable aliases one slot pre-go1.22
+		p.Finalize()
+		x[i] = &p
+	}
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	cc.view = cc.view.Add(x...)
+}
+
+func (cc *Cache) View() View {
+ cc.mu.RLock()
+ defer cc.mu.RUnlock()
+
+ return cc.view
+}
+
+func (cc *Cache) vfsList(mx *mg.Ctx, dir string) ([]*gopkg.Pkg, []byte, error) {
+ lst := []*gopkg.Pkg{}
+ mx.VFS.Peek(dir).Branches(func(nd *vfs.Node) {
+ if p, err := gopkg.ImportDirNd(mx, nd); err == nil {
+ lst = append(lst, p)
+ }
+ })
+ return lst, nil, nil
+}
diff --git a/src/disposa.blue/margo/vendor/golang.org/x/crypto/LICENSE b/src/margo.sh/golang/internal/srcimporter/LICENSE
similarity index 100%
rename from src/disposa.blue/margo/vendor/golang.org/x/crypto/LICENSE
rename to src/margo.sh/golang/internal/srcimporter/LICENSE
diff --git a/src/margo.sh/golang/internal/srcimporter/README.md b/src/margo.sh/golang/internal/srcimporter/README.md
new file mode 100644
index 00000000..e7d86edb
--- /dev/null
+++ b/src/margo.sh/golang/internal/srcimporter/README.md
@@ -0,0 +1,2 @@
+This is a fork of go/internal/srcimporter
+
diff --git a/src/margo.sh/golang/internal/srcimporter/srcimporter.go b/src/margo.sh/golang/internal/srcimporter/srcimporter.go
new file mode 100644
index 00000000..5ca011af
--- /dev/null
+++ b/src/margo.sh/golang/internal/srcimporter/srcimporter.go
@@ -0,0 +1,236 @@
+package srcimporter
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io"
+ "margo.sh/golang/gopkg"
+ "margo.sh/mg"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// An Importer provides the context for importing packages from source code.
+type Importer struct {
+ mx *mg.Ctx
+ overlay types.ImporterFrom
+
+ ctxt *build.Context
+ fset *token.FileSet
+ sizes types.Sizes
+ packages map[string]*types.Package
+}
+
+// NewImporter returns a new Importer for the given context, file set, and map
+// of packages. The context is used to resolve import paths to package paths,
+// and identifying the files belonging to the package. If the context provides
+// non-nil file system functions, they are used instead of the regular package
+// os functions. The file set is used to track position information of package
+// files; and imported packages are added to the packages map.
+func New(mx *mg.Ctx, overlay types.ImporterFrom, ctxt *build.Context, fset *token.FileSet, packages map[string]*types.Package) *Importer {
+ return &Importer{
+ mx: mx,
+ overlay: overlay,
+
+ ctxt: ctxt,
+ fset: fset,
+ sizes: types.SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found
+ packages: packages,
+ }
+}
+
+// Importing is a sentinel taking the place in Importer.packages
+// for a package that is in the process of being imported.
+var importing types.Package
+
+// Import(path) is a shortcut for ImportFrom(path, ".", 0).
+func (p *Importer) Import(path string) (*types.Package, error) {
+ return p.ImportFrom(path, ".", 0) // use "." rather than "" (see issue #24441)
+}
+
+// ImportFrom imports the package with the given import path resolved from the given srcDir,
+// adds the new package to the set of packages maintained by the importer, and returns the
+// package. Package path resolution and file system operations are controlled by the context
+// maintained with the importer. The import mode must be zero but is otherwise ignored.
+// Packages that are not comprised entirely of pure Go files may fail to import because the
+// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies).
+func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
+ if mode != 0 {
+ panic("non-zero import mode")
+ }
+
+ if abs, err := p.absPath(srcDir); err == nil { // see issue #14282
+ srcDir = abs
+ }
+
+ ctxt := p.ctxt
+ if path == "syscall/js" {
+ c := *p.ctxt
+ c.BuildTags = append(c.BuildTags, "js", "wasm")
+ ctxt = &c
+ }
+
+ pp, err := gopkg.FindPkg(p.mx, path, srcDir)
+ if err != nil {
+ return nil, err
+ }
+
+ bp, err := ctxt.ImportDir(pp.Dir, 0)
+ if err != nil {
+ return nil, err // err may be *build.NoGoError - return as is
+ }
+
+ // package unsafe is known to the type checker
+ if bp.ImportPath == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // no need to re-import if the package was imported completely before
+ pkg := p.packages[bp.ImportPath]
+ if pkg != nil {
+ if pkg == &importing {
+ return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath)
+ }
+ if !pkg.Complete() {
+ // Package exists but is not complete - we cannot handle this
+ // at the moment since the source importer replaces the package
+ // wholesale rather than augmenting it (see #19337 for details).
+ // Return incomplete package with error (see #16088).
+ return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath)
+ }
+ return pkg, nil
+ }
+
+ p.packages[bp.ImportPath] = &importing
+ defer func() {
+ // clean up in case of error
+ // TODO(gri) Eventually we may want to leave a (possibly empty)
+ // package in the map in all cases (and use that package to
+ // identify cycles). See also issue 16088.
+ if p.packages[bp.ImportPath] == &importing {
+ p.packages[bp.ImportPath] = nil
+ }
+ }()
+
+ var filenames []string
+ filenames = append(filenames, bp.GoFiles...)
+ filenames = append(filenames, bp.CgoFiles...)
+
+ files, err := p.parseFiles(bp.Dir, filenames)
+ if err != nil {
+ return nil, err
+ }
+
+ // type-check package files
+ var firstHardErr error
+ conf := types.Config{
+ IgnoreFuncBodies: true,
+ FakeImportC: true,
+ // continue type-checking after the first error
+ Error: func(err error) {
+ if firstHardErr == nil && !err.(types.Error).Soft {
+ firstHardErr = err
+ }
+ },
+ Importer: p.overlay,
+ Sizes: p.sizes,
+ }
+ pkg, err = conf.Check(bp.ImportPath, p.fset, files, nil)
+ if err != nil {
+ // If there was a hard error it is possibly unsafe
+ // to use the package as it may not be fully populated.
+ // Do not return it (see also #20837, #20855).
+ if firstHardErr != nil {
+ pkg = nil
+ err = firstHardErr // give preference to first hard error over any soft error
+ }
+ return pkg, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err)
+ }
+ if firstHardErr != nil {
+ // this can only happen if we have a bug in go/types
+ panic("package is not safe yet no error was returned")
+ }
+
+ p.packages[bp.ImportPath] = pkg
+ return pkg, nil
+}
+
+func (p *Importer) parseFiles(dir string, filenames []string) ([]*ast.File, error) {
+ // use build.Context's OpenFile if there is one
+ open := p.ctxt.OpenFile
+ if open == nil {
+ open = func(name string) (io.ReadCloser, error) { return os.Open(name) }
+ }
+
+ files := make([]*ast.File, len(filenames))
+ errors := make([]error, len(filenames))
+
+ var wg sync.WaitGroup
+ wg.Add(len(filenames))
+ for i, filename := range filenames {
+ go func(i int, filepath string) {
+ defer wg.Done()
+ src, err := open(filepath)
+ if err != nil {
+ errors[i] = err // open provides operation and filename in error
+ return
+ }
+ files[i], errors[i] = p.parseFile(p.fset, filepath, src)
+ src.Close() // ignore Close error - parsing may have succeeded which is all we need
+ }(i, p.joinPath(dir, filename))
+ }
+ wg.Wait()
+
+ // if there are errors, return the first one for deterministic results
+ for _, err := range errors {
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return files, nil
+}
+
+func (p *Importer) parseFile(fset *token.FileSet, fn string, src interface{}) (*ast.File, error) {
+ f, err := parser.ParseFile(fset, fn, src, 0)
+ if f == nil {
+ return nil, err
+ }
+
+ // trim func bodies to reduce memory
+ for _, d := range f.Decls {
+ switch d := d.(type) {
+ case *ast.FuncDecl:
+ d.Body = &ast.BlockStmt{}
+ }
+ }
+
+ return f, err
+}
+
+// context-controlled file system operations
+
+func (p *Importer) absPath(path string) (string, error) {
+ // TODO(gri) This should be using p.ctxt.AbsPath which doesn't
+ // exist but probably should. See also issue #14282.
+ return filepath.Abs(path)
+}
+
+func (p *Importer) isAbsPath(path string) bool {
+ if f := p.ctxt.IsAbsPath; f != nil {
+ return f(path)
+ }
+ return filepath.IsAbs(path)
+}
+
+func (p *Importer) joinPath(elem ...string) string {
+ if f := p.ctxt.JoinPath; f != nil {
+ return f(elem...)
+ }
+ return filepath.Join(elem...)
+}
diff --git a/src/margo.sh/golang/lint.go b/src/margo.sh/golang/lint.go
new file mode 100644
index 00000000..33292a63
--- /dev/null
+++ b/src/margo.sh/golang/lint.go
@@ -0,0 +1,79 @@
+package golang
+
+import (
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+)
+
+// Linter wraps mg.Linter to restrict its Langs to Go
+//
+// all top-level fields are passed along to the underlying Linter
+type Linter struct {
+ mg.Linter
+
+ Actions []mg.Action
+
+ Name string
+ Args []string
+
+ Tag mg.IssueTag
+ Label string
+ TempDir []string
+}
+
+// RInit syncs top-level fields with the underlying Linter
+func (lt *Linter) RInit(mx *mg.Ctx) {
+ l := &lt.Linter // pointer to the embedded Linter so the assignments below mutate it (was the invalid `<.Linter`)
+ l.Actions = lt.Actions
+ l.Name = lt.Name
+ l.Args = lt.Args
+ l.Tag = lt.Tag
+ l.Label = lt.Label
+ l.TempDir = lt.TempDir
+
+ lt.Linter.RInit(mx)
+}
+
+// RCond restricts reduction to Go files
+func (lt *Linter) RCond(mx *mg.Ctx) bool {
+ return mx.LangIs(goutil.Langs...) && lt.Linter.RCond(mx)
+}
+
+// GoInstall returns a Linter that runs `go install args...`
+func GoInstall(args ...string) *Linter {
+ return &Linter{
+ Name: "go",
+ Args: append([]string{"install"}, args...),
+ Label: "Go/Install",
+ }
+}
+
+// GoInstallDiscardBinaries returns a Linter that runs `go install args...`
+// it's like GoInstall, but additionally sets GOBIN to a temp directory
+// resulting in all binaries being discarded
+func GoInstallDiscardBinaries(args ...string) *Linter {
+ return &Linter{
+ Name: "go",
+ Args: append([]string{"install"}, args...),
+ Label: "Go/Install",
+ TempDir: []string{"GOBIN"},
+ }
+}
+
+// GoVet returns a Linter that runs `go vet args...`
+func GoVet(args ...string) *Linter {
+ return &Linter{
+ Name: "go",
+ Args: append([]string{"vet"}, args...),
+ Label: "Go/Vet",
+ }
+}
+
+// GoTest returns a Linter that runs `go test args...`
+func GoTest(args ...string) *Linter {
+ return &Linter{
+ Name: "go",
+ Args: append([]string{"test"}, args...),
+ Label: "Go/Test",
+ }
+}
diff --git a/src/margo.sh/golang/margocode.go b/src/margo.sh/golang/margocode.go
new file mode 100644
index 00000000..b5aa207d
--- /dev/null
+++ b/src/margo.sh/golang/margocode.go
@@ -0,0 +1,602 @@
+package golang
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "golang.org/x/tools/go/gcexportdata"
+ "log"
+ "margo.sh/golang/gopkg"
+ "margo.sh/golang/goutil"
+ "margo.sh/golang/internal/pkglst"
+ "margo.sh/golang/internal/srcimporter"
+ "margo.sh/kimporter"
+ "margo.sh/mg"
+ "margo.sh/mgpf"
+ "margo.sh/mgutil"
+ "margo.sh/vfs"
+ "math"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "sync"
+ "text/tabwriter"
+ "time"
+)
+
+const (
+ // KimPorter tells the importer to use Kim-Porter to import packages
+ KimPorter ImporterMode = iota
+
+ // SrcImporterOnly tells the importer use source only, with no fall-back
+ SrcImporterOnly
+
+ // BinImporterOnly tells the importer use binary packages only, with no fall-back
+ BinImporterOnly
+
+ // SrcImporterWithFallback tells the importer use source code, then fall-back to a binary package
+ SrcImporterWithFallback
+)
+
+var (
+ mctl *marGocodeCtl
+)
+
+func init() {
+ mctl = newMarGocodeCtl()
+ mg.DefaultReducers.Before(mctl)
+}
+
+type importerFactory func(mx *mg.Ctx, overlay types.ImporterFrom) types.ImporterFrom
+
+// ImporterMode specifies the mode in which the corresponding importer should operate
+type ImporterMode int
+
+type marGocodeCtl struct {
+ mg.ReducerType
+
+ mxQ *mgutil.ChanQ
+
+ mu sync.RWMutex
+ mgcctl MarGocodeCtl
+ pkgs *mgcCache
+ cmdMap map[string]func(*mg.CmdCtx)
+ logs *log.Logger
+
+ plst pkglst.Cache
+}
+
+func (mgc *marGocodeCtl) importerFactories() (newDefaultImporter, newFallbackImporter importerFactory, srcMode bool) {
+ s := mgc.newSrcImporter
+ b := mgc.newBinImporter
+ switch mgc.cfg().ImporterMode {
+ case SrcImporterWithFallback:
+ return s, b, true
+ case SrcImporterOnly:
+ return s, nil, true
+ case BinImporterOnly:
+ return b, nil, false
+ default:
+ panic("unreachable")
+ }
+}
+
+// importPathByName returns an import path whose pkg's name is pkgName
+func (mgc *marGocodeCtl) importPathByName(pkgName, srcDir string) string {
+ pkl := mgc.plst.View().ByName[pkgName]
+ switch len(pkl) {
+ case 0:
+ return ""
+ case 1:
+ if p := pkl[0]; p.Importable(srcDir) {
+ return p.ImportPath
+ }
+ return ""
+ }
+
+ // check the cache
+ // it includes packages the user actually imported
+ // so there's theoretically a better chance of importing the ideal package
+ // in cases where there's a name collision
+ cached := func(pk *gopkg.Pkg) bool {
+ ok := false
+ mgc.pkgs.forEach(func(e mgcCacheEnt) bool {
+ if p := e.Pkg; p.Name() == pk.Name && e.Key.Path == pk.ImportPath {
+ ok = true
+ return false
+ }
+ return true
+ })
+ return ok
+ }
+
+ importPath := ""
+ for _, p := range pkl {
+ if !p.Importable(srcDir) {
+ continue
+ }
+ importPath = p.ImportPath
+ if cached(p) {
+ break
+ }
+ }
+ return importPath
+}
+
+// newSrcImporter returns a new instance a source code importer
+func (mgc *marGocodeCtl) newSrcImporter(mx *mg.Ctx, overlay types.ImporterFrom) types.ImporterFrom {
+ return srcimporter.New(
+ mx,
+ overlay,
+
+ goutil.BuildContextWithoutCallbacks(mx),
+ token.NewFileSet(),
+ map[string]*types.Package{},
+ )
+}
+
+// newBinImporter returns a new instance of a binary package importer for packages compiled by runtime.Compiler
+func (mgc *marGocodeCtl) newBinImporter(mx *mg.Ctx, overlay types.ImporterFrom) types.ImporterFrom {
+ if runtime.Compiler == "gc" {
+ return gcexportdata.NewImporter(token.NewFileSet(), map[string]*types.Package{})
+ }
+ return importer.Default().(types.ImporterFrom)
+}
+
+func (mgc *marGocodeCtl) processQ(mx *mg.Ctx) {
+ defer func() { recover() }()
+
+ switch mx.Action.(type) {
+ case mg.ViewModified, mg.ViewSaved:
+ mgc.autoPruneCache(mx)
+ case mg.ViewActivated:
+ mgc.preloadPackages(mx)
+ }
+}
+
+func (mgc *marGocodeCtl) preloadPackages(mx *mg.Ctx) {
+ cfg := mgc.cfg()
+ if cfg.NoPreloading {
+ return
+ }
+
+ v := mx.View
+ src, _ := v.ReadAll()
+ if len(src) == 0 {
+ return
+ }
+
+ defer mx.Begin(mg.Task{Title: "Preloading packages in " + v.ShortFn(mx.Env)}).Done()
+
+ fset := token.NewFileSet()
+ af, _ := parser.ParseFile(fset, v.Filename(), src, parser.ImportsOnly)
+ if af == nil || len(af.Imports) == 0 {
+ return
+ }
+
+ var importFrom func(string, string, types.ImportMode) (*types.Package, error)
+ if cfg.ImporterMode == KimPorter {
+ importFrom = kimporter.New(mx, nil).ImportFrom
+ } else {
+ importFrom = mgc.newGcSuggest(mx).imp.ImportFrom
+ }
+
+ dir := v.Dir()
+ for _, spec := range af.Imports {
+ importFrom(unquote(spec.Path.Value), dir, 0)
+ }
+}
+
+func (mgc *marGocodeCtl) autoPruneCache(mx *mg.Ctx) {
+ pkgInf, err := mgc.pkgInfo(mx, ".", mx.View.Dir())
+ if err == nil {
+ for _, source := range []bool{true, false} {
+ mgc.pkgs.del(pkgInf.cacheKey(source))
+ }
+ // TODO: should we prune the plst?
+ // we only need to do anything if the pkg is deleted or its name changes
+ // both cases are rare and we would need to reload it somehow
+ }
+
+ dpr := mgc.cfg().DebugPrune
+
+ if dpr != nil {
+ mgc.pkgs.forEach(func(e mgcCacheEnt) bool {
+ if dpr(e.Pkg) {
+ mgc.pkgs.del(e.Key)
+ }
+ return true
+ })
+ }
+}
+
+func (mgc *marGocodeCtl) cfg() MarGocodeCtl {
+ mgc.mu.RLock()
+ defer mgc.mu.RUnlock()
+
+ return mgc.mgcctl
+}
+
+func (mgc *marGocodeCtl) configure(f func(*marGocodeCtl)) {
+ mgc.mu.Lock()
+ defer mgc.mu.Unlock()
+
+ f(mgc)
+}
+
+func newMarGocodeCtl() *marGocodeCtl {
+ mgc := &marGocodeCtl{}
+ mgc.pkgs = &mgcCache{m: map[mgcCacheKey]mgcCacheEnt{}}
+ mgc.cmdMap = map[string]func(*mg.CmdCtx){
+ "help": mgc.helpCmd,
+ "cache-list": mgc.cacheListCmd,
+ "cache-prune": mgc.cachePruneCmd,
+ "unimported-packages": mgc.pkglistPackagesCmd,
+ "pkg-list": mgc.pkglistPackagesCmd,
+ }
+ mgc.mxQ = mgutil.NewChanQ(10)
+ go func() {
+ for v := range mgc.mxQ.C() {
+ mgc.processQ(v.(*mg.Ctx))
+ }
+ }()
+ return mgc
+}
+
+func (mgc *marGocodeCtl) RCond(mx *mg.Ctx) bool {
+ if mx.LangIs(mg.Go) {
+ return true
+ }
+ if act, ok := mx.Action.(mg.RunCmd); ok {
+ for _, c := range mgc.cmds() {
+ if c.Name == act.Name {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (mgc *marGocodeCtl) RMount(mx *mg.Ctx) {
+ mgc.initPlst(mx)
+}
+
+func (mgc *marGocodeCtl) Reduce(mx *mg.Ctx) *mg.State {
+ switch mx.Action.(type) {
+ case mg.RunCmd:
+ return mx.AddBuiltinCmds(mgc.cmds()...)
+ case mg.ViewModified, mg.ViewSaved, mg.ViewActivated:
+ // ViewSaved is probably not required, but saving might result in a `go install`
+ // which results in an updated package.a file
+ mgc.mxQ.Put(mx)
+ }
+
+ return mx.State
+}
+
+func (mgc *marGocodeCtl) scanVFS(mx *mg.Ctx, rootName, rootDir string) {
+ // TODO: (eventually) move this function into plst.Scan
+ // for now, the extra scan at the end is fast enough to not be worth the complexity
+ dir := filepath.Join(rootDir, "src")
+ tsk := mg.Task{Title: "VFS.Scan " + rootName + " ( " + mgutil.ShortFn(rootDir, mx.Env) + " )"}
+ defer mx.Begin(tsk).Done()
+
+ mu := sync.Mutex{}
+ pkgs := 0
+ preload := func(nd *vfs.Node) {
+ _, err := gopkg.ImportDirNd(mx, nd)
+ if err != nil {
+ return
+ }
+ mu.Lock()
+ pkgs++
+ mu.Unlock()
+ }
+ start := time.Now()
+ wg := &sync.WaitGroup{}
+ procs := runtime.NumCPU()
+ dirs := make(chan *vfs.Node, procs*100)
+ proc := func(wg *sync.WaitGroup) {
+ defer wg.Done()
+
+ for de := range dirs {
+ preload(de)
+ }
+ }
+ for i := 0; i < procs; i++ {
+ wg.Add(1)
+ go proc(wg)
+ }
+ mx.VFS.Scan(dir, vfs.ScanOptions{
+ Filter: gopkg.ScanFilter,
+ Dirs: func(nd *vfs.Node) { dirs <- nd },
+ })
+ close(dirs)
+ wg.Wait()
+ mgc.plst.Scan(mx, dir)
+ dur := mgpf.Since(start)
+ mx.Log.Printf("%s: %d packages preloaded in %s\n", tsk.Title, pkgs, dur)
+}
+
+func (mgc *marGocodeCtl) initPlst(mx *mg.Ctx) {
+ bctx := BuildContext(mx)
+ mx = mx.SetState(mx.SetEnv(
+ mx.Env.Merge(mg.EnvMap{
+ "GOROOT": bctx.GOROOT,
+ "GOPATH": bctx.GOPATH,
+ }),
+ ))
+
+ go mgc.scanVFS(mx, "GOROOT", bctx.GOROOT)
+ for _, root := range PathList(bctx.GOPATH) {
+ go mgc.scanVFS(mx, "GOPATH", root)
+ }
+}
+
+// srcMode returns true if the ImporterMode is SrcImporterOnly or SrcImporterWithFallback
+func (mgc *marGocodeCtl) srcMode() bool {
+ switch mgc.cfg().ImporterMode {
+ case SrcImporterOnly, SrcImporterWithFallback:
+ return true
+ case BinImporterOnly:
+ return false
+ default:
+ panic("unreachable") // NOTE(review): reached for KimPorter (the zero value) — confirm callers never call srcMode in that mode
+ }
+}
+
+func (mgc *marGocodeCtl) pkgInfo(mx *mg.Ctx, impPath, srcDir string) (gsuPkgInfo, error) {
+ start := time.Now()
+ defer func() {
+ dur := time.Since(start)
+ if dur > 10*time.Millisecond {
+ mgc.dbgf("pkgInfo: %s: %s\n", impPath, dur)
+ }
+ }()
+
+ p, err := gopkg.FindPkg(mx, impPath, srcDir)
+ if err != nil {
+ return gsuPkgInfo{}, err
+ }
+ return gsuPkgInfo{
+ Path: p.ImportPath,
+ Dir: p.Dir,
+ Std: p.Goroot,
+ }, nil
+}
+
+// dbgf logs a debug message when debug logging is enabled (mgc.logs != nil).
+func (mgc *marGocodeCtl) dbgf(format string, a ...interface{}) {
+ mgc.mu.RLock() // read-only access to mgc.logs; RLock matches cfg() and doesn't block other readers
+ logs := mgc.logs
+ mgc.mu.RUnlock()
+
+ if logs == nil {
+ return
+ }
+
+ if !strings.HasSuffix(format, "\n") {
+ format += "\n"
+ }
+ logs.Printf("margocode: "+format, a...)
+}
+
+func (mgc *marGocodeCtl) cmds() mg.BuiltinCmdList {
+ return mg.BuiltinCmdList{
+ mg.BuiltinCmd{
+ Name: "margocodectl",
+ Desc: "introspect and manage the margocode cache and state",
+ Run: mgc.cmd,
+ },
+ }
+}
+
+// cacheListCmd lists cached packages, optionally ordered by -by={path|dur} and -desc.
+func (mgc *marGocodeCtl) cacheListCmd(cx *mg.CmdCtx) {
+ defer cx.Output.Close()
+
+ ents := mgc.pkgs.entries()
+ lessFuncs := map[string]func(i, j int) bool{
+ "path": func(i, j int) bool {
+ return ents[i].Key.Path < ents[j].Key.Path
+ },
+ "dur": func(i, j int) bool {
+ return ents[i].Dur < ents[j].Dur
+ },
+ }
+ orderNames := func() string {
+ l := make([]string, 0, len(lessFuncs))
+ for k := range lessFuncs { // idiomatic form; `for k, _ :=` is flagged by gofmt -s
+ l = append(l, k)
+ }
+ sort.Strings(l)
+ return strings.Join(l, "|")
+ }()
+
+ by := "path"
+ desc := false
+ flags := flag.NewFlagSet(cx.Name, flag.ContinueOnError)
+ flags.SetOutput(cx.Output)
+ flags.BoolVar(&desc, "desc", desc, "Order results in descending order")
+ flags.StringVar(&by, "by", by, "Field to order by: "+orderNames)
+ err := flags.Parse(cx.Args)
+ if err != nil {
+ return
+ }
+ less, ok := lessFuncs[by]
+ if !ok {
+ fmt.Fprintf(cx.Output, "Unknown order=%s. Expected one of: %s\n", by, orderNames)
+ flags.Usage()
+ return
+ }
+ if desc {
+ lf := less
+ less = func(i, j int) bool { return lf(j, i) }
+ }
+
+ if len(ents) == 0 {
+ fmt.Fprintln(cx.Output, "The cache is empty")
+ return
+ }
+
+ buf := &bytes.Buffer{}
+ tbw := tabwriter.NewWriter(cx.Output, 1, 4, 1, ' ', 0)
+ defer tbw.Flush()
+
+ digits := int(math.Floor(math.Log10(float64(len(ents)))) + 1)
+ sfxFormat := "\t%s\t%s\t%s\n"
+ hdrFormat := "%s" + sfxFormat
+ rowFormat := fmt.Sprintf("%%%dd/%d", digits, len(ents)) + sfxFormat
+
+ sort.Slice(ents, less)
+ fmt.Fprintf(buf, hdrFormat, "Count:", "Path:", "Duration:", "Mode:")
+ for i, e := range ents {
+ mode := "bin"
+ if e.Key.Source {
+ mode = "src"
+ }
+ fmt.Fprintf(buf, rowFormat, i+1, e.Key.Path, mgpf.D(e.Dur), mode)
+ }
+ tbw.Write(buf.Bytes())
+}
+
+func (mgc *marGocodeCtl) pkglistPackagesCmd(cx *mg.CmdCtx) {
+ defer cx.Output.Close()
+
+ type ent struct {
+ nm string
+ pth string
+ }
+
+ pkl := mgc.plst.View().List
+ buf := &bytes.Buffer{}
+ tbw := tabwriter.NewWriter(cx.Output, 1, 4, 1, ' ', 0)
+ defer tbw.Flush()
+
+ digits := int(math.Floor(math.Log10(float64(len(pkl)))) + 1)
+ sfxFormat := "\t%s\t%s\t%s\n"
+ hdrFormat := "%s" + sfxFormat
+ rowFormat := fmt.Sprintf("%%%dd/%d", digits, len(pkl)) + sfxFormat
+
+ fmt.Fprintf(buf, hdrFormat, "Count:", "Name:", "ImportPath:", "Dir:")
+ for i, p := range pkl {
+ fmt.Fprintf(buf, rowFormat, i+1, p.Name, p.ImportPath, strings.TrimSuffix(p.Dir, p.ImportPath))
+ }
+ tbw.Write(buf.Bytes())
+}
+
+func (mgc *marGocodeCtl) cachePruneCmd(cx *mg.CmdCtx) {
+ defer cx.Output.Close()
+
+ args := cx.Args
+ if len(args) == 0 {
+ args = []string{".*"}
+ }
+
+ pats := make([]*regexp.Regexp, 0, len(args))
+ for _, s := range args {
+ p, err := regexp.Compile(s)
+ if err == nil {
+ pats = append(pats, p)
+ } else {
+ fmt.Fprintf(cx.Output, "Error: regexp.Compile(%s): %s\n", s, err)
+ }
+ }
+
+ ents := mgc.pkgs.prune(pats...)
+ for _, e := range ents {
+ fmt.Fprintln(cx.Output, "Pruned:", e.Key)
+ }
+ fmt.Fprintln(cx.Output, "Pruned", len(ents), "entries")
+ debug.FreeOSMemory()
+}
+
+// helpCmd prints the margocodectl sub-command usage text.
+func (mgc *marGocodeCtl) helpCmd(cx *mg.CmdCtx) {
+ defer cx.Output.Close()
+
+ cx.Output.Write([]byte(`Usage: ` + cx.Name + ` $subcmd [args...]
+ cache-prune [regexp, or path...] - remove packages matching regexp from the cache. default: '.*'
+ cache-list - list cached packages, see '` + cx.Name + ` cache-list --help' for more details
+ pkg-list - list packages known to exist (in GOROOT, GOPATH, etc.)
+`))
+}
+
+func (mgc *marGocodeCtl) newGcSuggest(mx *mg.Ctx) *gcSuggest {
+ mgc.mu.RLock()
+ defer mgc.mu.RUnlock()
+
+ gsu := &gcSuggest{cfg: mgc.cfg()}
+ gsu.imp = gsu.newGsuImporter(mx)
+ return gsu
+}
+
+func (mgc *marGocodeCtl) cmd(cx *mg.CmdCtx) *mg.State {
+ cmd := mgc.helpCmd
+ if len(cx.Args) > 0 {
+ sub := cx.Args[0]
+ if c, ok := mgc.cmdMap[sub]; ok {
+ cmd = c
+ cx = cx.Copy(func(cx *mg.CmdCtx) {
+ cx.Args = cx.Args[1:]
+ })
+ } else {
+ fmt.Fprintln(cx.Output, "Unknown subcommand:", sub)
+ }
+ }
+ go cmd(cx)
+ return cx.State
+}
+
+type MarGocodeCtl struct {
+ mg.ReducerType
+
+ // Whether or not to print debugging info related to the gocode cache
+ // used by the Gocode and GocodeCalltips reducers
+ Debug bool
+
+ // DebugPrune returns true if pkg should be removed from the cache
+ DebugPrune func(pkg *types.Package) bool
+
+ // The mode in which the types.Importer should operate
+ // By default (the zero value) it is KimPorter
+ ImporterMode ImporterMode
+
+ // Don't try to automatically import packages when auto-completion fails
+ // e.g. when `json.` is typed, if auto-complete fails
+ // "encoding/json" is imported and auto-complete attempted on that package instead
+ // See AddUnimportedPackages
+ NoUnimportedPackages bool
+
+ // If a package was imported internally for use in auto-completion,
+ // insert it in the source code
+ // See NoUnimportedPackages
+ // e.g. after `json.` is typed, `import "encoding/json"` added to the code
+ AddUnimportedPackages bool
+
+ // Don't preload packages to speed up auto-completion, etc.
+ NoPreloading bool
+
+ // Don't propose builtin types and functions
+ NoBuiltins bool
+
+ // Whether or not to propose test functions, e.g. `TestXxx` — TODO confirm exact scope with consumers
+ ProposeTests bool
+}
+
+func (mgc *MarGocodeCtl) RInit(mx *mg.Ctx) {
+ mctl.configure(func(m *marGocodeCtl) {
+ m.mgcctl = *mgc
+ if mgc.Debug {
+ m.logs = mx.Log.Dbg
+ }
+ })
+}
+
+func (mgc *MarGocodeCtl) Reduce(mx *mg.Ctx) *mg.State {
+ return mx.State
+}
diff --git a/src/margo.sh/golang/mgccache.go b/src/margo.sh/golang/mgccache.go
new file mode 100644
index 00000000..426cffa0
--- /dev/null
+++ b/src/margo.sh/golang/mgccache.go
@@ -0,0 +1,122 @@
+package golang
+
+import (
+ "go/types"
+ "margo.sh/mgpf"
+ "regexp"
+ "sync"
+ "time"
+)
+
+// mgcCacheKey is the key used for caching package imports
+type mgcCacheKey struct {
+ gsuPkgInfo
+
+ // Source indicates whether the package was imported from source code
+ Source bool
+}
+
+func (mck mgcCacheKey) fallback() mgcCacheKey {
+ fbk := mck
+ fbk.Source = !fbk.Source
+ return fbk
+}
+
+type mgcCacheEnt struct {
+ Key mgcCacheKey
+ Pkg *types.Package
+ Dur time.Duration
+}
+
+type mgcCache struct {
+ sync.RWMutex
+ m map[mgcCacheKey]mgcCacheEnt
+}
+
+func (mc *mgcCache) get(k mgcCacheKey) (mgcCacheEnt, bool) {
+ mc.RLock()
+ defer mc.RUnlock()
+
+ e, ok := mc.m[k]
+ if !ok {
+ mctl.dbgf("cache.miss: %+v\n", k)
+ }
+ return e, ok
+}
+
+func (mc *mgcCache) put(e mgcCacheEnt) {
+ if !e.Pkg.Complete() {
+ mctl.dbgf("cache.put: not storing %+v, it's incomplete\n", e.Key)
+ return
+ }
+
+ mc.Lock()
+ defer mc.Unlock()
+
+ mc.m[e.Key] = e
+ mctl.dbgf("cache.put: %+v %s\n", e.Key, mgpf.D(e.Dur))
+}
+
+func (mc *mgcCache) del(k mgcCacheKey) {
+ mc.Lock()
+ defer mc.Unlock()
+
+ if _, exists := mc.m[k]; !exists {
+ return
+ }
+
+ delete(mc.m, k)
+ mctl.dbgf("cache.del: %+v\n", k)
+}
+
+func (mc *mgcCache) prune(pats ...*regexp.Regexp) []mgcCacheEnt {
+ ents := []mgcCacheEnt{}
+ defer func() {
+ for _, e := range ents {
+ mctl.dbgf("cache.prune: %+v\n", e.Key)
+ }
+ }()
+
+ mc.Lock()
+ defer mc.Unlock()
+
+ for _, e := range mc.m {
+ for _, pat := range pats {
+ if pat.MatchString(e.Key.Path) {
+ ents = append(ents, e)
+ delete(mc.m, e.Key)
+ }
+ }
+ }
+
+ return ents
+}
+
+func (mc *mgcCache) size() int {
+ mc.RLock()
+ defer mc.RUnlock()
+
+ return len(mc.m)
+}
+
+func (mc *mgcCache) entries() []mgcCacheEnt {
+ mc.RLock()
+ defer mc.RUnlock()
+
+ l := make([]mgcCacheEnt, 0, len(mc.m))
+ for _, e := range mc.m {
+ l = append(l, e)
+ }
+ return l
+}
+
+func (mc *mgcCache) forEach(f func(mgcCacheEnt) bool) {
+ mc.RLock()
+ defer mc.RUnlock()
+
+ for _, e := range mc.m {
+ if !f(e) {
+ break
+ }
+ }
+}
diff --git a/src/margo.sh/golang/parse.go b/src/margo.sh/golang/parse.go
new file mode 100644
index 00000000..415339da
--- /dev/null
+++ b/src/margo.sh/golang/parse.go
@@ -0,0 +1,31 @@
+package golang
+
+import (
+ "go/parser"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+)
+
+const (
+ ParseFileMode = goutil.ParseFileMode
+)
+
+var (
+ NilPkgName = goutil.NilPkgName
+ NilFset = goutil.NilFset
+ NilPkgSrc = goutil.NilPkgSrc
+ NilAstFile = goutil.NilAstFile
+ NilTokenFile = goutil.NilTokenFile
+)
+
+type ParsedFile = goutil.ParsedFile
+
+// ParseFile is an alias of goutil.ParseFile
+func ParseFile(mx *mg.Ctx, fn string, src []byte) *ParsedFile {
+ return goutil.ParseFile(mx, fn, src)
+}
+
+// ParseFileWithMode is an alias of goutil.ParseFileWithMode
+func ParseFileWithMode(mx *mg.Ctx, fn string, src []byte, mode parser.Mode) *ParsedFile {
+ return goutil.ParseFileWithMode(mx, fn, src, mode)
+}
diff --git a/src/disposa.blue/margo/golang/parse_test.go b/src/margo.sh/golang/parse_test.go
similarity index 100%
rename from src/disposa.blue/margo/golang/parse_test.go
rename to src/margo.sh/golang/parse_test.go
diff --git a/src/margo.sh/golang/snippets.go b/src/margo.sh/golang/snippets.go
new file mode 100644
index 00000000..0708ed1e
--- /dev/null
+++ b/src/margo.sh/golang/snippets.go
@@ -0,0 +1,133 @@
+package golang
+
+import (
+ "go/ast"
+ "margo.sh/golang/goutil"
+ "margo.sh/golang/snippets"
+ "margo.sh/mg"
+ "sort"
+ "strings"
+)
+
+var (
+ Snippets = SnippetFuncs(append([]snippets.SnippetFunc{ImportPathSnippet}, snippets.DefaultSnippets...)...)
+)
+
+// SnippetFunc is an alias of snippets.SnippetFunc
+type SnippetFunc = snippets.SnippetFunc
+
+type SnippetFuncsList struct {
+ mg.ReducerType
+ Funcs []SnippetFunc
+}
+
+func SnippetFuncs(l ...SnippetFunc) *SnippetFuncsList {
+ return &SnippetFuncsList{Funcs: l}
+}
+
+func (sf *SnippetFuncsList) RCond(mx *mg.Ctx) bool {
+ return mx.ActionIs(mg.QueryCompletions{}) && mx.LangIs(mg.Go)
+}
+
+// Reduce collects completions from all snippet funcs and adds them to the state.
+func (sf *SnippetFuncsList) Reduce(mx *mg.Ctx) *mg.State {
+ cx := NewViewCursorCtx(mx)
+ var cl []mg.Completion
+ for _, f := range sf.Funcs {
+ cl = append(cl, f(cx)...)
+ }
+ for i := range cl { // idiomatic form; `for i, _ :=` is flagged by gofmt -s
+ sf.fixCompletion(&cl[i])
+ }
+ return mx.State.AddCompletions(cl...)
+}
+
+func (sf *SnippetFuncsList) fixCompletion(c *mg.Completion) {
+ c.Src = goutil.DedentCompletion(c.Src)
+ if c.Tag == "" {
+ c.Tag = mg.SnippetTag
+ }
+}
+
+// PackageNameSnippet is an alias of snippets.PackageNameSnippet
+func PackageNameSnippet(cx *CompletionCtx) []mg.Completion { return snippets.PackageNameSnippet(cx) }
+
+// MainFuncSnippet is an alias of snippets.MainFuncSnippet
+func MainFuncSnippet(cx *CompletionCtx) []mg.Completion { return snippets.MainFuncSnippet(cx) }
+
+// InitFuncSnippet is an alias of snippets.InitFuncSnippet
+func InitFuncSnippet(cx *CompletionCtx) []mg.Completion { return snippets.InitFuncSnippet(cx) }
+
+// FuncSnippet is an alias of snippets.FuncSnippet
+func FuncSnippet(cx *CompletionCtx) []mg.Completion { return snippets.FuncSnippet(cx) }
+
+// MethodSnippet is an alias of snippets.MethodSnippet
+func MethodSnippet(cx *CompletionCtx) []mg.Completion { return snippets.MethodSnippet(cx) }
+
+// GenDeclSnippet is an alias of snippets.GenDeclSnippet
+func GenDeclSnippet(cx *CompletionCtx) []mg.Completion { return snippets.GenDeclSnippet(cx) }
+
+// MapSnippet is an alias of snippets.MapSnippet
+func MapSnippet(cx *CompletionCtx) []mg.Completion { return snippets.MapSnippet(cx) }
+
+// TypeSnippet is an alias of snippets.TypeSnippet
+func TypeSnippet(cx *CompletionCtx) []mg.Completion { return snippets.TypeSnippet(cx) }
+
+// AppendSnippet is an alias of snippets.AppendSnippet
+func AppendSnippet(cx *CompletionCtx) []mg.Completion { return snippets.AppendSnippet(cx) }
+
+// DocSnippet is an alias of snippets.DocSnippet
+func DocSnippet(cx *CompletionCtx) []mg.Completion { return snippets.DocSnippet(cx) }
+
+func ImportPathSnippet(cx *CompletionCtx) []mg.Completion {
+ lit, ok := cx.Node.(*ast.BasicLit)
+ if !ok || !cx.Scope.Is(ImportPathScope) {
+ return nil
+ }
+
+ pfx := unquote(lit.Value)
+ if i := strings.LastIndexByte(pfx, '/'); i >= 0 {
+ pfx = pfx[:i+1]
+ } else {
+ // if there's no slash, don't do any filtering
+ // this allows the fuzzy selection to work in editor
+ pfx = ""
+ }
+
+ pkl := mctl.plst.View().List
+ skip := map[string]bool{}
+ srcDir := cx.View.Dir()
+ for _, spec := range cx.AstFile.Imports {
+ skip[unquote(spec.Path.Value)] = true
+ }
+
+ cl := make([]mg.Completion, 0, len(pkl))
+ for _, p := range pkl {
+ if skip[p.ImportPath] || !p.Importable(srcDir) {
+ continue
+ }
+
+ src := p.ImportPath
+ if pfx != "" {
+ src = strings.TrimPrefix(p.ImportPath, pfx)
+ if src == p.ImportPath || src == "" {
+ continue
+ }
+
+ // BUG: in ST
+ // given candidate `margo.sh/xxx`, and prefix `margo.sh`
+ // if we return xxx, it will replace the whole path
+ if !strings.ContainsRune(src, '/') {
+ src = p.ImportPath
+ }
+ }
+ cl = append(cl, mg.Completion{
+ Query: p.ImportPath,
+ Src: src,
+ })
+ }
+ sort.Slice(cl, func(i, j int) bool { return cl[i].Query < cl[j].Query })
+ return cl
+}
+
+// DeferSnippet is an alias of snippets.DeferSnippet
+func DeferSnippet(cx *CompletionCtx) []mg.Completion { return snippets.DeferSnippet(cx) }
diff --git a/src/margo.sh/golang/snippets/append-snippet.go b/src/margo.sh/golang/snippets/append-snippet.go
new file mode 100644
index 00000000..acae9444
--- /dev/null
+++ b/src/margo.sh/golang/snippets/append-snippet.go
@@ -0,0 +1,54 @@
+package snippets
+
+import (
+ "bytes"
+ "go/ast"
+ "go/printer"
+ "go/token"
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func AppendSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if !cx.Scope.Is(cursor.ExprScope) {
+ return nil
+ }
+
+ cl := func(sel string) []mg.Completion {
+ if sel == "" {
+ sel = "s"
+ }
+ return []mg.Completion{
+ mg.Completion{
+ Query: `append`,
+ Title: `append(` + sel + `, ...)`,
+ Src: `append(${1:` + sel + `}, ${2})$0`,
+ },
+ mg.Completion{
+ Query: `append:len`,
+ Title: `append(` + sel + `[:len:len], ...)`,
+ Src: `append(${1:` + sel + `}[:len(${1:` + sel + `}):len(${1:` + sel + `})], ${2})$0`,
+ },
+ }
+ }
+
+ if !cx.Scope.Is(cursor.AssignmentScope) {
+ return cl("")
+ }
+
+ var asn *ast.AssignStmt
+ if !cx.Set(&asn) || len(asn.Lhs) != 1 || len(asn.Rhs) > 1 {
+ return cl("")
+ }
+
+ sel := ""
+ switch x := asn.Lhs[0].(type) {
+ case *ast.Ident:
+ sel = x.Name
+ case *ast.SelectorExpr:
+ buf := &bytes.Buffer{}
+ printer.Fprint(buf, token.NewFileSet(), x)
+ sel = buf.String()
+ }
+ return cl(sel)
+}
diff --git a/src/margo.sh/golang/snippets/defer-snippet.go b/src/margo.sh/golang/snippets/defer-snippet.go
new file mode 100644
index 00000000..c6231578
--- /dev/null
+++ b/src/margo.sh/golang/snippets/defer-snippet.go
@@ -0,0 +1,32 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func DeferSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if !cx.Scope.Is(cursor.BlockScope) {
+ return nil
+ }
+ return []mg.Completion{
+ mg.Completion{
+ Query: `defer func`,
+ Title: `defer func{}`,
+ Src: `
+ defer func() {
+ ${1}
+ }()
+ $0
+ `,
+ },
+ mg.Completion{
+ Query: `defer`,
+ Title: `defer f()`,
+ Src: `
+ defer ${1:f}()
+ $0
+ `,
+ },
+ }
+}
diff --git a/src/margo.sh/golang/snippets/doc-snippet.go b/src/margo.sh/golang/snippets/doc-snippet.go
new file mode 100644
index 00000000..7ac60439
--- /dev/null
+++ b/src/margo.sh/golang/snippets/doc-snippet.go
@@ -0,0 +1,92 @@
+package snippets
+
+import (
+ "go/ast"
+ "margo.sh/golang/cursor"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ yotsuba "margo.sh/why_would_you_make_yotsuba_cry"
+)
+
+func DocSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cx.Doc == nil {
+ return nil
+ }
+
+ var ids []*ast.Ident
+ var addNames func(n ast.Node)
+ addFieldNames := func(fl *ast.FieldList) {
+ if fl == nil {
+ return
+ }
+ for _, f := range fl.List {
+ ids = append(ids, f.Names...)
+ addNames(f.Type)
+ }
+ }
+ addNames = func(n ast.Node) {
+ if yotsuba.IsNil(n) {
+ return
+ }
+
+ switch x := n.(type) {
+ case *ast.GenDecl:
+ for _, spec := range x.Specs {
+ addNames(spec)
+ }
+ case *ast.SelectorExpr:
+ addNames(x.Sel)
+ addNames(x.X)
+ case *ast.Ident:
+ ids = append(ids, x)
+ case *ast.File:
+ ids = append(ids, x.Name)
+ case *ast.FieldList:
+ addFieldNames(x)
+ case *ast.Field:
+ addNames(x.Type)
+ ids = append(ids, x.Names...)
+ case *ast.TypeSpec:
+ ids = append(ids, x.Name)
+ case *ast.FuncDecl:
+ ids = append(ids, x.Name)
+ addFieldNames(x.Recv)
+ if t := x.Type; t != nil {
+ addFieldNames(t.Params)
+ addFieldNames(t.Results)
+ }
+ case *ast.ValueSpec:
+ addNames(x.Type)
+ ids = append(ids, x.Names...)
+ }
+ }
+ addNames(cx.Doc.Node)
+
+ pfx := ""
+ // we use View.Pos because cx.Pos might have been changed
+ if i := cx.View.Pos; 0 <= i && i < len(cx.Src) {
+ i = mgutil.RepositionLeft(cx.Src, i, goutil.IsLetter) - 1
+ if r := cx.Src[i]; r != ' ' && r != '.' {
+ pfx = " "
+ }
+ }
+ sfx := " "
+ if i := cx.View.Pos; 0 <= i && i < len(cx.Src) && cx.Src[i] == ' ' {
+ sfx = ""
+ }
+
+ seen := map[string]bool{}
+ cl := make([]mg.Completion, 0, len(ids))
+ for _, id := range ids {
+ if id == nil || id.Name == "_" || seen[id.Name] {
+ continue
+ }
+ seen[id.Name] = true
+ cl = append(cl, mg.Completion{
+ Query: id.Name,
+ Src: pfx + id.Name + sfx + `$0`,
+ })
+ }
+ return cl
+}
diff --git a/src/margo.sh/golang/snippets/func-snippet.go b/src/margo.sh/golang/snippets/func-snippet.go
new file mode 100644
index 00000000..f44b9be4
--- /dev/null
+++ b/src/margo.sh/golang/snippets/func-snippet.go
@@ -0,0 +1,68 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func FuncSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cx.Scope == cursor.FileScope || cx.Scope.Is(cursor.FuncDeclScope) {
+ comp := mg.Completion{
+ Query: `func`,
+ Title: `name() {...}`,
+ Src: `
+ func ${1:name}($2)$3 {
+ $0
+ }
+ `,
+ }
+ if !cx.IsTestFile {
+ return []mg.Completion{comp}
+ }
+ return []mg.Completion{
+ {
+ Query: `func Test`,
+ Title: `Test() {...}`,
+ Src: `
+ func Test${1:name}(t *testing.T) {
+ $0
+ }
+ `,
+ },
+ {
+ Query: `func Benchmark`,
+ Title: `Benchmark() {...}`,
+ Src: `
+ func Benchmark${1:name}(b *testing.B) {
+ $0
+ }
+ `,
+ },
+ {
+ Query: `func Example`,
+ Title: `Example() {...}`,
+ Src: `
+ func Example${1:name}() {
+ $0
+
+ // Output:
+ }
+ `,
+ },
+ }
+ }
+
+ if cx.Scope.Is(cursor.BlockScope, cursor.VarScope) {
+ return []mg.Completion{{
+ Query: `func`,
+ Title: `func() {...}`,
+ Src: `
+ func($1)$2 {
+ $3
+ }$0
+ `,
+ }}
+ }
+
+ return nil
+}
diff --git a/src/margo.sh/golang/snippets/gen-decl-snippet.go b/src/margo.sh/golang/snippets/gen-decl-snippet.go
new file mode 100644
index 00000000..1e702257
--- /dev/null
+++ b/src/margo.sh/golang/snippets/gen-decl-snippet.go
@@ -0,0 +1,61 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func GenDeclSnippet(cx *cursor.CurCtx) []mg.Completion {
+ switch cx.Scope {
+ case cursor.BlockScope:
+ return []mg.Completion{
+ {
+ Query: `var`,
+ Title: `X`,
+ Src: `var ${1:name}`,
+ },
+ {
+ Query: `var`,
+ Title: `X = Y`,
+ Src: `var ${1:name} = ${2:value}`,
+ },
+ {
+ Query: `const`,
+ Title: `X = Y`,
+ Src: `const ${1:name} = ${2:value}`,
+ },
+ }
+ case cursor.FileScope:
+ return []mg.Completion{
+ {
+ Query: `import`,
+ Title: `(...)`,
+ Src: `
+ import (
+ "$0"
+ )
+ `,
+ },
+ {
+ Query: `var`,
+ Title: `(...)`,
+ Src: `
+ var (
+ ${1:name} = ${2:value}
+ )
+ `,
+ },
+ {
+ Query: `const`,
+ Title: `(...)`,
+ Src: `
+ const (
+ ${1:name} = ${2:value}
+ )
+ `,
+ },
+ }
+ default:
+ return nil
+ }
+}
diff --git a/src/margo.sh/golang/snippets/http-snippet.go b/src/margo.sh/golang/snippets/http-snippet.go
new file mode 100644
index 00000000..e06c37ff
--- /dev/null
+++ b/src/margo.sh/golang/snippets/http-snippet.go
@@ -0,0 +1,60 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func HTTPSnippet(cx *cursor.CurCtx) []mg.Completion {
+ switch {
+ case !cx.ImportsMatch(func(p string) bool { return p == "net/http" }):
+ return nil
+ case cx.Scope.Is(cursor.BlockScope):
+ return []mg.Completion{
+ mg.Completion{
+ Query: `http.HandleFunc`,
+ Title: `http.HandleFunc("...", func(w, r))`,
+ Src: `
+ http.HandleFunc("/${1}", func(w http.ResponseWriter, r *http.Request) {
+ $0
+ })
+ `,
+ },
+ }
+ case cx.Scope.Is(cursor.ExprScope):
+ return []mg.Completion{
+ mg.Completion{
+ Query: `http.HandlerFunc`,
+ Title: `http.HandlerFunc(func(w, r))`,
+ Src: `
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ $0
+ })
+ `,
+ },
+ mg.Completion{
+ Query: `func http handler`,
+ Title: `func(w, r)`,
+ Src: `
+ func(w http.ResponseWriter, r *http.Request) {
+ $0
+ }
+ `,
+ },
+ }
+ case cx.Scope.Is(cursor.FileScope):
+ return []mg.Completion{
+ mg.Completion{
+ Query: `func http handler`,
+ Title: `func(w, r)`,
+ Src: `
+ func ${1:name}(w http.ResponseWriter, r *http.Request) {
+ $0
+ }
+ `,
+ },
+ }
+ default:
+ return nil
+ }
+}
diff --git a/src/margo.sh/golang/snippets/init-func-snippet.go b/src/margo.sh/golang/snippets/init-func-snippet.go
new file mode 100644
index 00000000..4b68791d
--- /dev/null
+++ b/src/margo.sh/golang/snippets/init-func-snippet.go
@@ -0,0 +1,30 @@
+package snippets
+
+import (
+ "go/ast"
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func InitFuncSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cx.Scope != cursor.FileScope {
+ return nil
+ }
+
+ for _, x := range cx.AstFile.Decls {
+ x, ok := x.(*ast.FuncDecl)
+ if ok && x.Name != nil && x.Name.String() == "init" {
+ return nil
+ }
+ }
+
+ return []mg.Completion{{
+ Query: `func init`,
+ Title: `init() {...}`,
+ Src: `
+ func init() {
+ $0
+ }
+ `,
+ }}
+}
diff --git a/src/margo.sh/golang/snippets/main-func-snippet.go b/src/margo.sh/golang/snippets/main-func-snippet.go
new file mode 100644
index 00000000..37c85b68
--- /dev/null
+++ b/src/margo.sh/golang/snippets/main-func-snippet.go
@@ -0,0 +1,30 @@
+package snippets
+
+import (
+ "go/ast"
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func MainFuncSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cx.Scope != cursor.FileScope || cx.PkgName != "main" {
+ return nil
+ }
+
+ for _, x := range cx.AstFile.Decls {
+ x, ok := x.(*ast.FuncDecl)
+ if ok && x.Name != nil && x.Name.String() == "main" {
+ return nil
+ }
+ }
+
+ return []mg.Completion{{
+ Query: `func main`,
+ Title: `main() {...}`,
+ Src: `
+ func main() {
+ $0
+ }
+ `,
+ }}
+}
diff --git a/src/margo.sh/golang/snippets/map-snippet.go b/src/margo.sh/golang/snippets/map-snippet.go
new file mode 100644
index 00000000..e3a070e5
--- /dev/null
+++ b/src/margo.sh/golang/snippets/map-snippet.go
@@ -0,0 +1,24 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func MapSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if !cx.Scope.Is(cursor.ExprScope) {
+ return nil
+ }
+ return []mg.Completion{
+ {
+ Query: `map`,
+ Title: `map[T]T`,
+ Src: `map[${1:T}]${2:T}`,
+ },
+ {
+ Query: `map`,
+ Title: `map[T]T{...}`,
+ Src: `map[${1:T}]${2:T}{$0}`,
+ },
+ }
+}
diff --git a/src/margo.sh/golang/snippets/method-snippet.go b/src/margo.sh/golang/snippets/method-snippet.go
new file mode 100644
index 00000000..809c1e0a
--- /dev/null
+++ b/src/margo.sh/golang/snippets/method-snippet.go
@@ -0,0 +1,96 @@
+package snippets
+
+import (
+ "go/ast"
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+ "unicode"
+)
+
+func receiverName(typeName string) string {
+ name := make([]rune, 0, 4)
+ for _, r := range typeName {
+ if len(name) == 0 || unicode.IsUpper(r) {
+ name = append(name, unicode.ToLower(r))
+ }
+ }
+ return string(name)
+}
+
+func MethodSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cx.Scope != cursor.FileScope && !cx.Scope.Is(cursor.FuncDeclScope) {
+ return nil
+ }
+
+ type field struct {
+ nm string
+ typ string
+ }
+ fields := map[string]field{}
+ types := []string{}
+
+ for _, x := range cx.AstFile.Decls {
+ switch x := x.(type) {
+ case *ast.FuncDecl:
+ if x.Recv == nil || len(x.Recv.List) == 0 {
+ continue
+ }
+
+ r := x.Recv.List[0]
+ if len(r.Names) == 0 {
+ continue
+ }
+
+ name := ""
+ if id := r.Names[0]; id != nil {
+ name = id.String()
+ }
+
+ switch x := r.Type.(type) {
+ case *ast.Ident:
+ typ := x.String()
+ fields[typ] = field{nm: name, typ: typ}
+ case *ast.StarExpr:
+ if id, ok := x.X.(*ast.Ident); ok {
+ typ := id.String()
+ fields[typ] = field{nm: name, typ: "*" + typ}
+ }
+ }
+ case *ast.GenDecl:
+ for _, spec := range x.Specs {
+ spec, ok := spec.(*ast.TypeSpec)
+ if ok && spec.Name != nil && spec.Name.Name != "_" {
+ types = append(types, spec.Name.Name)
+ }
+ }
+ }
+ }
+
+ cl := make([]mg.Completion, 0, len(types))
+ for _, typ := range types {
+ if f, ok := fields[typ]; ok {
+ cl = append(cl, mg.Completion{
+ Query: `func method ` + f.typ,
+ Title: `(` + f.typ + `) method() {...}`,
+ Src: `
+ func (` + f.nm + ` ` + f.typ + `) ${1:name}($2)$3 {
+ $0
+ }
+ `,
+ })
+ } else {
+ nm := receiverName(typ)
+ cl = append(cl, mg.Completion{
+ Query: `func method ` + typ,
+ Title: `(` + typ + `) method() {...}`,
+ Src: `
+ func (${1:` + nm + `} ${2:*}` + typ + `) ${3:name}($4)$5 {
+ $0
+ }
+ `,
+ })
+ }
+ }
+
+ return cl
+}
diff --git a/src/margo.sh/golang/snippets/mutex-snippet.go b/src/margo.sh/golang/snippets/mutex-snippet.go
new file mode 100644
index 00000000..683211e4
--- /dev/null
+++ b/src/margo.sh/golang/snippets/mutex-snippet.go
@@ -0,0 +1,62 @@
+package snippets
+
+import (
+ "go/ast"
+ "margo.sh/golang/cursor"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "strings"
+)
+
+func MutexSnippet(cx *cursor.CurCtx) []mg.Completion {
+ x, ok := cx.Node.(*ast.SelectorExpr)
+ if !ok {
+ return nil
+ }
+
+ sel, _ := cx.Print(x)
+ sel = strings.TrimRightFunc(sel, func(r rune) bool { return r != '.' })
+ if sel == "" {
+ return nil
+ }
+
+ snips := func(lock, unlock string) []mg.Completion {
+ return []mg.Completion{
+ {
+ Query: lock + `; defer ` + unlock,
+ Src: goutil.DedentCompletion(`
+ ` + lock + `()
+ defer ` + sel + unlock + `()
+
+ $0
+ `),
+ Tag: mg.SnippetTag,
+ },
+ {
+ Query: lock + `; ...; ` + unlock,
+ Src: goutil.DedentCompletion(`
+ ` + lock + `()
+ $0
+ ` + sel + unlock + `()
+ `),
+ Tag: mg.SnippetTag,
+ },
+ }
+ }
+
+ // as a temporary hack, until we have typechecking,
+ // we'll rely on the gocode reducer to tell us if this is a lock
+ cx.Ctx.Defer(func(mx *mg.Ctx) *mg.State {
+ cl := []mg.Completion{}
+ for _, c := range mx.State.Completions {
+ switch c.Query {
+ case "Lock":
+ cl = append(cl, snips("Lock", "Unlock")...)
+ case "RLock":
+ cl = append(cl, snips("RLock", "RUnlock")...)
+ }
+ }
+ return mx.AddCompletions(cl...)
+ })
+ return nil
+}
diff --git a/src/margo.sh/golang/snippets/package-name-snippet.go b/src/margo.sh/golang/snippets/package-name-snippet.go
new file mode 100644
index 00000000..67e32827
--- /dev/null
+++ b/src/margo.sh/golang/snippets/package-name-snippet.go
@@ -0,0 +1,59 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/golang/goutil"
+ "margo.sh/mg"
+ "regexp"
+)
+
+var (
+ pkgDirNamePat = regexp.MustCompile(`(\w+)\W*$`)
+)
+
+func PackageNameSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cx.PkgName != goutil.NilPkgName || cx.Scope != cursor.PackageScope {
+ return nil
+ }
+
+ var cl []mg.Completion
+ seen := map[string]bool{}
+ add := func(name string) {
+ if seen[name] {
+ return
+ }
+ seen[name] = true
+ cl = append(cl, mg.Completion{
+ Query: `package ` + name,
+ Src: `
+ package ` + name + `
+
+ $0
+ `,
+ })
+ }
+
+ dir := cx.View.Dir()
+ pkg, _ := goutil.BuildContext(cx.Ctx).ImportDir(dir, 0)
+ if pkg != nil && pkg.Name != "" {
+ add(pkg.Name)
+ } else {
+ add(pkgDirNamePat.FindString(dir))
+ }
+
+ cl = append(cl, mg.Completion{
+ Query: `package main`,
+ Title: `main{}`,
+ Src: `
+ package main
+
+ func main() {
+ $0
+ }
+ `,
+ })
+
+ add("main")
+
+ return cl
+}
diff --git a/src/margo.sh/golang/snippets/return-snippet.go b/src/margo.sh/golang/snippets/return-snippet.go
new file mode 100644
index 00000000..2967ba95
--- /dev/null
+++ b/src/margo.sh/golang/snippets/return-snippet.go
@@ -0,0 +1,21 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func ReturnSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if !cx.Scope.Is(cursor.BlockScope) {
+ return nil
+ }
+
+ cl := []mg.Completion{
+ mg.Completion{
+ Query: `return`,
+ Src: `return $0`,
+ },
+ }
+
+ return cl
+}
diff --git a/src/margo.sh/golang/snippets/snippets.go b/src/margo.sh/golang/snippets/snippets.go
new file mode 100644
index 00000000..d2af43a5
--- /dev/null
+++ b/src/margo.sh/golang/snippets/snippets.go
@@ -0,0 +1,27 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+var (
+ DefaultSnippets = []SnippetFunc{
+ PackageNameSnippet,
+ MainFuncSnippet,
+ InitFuncSnippet,
+ FuncSnippet,
+ MethodSnippet,
+ GenDeclSnippet,
+ MapSnippet,
+ TypeSnippet,
+ AppendSnippet,
+ DocSnippet,
+ DeferSnippet,
+ MutexSnippet,
+ ReturnSnippet,
+ HTTPSnippet,
+ }
+)
+
+type SnippetFunc func(*cursor.CurCtx) []mg.Completion
diff --git a/src/margo.sh/golang/snippets/type-snippet.go b/src/margo.sh/golang/snippets/type-snippet.go
new file mode 100644
index 00000000..145e19e0
--- /dev/null
+++ b/src/margo.sh/golang/snippets/type-snippet.go
@@ -0,0 +1,28 @@
+package snippets
+
+import (
+ "margo.sh/golang/cursor"
+ "margo.sh/mg"
+)
+
+func TypeSnippet(cx *cursor.CurCtx) []mg.Completion {
+ if cs := cx.Scope; cs != cursor.FileScope && cs != cursor.BlockScope && !cs.Is(cursor.TypeDeclScope) {
+ return nil
+ }
+ return []mg.Completion{
+ {
+ Query: `type struct`,
+ Title: `struct {}`,
+ Src: `
+ type ${1:T} struct {
+ ${2:V}
+ }
+ `,
+ },
+ {
+ Query: `type`,
+ Title: `type T`,
+ Src: `type ${1:T} ${2:V}`,
+ },
+ }
+}
diff --git a/src/margo.sh/golang/syntaxcheck.go b/src/margo.sh/golang/syntaxcheck.go
new file mode 100644
index 00000000..bb1fe820
--- /dev/null
+++ b/src/margo.sh/golang/syntaxcheck.go
@@ -0,0 +1,66 @@
+package golang
+
+import (
+ "go/scanner"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+)
+
+type SyntaxCheck struct {
+ mg.ReducerType
+
+ q *mgutil.ChanQ
+}
+
+func (sc *SyntaxCheck) RCond(mx *mg.Ctx) bool {
+ return mx.LangIs(mg.Go)
+}
+
+func (sc *SyntaxCheck) RMount(mx *mg.Ctx) {
+ sc.q = mgutil.NewChanQ(1)
+ go sc.checker()
+}
+
+func (sc *SyntaxCheck) RUnmount(mx *mg.Ctx) {
+ sc.q.Close()
+}
+
+func (sc *SyntaxCheck) Reduce(mx *mg.Ctx) *mg.State {
+ switch mx.Action.(type) {
+ case mg.ViewActivated, mg.ViewModified, mg.ViewSaved:
+ sc.q.Put(mx)
+ }
+ return mx.State
+}
+
+func (sc *SyntaxCheck) checker() {
+ for v := range sc.q.C() {
+ sc.check(v.(*mg.Ctx))
+ }
+}
+
+func (sc *SyntaxCheck) check(mx *mg.Ctx) {
+ src, _ := mx.View.ReadAll()
+ pf := ParseFile(mx, mx.View.Filename(), src)
+ type iKey struct{}
+ mx.Store.Dispatch(mg.StoreIssues{
+ IssueKey: mg.IssueKey{Key: iKey{}},
+ Issues: sc.errsToIssues(mx.View, pf.ErrorList),
+ })
+}
+
+func (sc *SyntaxCheck) errsToIssues(v *mg.View, el scanner.ErrorList) mg.IssueSet {
+ issues := make(mg.IssueSet, len(el))
+ for i, e := range el {
+ issues[i] = mg.Issue{
+ Path: v.Path,
+ Name: v.Name,
+ Row: e.Pos.Line - 1,
+ Col: e.Pos.Column - 1,
+ Message: e.Msg,
+ Tag: mg.Error,
+ Label: "Go/SyntaxCheck",
+ }
+ }
+ return issues
+}
diff --git a/src/margo.sh/golang/typecheck.go b/src/margo.sh/golang/typecheck.go
new file mode 100644
index 00000000..a59a6bbb
--- /dev/null
+++ b/src/margo.sh/golang/typecheck.go
@@ -0,0 +1,196 @@
+package golang
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "margo.sh/golang/goutil"
+ "margo.sh/kimporter"
+ "margo.sh/mg"
+ "margo.sh/mgpf"
+ "margo.sh/mgutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+type TypeCheck struct {
+ mg.ReducerType
+
+ q *mgutil.ChanQ
+}
+
+func (tc *TypeCheck) RCond(mx *mg.Ctx) bool {
+ return mx.LangIs(mg.Go)
+}
+
+func (tc *TypeCheck) RMount(mx *mg.Ctx) {
+ tc.q = mgutil.NewChanQ(1)
+ go tc.checker()
+}
+
+func (tc *TypeCheck) RUnmount(mx *mg.Ctx) {
+ tc.q.Close()
+}
+
+func (tc *TypeCheck) Reduce(mx *mg.Ctx) *mg.State {
+ switch mx.Action.(type) {
+ case mg.ViewActivated, mg.ViewModified, mg.ViewSaved:
+ tc.q.Put(mx)
+ }
+ return mx.State
+}
+
+func (tc *TypeCheck) checker() {
+ for v := range tc.q.C() {
+ tc.check(v.(*mg.Ctx))
+ }
+}
+
+func (tc *TypeCheck) check(mx *mg.Ctx) {
+ defer mx.Begin(mg.Task{Title: "Go/TypeCheck"}).Done()
+ pf := mgpf.NewProfile("Go/TypeCheck")
+ defer func() {
+ if pf.Dur().Duration < 100*time.Millisecond {
+ return
+ }
+ mx.Profile.Fprint(os.Stderr, &mgpf.PrintOpts{
+ MinDuration: 10 * time.Millisecond,
+ })
+ }()
+ mx = mx.Copy(func(mx *mg.Ctx) { mx.Profile = pf })
+ v := mx.View
+
+ src, _ := v.ReadAll()
+ issues := []mg.Issue{}
+ if v.Path == "" {
+ pf := goutil.ParseFile(mx, v.Name, src)
+ issues = append(issues, tc.errToIssues(mx, v, pf.Error)...)
+ if pf.Error == nil {
+ tcfg := types.Config{
+ IgnoreFuncBodies: true,
+ FakeImportC: true,
+ Error: func(err error) {
+ issues = append(issues, tc.errToIssues(mx, v, err)...)
+ },
+ Importer: kimporter.New(mx, nil),
+ }
+ tcfg.Check("_", pf.Fset, []*ast.File{pf.AstFile}, nil)
+ }
+ } else {
+ kp := kimporter.New(mx, &kimporter.Config{
+ CheckFuncs: true,
+ CheckImports: true,
+ Tests: strings.HasSuffix(v.Filename(), "_test.go"),
+ SrcMap: map[string][]byte{v.Filename(): src},
+ })
+ _, err := kp.ImportFrom(".", v.Dir(), 0)
+ issues = append(issues, tc.errToIssues(mx, v, err)...)
+ }
+ for i, isu := range issues {
+ if isu.Path == "" {
+ isu.Path = v.Path
+ isu.Name = v.Name
+ }
+ isu.Label = "Go/TypeCheck"
+ isu.Tag = mg.Error
+ issues[i] = isu
+ }
+
+ type K struct{}
+ mx.Store.Dispatch(mg.StoreIssues{
+ IssueKey: mg.IssueKey{Key: K{}},
+ Issues: issues,
+ })
+}
+
+func (tc *TypeCheck) parseFiles(mx *mg.Ctx) (*token.FileSet, []*ast.File, error) {
+ v := mx.View
+ src, _ := v.ReadAll()
+ if v.Path == "" {
+ pf := goutil.ParseFile(mx, v.Name, src)
+ files := []*ast.File{pf.AstFile}
+ if files[0] == nil {
+ files = nil
+ }
+ return pf.Fset, files, pf.Error
+ }
+
+ currNm := v.Basename()
+ dir := v.Dir()
+ bp, err := BuildContext(mx).ImportDir(dir, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ fset := token.NewFileSet()
+ // TODO: caching...
+ fn := v.Filename()
+ af, err := parser.ParseFile(fset, fn, src, parser.ParseComments)
+ if err != nil {
+ return nil, nil, err
+ }
+ pkgFiles := map[string][]*ast.File{}
+ names := append(bp.GoFiles, bp.CgoFiles...)
+ if strings.HasSuffix(fn, "_test.go") {
+ names = append(names, bp.TestGoFiles...)
+ }
+ for _, nm := range names {
+ if nm == currNm {
+ continue
+ }
+ af, err := parser.ParseFile(fset, filepath.Join(dir, nm), nil, parser.ParseComments)
+ if err != nil {
+ return nil, nil, err
+ }
+ pkgFiles[af.Name.String()] = append(pkgFiles[af.Name.String()], af)
+ }
+ files := append(pkgFiles[af.Name.String()], af)
+ return fset, files, nil
+}
+
+func (tc *TypeCheck) posIssue(mx *mg.Ctx, v *mg.View, msg string, p token.Position) mg.Issue {
+ is := mg.Issue{
+ Path: p.Filename,
+ Row: p.Line - 1,
+ Col: p.Column - 1,
+ Message: msg,
+ }
+ if is.Path == "" {
+ is.Name = v.Name
+ }
+ return is
+}
+
+func (tc *TypeCheck) errToIssues(mx *mg.Ctx, v *mg.View, err error) mg.IssueSet {
+ var issues mg.IssueSet
+ switch e := err.(type) {
+ case nil:
+ case scanner.ErrorList:
+ for _, err := range e {
+ issues = append(issues, tc.errToIssues(mx, v, err)...)
+ }
+ case mg.Issue:
+ if e.Name == "" && e.Path == "" {
+ // guard against failure to set .Path
+ e.Name = v.Name
+ }
+ issues = append(issues, e)
+ case scanner.Error:
+ issues = append(issues, tc.posIssue(mx, v, e.Msg, e.Pos))
+ case *scanner.Error:
+ issues = append(issues, tc.posIssue(mx, v, e.Msg, e.Pos))
+ case types.Error:
+ issues = append(issues, tc.posIssue(mx, v, e.Msg, e.Fset.Position(e.Pos)))
+ case *types.Error:
+ issues = append(issues, tc.posIssue(mx, v, e.Msg, e.Fset.Position(e.Pos)))
+ default:
+ issues = append(issues, mg.Issue{
+ Name: v.Name,
+ Message: err.Error(),
+ })
+ }
+ return issues
+}
diff --git a/src/margo.sh/golang/version.go b/src/margo.sh/golang/version.go
new file mode 100644
index 00000000..42671090
--- /dev/null
+++ b/src/margo.sh/golang/version.go
@@ -0,0 +1,12 @@
+package golang
+
+import (
+ "margo.sh/golang/goutil"
+)
+
+var (
+ VersionTag = goutil.VersionTag
+ Version = goutil.Version
+)
+
+type ReleaseVersion = goutil.ReleaseVersion
diff --git a/src/disposa.blue/margo/golang/version_test.go b/src/margo.sh/golang/version_test.go
similarity index 100%
rename from src/disposa.blue/margo/golang/version_test.go
rename to src/margo.sh/golang/version_test.go
diff --git a/src/margo.sh/htm/htm.go b/src/margo.sh/htm/htm.go
new file mode 100644
index 00000000..23abb830
--- /dev/null
+++ b/src/margo.sh/htm/htm.go
@@ -0,0 +1,232 @@
+package htm
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "github.com/ugorji/go/codec"
+ "io"
+ "margo.sh/mg/actions"
+ "strconv"
+ "strings"
+)
+
+var (
+ ArticleAttrs = &Attrs{Class: class{"article"}}
+ HeadingAttrs = &Attrs{Class: class{"heading"}}
+ HighlightAttrs = &Attrs{Class: class{"highlight"}}
+ colonText = Text(": ")
+
+ esc = strings.NewReplacer(
+ `&`, "&amp;",
+ `<`, "&lt;",
+ `>`, "&gt;",
+ )
+)
+
+func Article(heading IElement, content ...Element) Element {
+ cl := append(make([]Element, 0, 3), Span(HeadingAttrs, heading), colonText)
+ switch len(content) {
+ case 0:
+ case 1:
+ cl = append(cl, content[0])
+ default:
+ ul := make([]Element, len(content))
+ for i, c := range content {
+ ul[i] = Li(nil, c)
+ }
+ cl = append(cl, Ul(nil, ul...))
+ }
+ return Div(ArticleAttrs, cl...)
+}
+
+func Text(s string) IElement { return iRawNode{rawNode: rawNode{s: []byte(esc.Replace(s))}} }
+func Textf(format string, a ...interface{}) IElement { return Text(fmt.Sprintf(format, a...)) }
+func HighlightText(s string) IElement { return Span(HighlightAttrs, Text(s)) }
+
+func P(a *Attrs, l ...IElement) BElement { return bnode{node{t: "p", a: a, l: ils(l)}} }
+func Div(a *Attrs, l ...Element) BElement { return bnode{node{t: "div", a: a, l: els(l)}} }
+func Span(a *Attrs, l ...IElement) IElement { return inode{node{t: "span", a: a, l: ils(l)}} }
+func A(a *AAttrs, l ...IElement) IElement { return inode{node{t: "a", a: a, l: ils(l)}} }
+
+func Ul(a *Attrs, l ...Element) BElement { return bnode{node{t: "ul", a: a, l: els(l)}} }
+func Ol(a *Attrs, l ...Element) BElement { return bnode{node{t: "ol", a: a, l: els(l)}} }
+func Li(a *Attrs, l ...Element) BElement { return bnode{node{t: "li", a: a, l: els(l)}} }
+
+func Em(a *Attrs, l ...IElement) IElement { return inode{node{t: "em", a: a, l: ils(l)}} }
+func EmText(s string) IElement { return Em(nil, Text(s)) }
+func Strong(a *Attrs, l ...IElement) IElement { return inode{node{t: "strong", a: a, l: ils(l)}} }
+func StrongText(s string) IElement { return Strong(nil, Text(s)) }
+
+func H1(a *Attrs, l ...Element) BElement { return bnode{node{t: "H1", a: a, l: els(l)}} }
+func H2(a *Attrs, l ...Element) BElement { return bnode{node{t: "H2", a: a, l: els(l)}} }
+func H3(a *Attrs, l ...Element) BElement { return bnode{node{t: "H3", a: a, l: els(l)}} }
+func H4(a *Attrs, l ...Element) BElement { return bnode{node{t: "H4", a: a, l: els(l)}} }
+func H5(a *Attrs, l ...Element) BElement { return bnode{node{t: "H5", a: a, l: els(l)}} }
+func H6(a *Attrs, l ...Element) BElement { return bnode{node{t: "H6", a: a, l: els(l)}} }
+
+type class struct{ s string }
+
+type Attrs struct {
+ Class class
+}
+
+func (a *Attrs) attrs() (string, error) {
+ if a == nil || a.Class.s == "" {
+ return "", nil
+ }
+ return `class=` + strconv.Quote(a.Class.s), nil
+}
+
+type AAttrs struct {
+ Action actions.ClientAction
+ Class string
+}
+
+func (a *AAttrs) attrs() (string, error) {
+ if a == nil {
+ return "", nil
+ }
+ buf := bytes.Buffer{}
+ if a.Class != "" {
+ buf.WriteString(` class=`)
+ buf.WriteString(strconv.Quote(a.Class))
+ }
+ if a.Action != nil {
+ js := []byte{}
+ err := codec.NewEncoderBytes(&js, &codec.JsonHandle{}).Encode(a.Action.ClientAction())
+ if err != nil {
+ return "", err
+ }
+ s := make([]byte, base64.StdEncoding.EncodedLen(len(js)))
+ base64.StdEncoding.Encode(s, js)
+ buf.WriteString(` href="data:application/json;base64,`)
+ buf.Write(s)
+ buf.WriteByte('"')
+ }
+ return strings.TrimSpace(buf.String()), nil
+}
+
+type attrs interface {
+ attrs() (string, error)
+}
+
+type elementType struct{}
+
+func (et elementType) element() {}
+
+type Element interface {
+ element()
+ FPrintHTML(w io.Writer) error
+ FPrintText(w io.Writer) error
+}
+
+type IElement interface {
+ Element
+ iElement()
+}
+
+type BElement interface {
+ Element
+ bElement()
+}
+
+type inode struct{ node }
+
+func (inode) iElement() {}
+
+type bnode struct{ node }
+
+func (bnode) bElement() {}
+
+func (b bnode) FPrintText(w io.Writer) error {
+ if err := b.node.FPrintText(w); err != nil {
+ return err
+ }
+ _, err := w.Write([]byte{'\n'})
+ return err
+}
+
+type node struct {
+ elementType
+ t string
+ void bool
+ a attrs
+ l nodeList
+}
+
+func (n node) FPrintHTML(w io.Writer) error {
+ if n.t != "" {
+ attrs := ""
+ if n.a != nil {
+ s, err := n.a.attrs()
+ if err != nil {
+ return err
+ }
+ attrs = s
+ }
+ if _, err := io.WriteString(w, `<`+n.t+` `+attrs+`>`); err != nil {
+ return err
+ }
+ }
+ for i, l := 0, n.l; i < l.len(); i++ {
+ if err := l.item(i).FPrintHTML(w); err != nil {
+ return err
+ }
+ }
+ if n.t != "" && !n.void {
+ s := `</` + n.t + `>`
+ if _, err := io.WriteString(w, s); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (n node) FPrintText(w io.Writer) error {
+ for i, l := 0, n.l; i < l.len(); i++ {
+ if err := l.item(i).FPrintText(w); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type iRawNode struct {
+ rawNode
+ inode
+}
+
+type bRawNode struct {
+ rawNode
+ inode
+}
+
+type rawNode struct {
+ elementType
+ s []byte
+}
+
+func (rn rawNode) FPrintHTML(w io.Writer) error {
+ _, err := w.Write(rn.s)
+ return err
+}
+
+func (rn rawNode) FPrintText(w io.Writer) error {
+ return rn.FPrintHTML(w)
+}
+
+type nodeList interface {
+ len() int
+ item(int) Element
+}
+
+type els []Element
+
+func (l els) len() int { return len(l) }
+func (l els) item(i int) Element { return l[i] }
+
+type ils []IElement
+
+func (l ils) len() int { return len(l) }
+func (l ils) item(i int) Element { return l[i] }
diff --git a/src/margo.sh/internal/compat/lifecycle_test.go b/src/margo.sh/internal/compat/lifecycle_test.go
new file mode 100644
index 00000000..929108b2
--- /dev/null
+++ b/src/margo.sh/internal/compat/lifecycle_test.go
@@ -0,0 +1,215 @@
+package compat
+
+import (
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type lifecycleState struct {
+ names []string
+}
+
+func (ls *lifecycleState) called() {
+ pc, _, _, _ := runtime.Caller(1)
+ f := runtime.FuncForPC(pc)
+ l := strings.Split(f.Name(), ".")
+ target := l[len(l)-1]
+ for i, s := range ls.names {
+ if s == target {
+ ls.names = append(ls.names[:i], ls.names[i+1:]...)
+ break
+ }
+ }
+}
+
+func (ls *lifecycleState) uncalled() string {
+ return strings.Join(ls.names, ", ")
+}
+
+func (ls *lifecycleState) test(t *testing.T, r mg.Reducer) {
+ t.Helper()
+ ag := newAgent(`{}`)
+ ag.Store.Use(r)
+ ag.Run()
+ if s := ls.uncalled(); s != "" {
+ t.Fatalf("reduction failed to call: %s", s)
+ }
+}
+
+type legacyLifecycleEmbedded struct {
+ mg.Reducer
+}
+
+type lifecycle struct {
+ mg.ReducerType
+ *lifecycleState
+}
+
+func (l *lifecycle) Reduce(mx *mg.Ctx) *mg.State { l.called(); return mx.State }
+func (l *lifecycle) RInit(_ *mg.Ctx) { l.called() }
+func (l *lifecycle) RConfig(_ *mg.Ctx) mg.EditorConfig { l.called(); return nil }
+func (l *lifecycle) RCond(_ *mg.Ctx) bool { l.called(); return true }
+func (l *lifecycle) RMount(_ *mg.Ctx) { l.called() }
+func (l *lifecycle) RUnmount(_ *mg.Ctx) { l.called() }
+
+type legacyLifecycle struct {
+ mg.ReducerType
+ *lifecycleState
+}
+
+func (l *legacyLifecycle) Reduce(mx *mg.Ctx) *mg.State { l.called(); return mx.State }
+func (l *legacyLifecycle) ReducerInit(_ *mg.Ctx) { l.called() }
+func (l *legacyLifecycle) ReducerConfig(_ *mg.Ctx) mg.EditorConfig { l.called(); return nil }
+func (l *legacyLifecycle) ReducerCond(_ *mg.Ctx) bool { l.called(); return true }
+func (l *legacyLifecycle) ReducerMount(_ *mg.Ctx) { l.called() }
+func (l *legacyLifecycle) ReducerUnmount(_ *mg.Ctx) { l.called() }
+
+type lifecycleEmbedded struct {
+ mg.Reducer
+}
+
+func TestLifecycleMethodCalls(t *testing.T) {
+ names := func() []string {
+ return []string{
+ "Reduce", "RInit", "RConfig",
+ "RCond", "RMount", "RUnmount",
+ }
+ }
+ legacyNames := func() []string {
+ return []string{
+ "Reduce", "ReducerInit", "ReducerConfig",
+ "ReducerCond", "ReducerMount", "ReducerUnmount",
+ }
+ }
+ t.Run("Direct Calls", func(t *testing.T) {
+ ls := &lifecycleState{names: names()}
+ ls.test(t, &lifecycle{lifecycleState: ls})
+ })
+ t.Run("Embedded Calls", func(t *testing.T) {
+ ls := &lifecycleState{names: names()}
+ ls.test(t, &lifecycleEmbedded{&lifecycle{lifecycleState: ls}})
+ })
+ t.Run("Legacy Direct Calls", func(t *testing.T) {
+ ls := &lifecycleState{names: legacyNames()}
+ ls.test(t, &legacyLifecycle{lifecycleState: ls})
+ })
+ t.Run("Legacy Embedded Calls", func(t *testing.T) {
+ ls := &lifecycleState{names: legacyNames()}
+ ls.test(t, &lifecycleEmbedded{&legacyLifecycle{lifecycleState: ls}})
+ })
+}
+
+func TestLifecycleMountedAndUnmounted(t *testing.T) {
+ cond := false
+ mount := false
+ unmount := false
+ r := &mg.RFunc{
+ Cond: func(*mg.Ctx) bool {
+ cond = true
+ // we want to return false if the reducer mounted
+ // this tests that RUnmount is called if RMount was called
+ // even if RCond returns false at some point in the future
+ //
+ // at the time this test was written, the implementation was wrong
+ // because if RCond returned false, no other method was called
+ // so it would appear correct, but that was just a coincidence
+ if mount {
+ return false
+ }
+ return true
+ },
+ Mount: func(*mg.Ctx) { mount = true },
+ Unmount: func(*mg.Ctx) { unmount = true },
+ }
+ newAgent(`{}`, r).Run()
+ if !cond {
+ t.Fatal("Reducer.RCond was not called")
+ }
+ if !mount {
+ t.Fatal("Reducer.RMount was not called")
+ }
+ if !unmount {
+ t.Fatal("Reducer.RUnmount was not called")
+ }
+}
+
+func TestLifecycleNotMounted(t *testing.T) {
+ init := false
+ config := false
+ cond := false
+ mount := false
+ reduce := false
+ unmount := false
+ r := &mg.RFunc{
+ Init: func(*mg.Ctx) { init = true },
+ Config: func(*mg.Ctx) mg.EditorConfig { config = true; return nil },
+ Cond: func(*mg.Ctx) bool { cond = true; return false },
+ Mount: func(*mg.Ctx) { mount = true },
+ Func: func(mx *mg.Ctx) *mg.State { reduce = true; return mx.State },
+ Unmount: func(*mg.Ctx) { unmount = true },
+ }
+ newAgent(`{}`, r).Run()
+ if !init {
+ t.Fatal("Reducer.RInit was not called")
+ }
+ if !config {
+ t.Fatal("Reducer.RConfig was not called")
+ }
+ if !cond {
+ t.Fatal("Reducer.RCond was not called")
+ }
+ if mount {
+ t.Fatal("Reducer.RMount was called")
+ }
+ if reduce {
+ t.Fatal("Reducer.Reduce was called")
+ }
+ if unmount {
+ t.Fatal("Reducer.RUnmount was called")
+ }
+}
+
+func TestLifecycleMounted(t *testing.T) {
+ init := false
+ config := false
+ mount := false
+ reduce := false
+ unmount := false
+ r := &mg.RFunc{
+ Init: func(*mg.Ctx) { init = true },
+ Config: func(*mg.Ctx) mg.EditorConfig { config = true; return nil },
+ // Cond is implicitly true
+ Mount: func(*mg.Ctx) { mount = true },
+ Func: func(mx *mg.Ctx) *mg.State { reduce = true; return mx.State },
+ Unmount: func(*mg.Ctx) { unmount = true },
+ }
+ newAgent(`{}`, r).Run()
+ if !init {
+ t.Fatal("Reducer.RInit was not called")
+ }
+ if !config {
+ t.Fatal("Reducer.RConfig was not called")
+ }
+ if !mount {
+ t.Fatal("Reducer.RMount was not called")
+ }
+ if !reduce {
+ t.Fatal("Reducer.Reduce was not called")
+ }
+ if !unmount {
+ t.Fatal("Reducer.RUnmount was not called")
+ }
+}
+
+func newAgent(stdinJSON string, use ...mg.Reducer) *mg.Agent {
+ ag, _ := mg.NewAgent(mg.AgentConfig{
+ Stdin: &mgutil.IOWrapper{Reader: strings.NewReader(stdinJSON)},
+ Stdout: &mgutil.IOWrapper{},
+ Stderr: &mgutil.IOWrapper{},
+ })
+ ag.Store.Use(use...)
+ return ag
+}
diff --git a/src/disposa.blue/margo/js/jsonfmt.go b/src/margo.sh/js/jsonfmt.go
similarity index 52%
rename from src/disposa.blue/margo/js/jsonfmt.go
rename to src/margo.sh/js/jsonfmt.go
index 7e160058..1ee453df 100644
--- a/src/disposa.blue/margo/js/jsonfmt.go
+++ b/src/margo.sh/js/jsonfmt.go
@@ -1,38 +1,37 @@
package js
import (
- "disposa.blue/margo/mg"
"encoding/json"
+ "margo.sh/mg"
)
type JsonFmt struct {
+ mg.ReducerType
+
Prefix string
Indent string
}
-func (j JsonFmt) Reduce(mx *mg.Ctx) *mg.State {
- if !mx.View.LangIs("json") {
- return mx.State
- }
- if _, ok := mx.Action.(mg.ViewFmt); !ok {
- return mx.State
- }
+func (j JsonFmt) ReCond(mx *mg.Ctx) bool {
+ return mx.ActionIs(mg.ViewFmt{}) && mx.LangIs(mg.JSON)
+}
+func (j JsonFmt) Reduce(mx *mg.Ctx) *mg.State {
fn := mx.View.Filename()
r, err := mx.View.Open()
if err != nil {
- return mx.Errorf("failed to open %s: %s\n", fn, err)
+ return mx.AddErrorf("failed to open %s: %s\n", fn, err)
}
defer r.Close()
var v interface{}
if err := json.NewDecoder(r).Decode(&v); err != nil {
- return mx.Errorf("failed to unmarshal json %s: %s\n", fn, err)
+ return mx.AddErrorf("failed to unmarshal json %s: %s\n", fn, err)
}
src, err := json.MarshalIndent(v, j.Prefix, j.Indent)
if err != nil {
- return mx.Errorf("failed to marshal json %s: %s\n", fn, err)
+ return mx.AddErrorf("failed to marshal json %s: %s\n", fn, err)
}
- return mx.SetSrc(src)
+ return mx.SetViewSrc(src)
}
diff --git a/src/margo.sh/kimporter/kimporter.go b/src/margo.sh/kimporter/kimporter.go
new file mode 100644
index 00000000..1b81cea8
--- /dev/null
+++ b/src/margo.sh/kimporter/kimporter.go
@@ -0,0 +1,461 @@
+package kimporter
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/token"
+ "go/types"
+ "golang.org/x/crypto/blake2b"
+ "golang.org/x/net/context"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/go/gcexportdata"
+ "margo.sh/golang/gopkg"
+ "margo.sh/golang/goutil"
+ "margo.sh/memo"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+var (
+ pkgC = func() *types.Package {
+ p := types.NewPackage("C", "C")
+ p.MarkComplete()
+ return p
+ }()
+)
+
+type stateKey struct {
+ ImportPath string
+ Dir string
+ CheckFuncs bool
+ CheckImports bool
+ Tests bool
+ Tags string
+ GOARCH string
+ GOOS string
+ GOROOT string
+ GOPATH string
+ NoHash bool
+}
+
+func globalState(mx *mg.Ctx, k stateKey) *state {
+ type K struct{ stateKey }
+ return mx.VFS.ReadMemo(k.Dir, K{k}, func() memo.V {
+ return &state{stateKey: k}
+ }).(*state)
+}
+
+type state struct {
+ stateKey
+ chkAt mgutil.AtomicInt
+ invAt mgutil.AtomicInt
+ imby struct {
+ sync.Mutex
+ l []*state
+ }
+ mu sync.Mutex
+ err error
+ pkg *types.Package
+ hash string
+}
+
+func (ks *state) invalidate(invAt int64) {
+ ks.invAt.Set(invAt)
+ ks.imby.Lock()
+ l := ks.imby.l
+ ks.imby.Unlock()
+ for _, p := range l {
+ p.invalidate(invAt)
+ }
+}
+
+func (ks *state) InvalidateMemo(invAt int64) {
+ ks.invalidate(invAt)
+}
+
+func (ks *stateKey) targets(pp *gopkg.PkgPath) bool {
+ return ks.ImportPath == pp.ImportPath || ks.Dir == pp.Dir
+}
+
+func (ks *state) importedBy(p *state) {
+ ks.imby.Lock()
+ defer ks.imby.Unlock()
+
+ for _, q := range ks.imby.l {
+ if p == q {
+ return
+ }
+ }
+ ks.imby.l = append(ks.imby.l[:len(ks.imby.l):len(ks.imby.l)], p)
+}
+
+func (ks *state) valid(hash string) bool {
+ return ks.chkAt.N() > ks.invAt.N() && (ks.NoHash || ks.hash == hash)
+}
+
+func (ks *state) result() (*types.Package, error) {
+ switch {
+ case ks.err != nil:
+ return nil, ks.err
+ case !ks.pkg.Complete():
+ // Package exists but is not complete - we cannot handle this
+ // at the moment since the source importer replaces the package
+ // wholesale rather than augmenting it (see #19337 for details).
+ // Return incomplete package with error (see #16088).
+ return ks.pkg, fmt.Errorf("reimported partially imported package %q", ks.ImportPath)
+ default:
+ return ks.pkg, nil
+ }
+}
+
+type Config struct {
+ SrcMap map[string][]byte
+ CheckFuncs bool
+ CheckImports bool
+ NoConcurrency bool
+ Tests bool
+}
+
+type Importer struct {
+ cfg Config
+ mx *mg.Ctx
+ bld *build.Context
+ ks *state
+ mp *gopkg.ModPath
+ par *Importer
+ tags string
+ hash string
+}
+
+func (kp *Importer) Import(path string) (*types.Package, error) {
+ return kp.ImportFrom(path, ".", 0)
+}
+
+func (kp *Importer) ImportFrom(ipath, srcDir string, mode types.ImportMode) (*types.Package, error) {
+ // TODO: add support for unsaved-files without a package
+ if mode != 0 {
+ panic("non-zero import mode")
+ }
+ return kp.importFrom(ipath, srcDir)
+}
+
+func (kp *Importer) importFrom(ipath, srcDir string) (*types.Package, error) {
+ if pkg := kp.importFakePkg(ipath); pkg != nil {
+ return pkg, nil
+ }
+ if p, err := filepath.Abs(srcDir); err == nil {
+ srcDir = p
+ }
+ if !filepath.IsAbs(srcDir) {
+ return nil, fmt.Errorf("srcDir is not absolute: %s", srcDir)
+ }
+ pp, err := kp.findPkg(ipath, srcDir)
+ if err != nil {
+ return nil, err
+ }
+ return kp.importPkg(pp)
+}
+
+func (kp *Importer) findPkg(ipath, srcDir string) (*gopkg.PkgPath, error) {
+ kp.mx.Profile.Push(`Kim-Porter: findPkg(` + ipath + `)`).Pop()
+ pp, err := kp.mp.FindPkg(kp.mx, ipath, srcDir)
+ return pp, err
+}
+
+func (kp *Importer) stateKey(pp *gopkg.PkgPath) stateKey {
+ cfg := kp.cfg
+ return stateKey{
+ ImportPath: pp.ImportPath,
+ Dir: pp.Dir,
+ CheckFuncs: cfg.CheckFuncs,
+ CheckImports: cfg.CheckImports,
+ Tests: cfg.Tests,
+ Tags: kp.tags,
+ GOOS: kp.bld.GOOS,
+ GOARCH: kp.bld.GOARCH,
+ GOROOT: kp.bld.GOROOT,
+ GOPATH: strings.Join(mgutil.PathList(kp.bld.GOPATH), string(filepath.ListSeparator)),
+ NoHash: kp.hash == "",
+ }
+}
+
+func (kp *Importer) state(pp *gopkg.PkgPath) *state {
+ return globalState(kp.mx, kp.stateKey(pp))
+}
+
+func (kp *Importer) detectCycle(pp *gopkg.PkgPath) error {
+ defer kp.mx.Profile.Start(`Kim-Porter: detectCycle()`).Stop()
+
+ for p := kp; p != nil; p = p.par {
+ if p.ks == nil || !p.ks.targets(pp) {
+ continue
+ }
+ l := []string{pp.ImportPath + "(" + pp.Dir + ")"}
+ for p := kp; ; p = p.par {
+ if p.ks == nil {
+ continue
+ }
+ l = append(l, p.ks.ImportPath+"("+p.ks.Dir+")")
+ if p.ks.targets(pp) {
+ return fmt.Errorf("import cycle: %s", strings.Join(l, " <~ "))
+ }
+ }
+ }
+ return nil
+}
+
+func (kp *Importer) importPkg(pp *gopkg.PkgPath) (pkg *types.Package, err error) {
+ title := `Kim-Porter: import(` + pp.ImportPath + `)`
+ defer kp.mx.Profile.Push(title).Pop()
+ defer kp.mx.Begin(mg.Task{Title: title}).Done()
+
+ if err := kp.detectCycle(pp); err != nil {
+ return nil, err
+ }
+ ks := kp.state(pp)
+ kx := kp.branch(ks, pp)
+ ks.mu.Lock()
+ defer ks.mu.Unlock()
+
+ if ks.valid(kp.hash) {
+ return ks.result()
+ }
+ chkAt := memo.InvAt()
+ ks.pkg, ks.err = kx.check(ks, pp)
+ ks.hash = kp.hash
+ ks.chkAt.Set(chkAt)
+ return ks.result()
+}
+
+func (kp *Importer) check(ks *state, pp *gopkg.PkgPath) (*types.Package, error) {
+ fset := token.NewFileSet()
+ bp, _, astFiles, err := parseDir(kp.mx, kp.bld, fset, pp.Dir, kp.cfg.SrcMap, ks)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err := kp.importDeps(ks, bp, fset, astFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(bp.CgoFiles) != 0 {
+ pkg, err := kp.importCgoPkg(pp, imports)
+ if err == nil {
+ return pkg, nil
+ }
+ }
+
+ defer kp.mx.Profile.Push(`Kim-Porter: typecheck(` + ks.ImportPath + `)`).Pop()
+ var hardErr error
+ tc := types.Config{
+ FakeImportC: true,
+ IgnoreFuncBodies: !ks.CheckFuncs,
+ DisableUnusedImportCheck: !ks.CheckImports,
+ Error: func(err error) {
+ if te, ok := err.(types.Error); ok && !te.Soft && hardErr == nil {
+ hardErr = err
+ }
+ },
+ Importer: kp,
+ Sizes: types.SizesFor(kp.bld.Compiler, kp.bld.GOARCH),
+ }
+ pkg, err := tc.Check(bp.ImportPath, fset, astFiles, nil)
+ if err == nil && hardErr != nil {
+ err = hardErr
+ }
+ return pkg, err
+}
+
+func (kp *Importer) importCgoPkg(pp *gopkg.PkgPath, imports map[string]*types.Package) (*types.Package, error) {
+ name := `go`
+ args := []string{`list`, `-e`, `-export`, `-f={{.Export}}`, pp.Dir}
+ ctx, cancel := context.WithCancel(context.Background())
+ title := `Kim-Porter: importCgoPkg` + mgutil.QuoteCmd(name, args...) + `)`
+ defer kp.mx.Profile.Push(title).Pop()
+ defer kp.mx.Begin(mg.Task{Title: title, Cancel: cancel}).Done()
+
+ buf := &bytes.Buffer{}
+ cmd := exec.CommandContext(ctx, name, args...)
+ cmd.Dir = pp.Dir
+ cmd.Stdout = buf
+ cmd.Env = kp.mx.Env.Environ()
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%s: %s", title, err)
+ }
+ fn := string(bytes.TrimSpace(buf.Bytes()))
+ f, err := os.Open(fn)
+ if err != nil {
+ return nil, fmt.Errorf("cannot open %s.a: %s", pp.ImportPath, err)
+ }
+ defer f.Close()
+ rd, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return nil, fmt.Errorf("cannot create export data reader for %s from %s: %s", pp.ImportPath, fn, err)
+ }
+ pkg, err := gcexportdata.Read(rd, token.NewFileSet(), imports, pp.ImportPath)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read export data for %s from %s: %s", pp.ImportPath, fn, err)
+ }
+ return pkg, nil
+}
+
+func (kp *Importer) importFakePkg(ipath string) *types.Package {
+ switch ipath {
+ case "unsafe":
+ return types.Unsafe
+ case "C":
+ return pkgC
+ }
+ return nil
+}
+
+func (kp *Importer) importDeps(ks *state, bp *build.Package, fset *token.FileSet, astFiles []*ast.File) (map[string]*types.Package, error) {
+ defer kp.mx.Profile.Push(`Kim-Porter: importDeps(` + ks.ImportPath + `)`).Pop()
+
+ paths := mgutil.StrSet(bp.Imports)
+ if ks.Tests {
+ paths = paths.Add(bp.TestImports...)
+ }
+ mu := sync.Mutex{}
+ imports := make(map[string]*types.Package, len(paths))
+ doImport := func(ipath string) error {
+ pkg, err := kp.importFrom(ipath, bp.Dir)
+ if err == nil {
+ mu.Lock()
+ imports[ipath] = pkg
+ mu.Unlock()
+ return nil
+ }
+ for _, af := range astFiles {
+ for _, spec := range af.Imports {
+ if spec.Path == nil {
+ continue
+ }
+ s, _ := strconv.Unquote(spec.Path.Value)
+ if ipath != s {
+ continue
+ }
+ tp := fset.Position(spec.Pos())
+ return mg.Issue{
+ Path: tp.Filename,
+ Row: tp.Line - 1,
+ Col: tp.Column - 1,
+ Message: err.Error(),
+ }
+ }
+ }
+ return err
+ }
+ if kp.cfg.NoConcurrency || len(paths) < 2 {
+ for _, ipath := range paths {
+ if err := doImport(ipath); err != nil {
+ return imports, err
+ }
+ }
+ return imports, nil
+ }
+ imps := make(chan string, len(paths))
+ for _, ipath := range paths {
+ imps <- ipath
+ }
+ close(imps)
+ errg := &errgroup.Group{}
+ for i := 0; i < mgutil.MinNumCPU(len(paths)); i++ {
+ errg.Go(func() error {
+ for ipath := range imps {
+ if err := doImport(ipath); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ return imports, errg.Wait()
+}
+
+func (kp *Importer) setupJs(pp *gopkg.PkgPath) {
+ fs := kp.mx.VFS
+ nd := fs.Poke(kp.bld.GOROOT).Poke("src/syscall/js")
+ if fs.Poke(pp.Dir) != nd && fs.Poke(kp.mx.View.Dir()) != nd {
+ return
+ }
+ bld := *kp.bld
+ bld.GOOS = "js"
+ bld.GOARCH = "wasm"
+ kp.bld = &bld
+}
+
+func (kp *Importer) branch(ks *state, pp *gopkg.PkgPath) *Importer {
+ kx := *kp
+ if pp.Mod != nil {
+ kx.mp = pp.Mod
+ }
+ if kp.ks != nil {
+	// TODO: we need to clear this if it's no longer true
+ ks.importedBy(kp.ks)
+ }
+ // user settings don't apply when checking deps
+ kx.cfg.CheckFuncs = false
+ kx.cfg.CheckImports = false
+ kx.cfg.Tests = false
+ kx.hash = ""
+ kx.ks = ks
+ kx.par = kp
+ kx.setupJs(pp)
+ return &kx
+}
+
+func New(mx *mg.Ctx, cfg *Config) *Importer {
+ bld := goutil.BuildContext(mx)
+ bld.BuildTags = append(bld.BuildTags, "netgo", "osusergo")
+ kp := &Importer{
+ mx: mx,
+ bld: bld,
+ tags: tagsStr(bld.BuildTags),
+ }
+ if cfg != nil {
+ kp.cfg = *cfg
+ kp.hash = srcMapHash(cfg.SrcMap)
+ }
+ return kp
+}
+
+func srcMapHash(m map[string][]byte) string {
+ if len(m) == 0 {
+ return ""
+ }
+ fns := make(sort.StringSlice, len(m))
+ for fn, _ := range m {
+ fns = append(fns, fn)
+ }
+ fns.Sort()
+ b2, _ := blake2b.New256(nil)
+ for _, fn := range fns {
+ b2.Write([]byte(fn))
+ b2.Write(m[fn])
+ }
+ return hex.EncodeToString(b2.Sum(nil))
+}
+
+func tagsStr(l []string) string {
+ switch len(l) {
+ case 0:
+ return ""
+ case 1:
+ return l[0]
+ }
+ s := append(sort.StringSlice{}, l...)
+ s.Sort()
+ return strings.Join(s, " ")
+}
diff --git a/src/margo.sh/kimporter/parse.go b/src/margo.sh/kimporter/parse.go
new file mode 100644
index 00000000..bc6c49fb
--- /dev/null
+++ b/src/margo.sh/kimporter/parse.go
@@ -0,0 +1,90 @@
+package kimporter
+
+import (
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "margo.sh/mg"
+ "path/filepath"
+ "sync"
+)
+
+type kpFile struct {
+ CheckFuncs bool
+ Mx *mg.Ctx
+ Fset *token.FileSet
+ Fn string
+ Src []byte
+ Err error
+ *ast.File
+}
+
+func (kf *kpFile) init() {
+ if len(kf.Src) == 0 {
+ kf.Src, kf.Err = kf.Mx.VFS.ReadBlob(kf.Fn).ReadFile()
+ if kf.Err != nil {
+ return
+ }
+ }
+ // TODO: try to patch up some of the broken files
+ kf.File, kf.Err = parser.ParseFile(kf.Fset, kf.Fn, kf.Src, 0)
+ if kf.File == nil {
+ return
+ }
+ if !kf.CheckFuncs {
+ // trim func bodies to reduce memory
+ for _, d := range kf.Decls {
+ switch d := d.(type) {
+ case *ast.FuncDecl:
+ d.Body = &ast.BlockStmt{}
+ }
+ }
+ }
+}
+
+func parseDir(mx *mg.Ctx, bcx *build.Context, fset *token.FileSet, dir string, srcMap map[string][]byte, ks *state) (*build.Package, []*kpFile, []*ast.File, error) {
+ defer mx.Profile.Push(`Kim-Porter: parseDir(` + dir + `)`).Pop()
+
+ bp, err := bcx.ImportDir(dir, 0)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ wg := sync.WaitGroup{}
+ testFiles := bp.TestGoFiles
+ if !ks.Tests {
+ testFiles = nil
+ }
+ kpFiles := make([]*kpFile, 0, len(bp.GoFiles)+len(bp.CgoFiles)+len(testFiles))
+ for _, l := range [][]string{bp.GoFiles, bp.CgoFiles, testFiles} {
+ for _, nm := range l {
+ fn := filepath.Join(dir, nm)
+ kf := &kpFile{
+ Mx: mx,
+ Fset: fset,
+ Fn: fn,
+ Src: srcMap[fn],
+ CheckFuncs: ks.CheckFuncs,
+ }
+ kpFiles = append(kpFiles, kf)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ kf.init()
+ }()
+ }
+ }
+ wg.Wait()
+
+ astFiles := make([]*ast.File, 0, len(kpFiles))
+ for _, kf := range kpFiles {
+ if kf.File != nil {
+ astFiles = append(astFiles, kf.File)
+ }
+ if err == nil && kf.Err != nil {
+ err = kf.Err
+ }
+ }
+ return bp, kpFiles, astFiles, err
+}
diff --git a/src/margo.sh/main.go b/src/margo.sh/main.go
new file mode 100644
index 00000000..645d231d
--- /dev/null
+++ b/src/margo.sh/main.go
@@ -0,0 +1,9 @@
+package main // import "margo.sh"
+
+import (
+ "margo.sh/cmdpkg/margo"
+)
+
+func main() {
+ margo.Main()
+}
diff --git a/src/margo.sh/memo/memo.go b/src/margo.sh/memo/memo.go
new file mode 100644
index 00000000..43662fa3
--- /dev/null
+++ b/src/margo.sh/memo/memo.go
@@ -0,0 +1,164 @@
+package memo
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+var (
+ invAtState int64
+)
+
+type K = interface{}
+
+type V = interface{}
+
+type Sticky interface {
+ InvalidateMemo(invAt int64)
+}
+
+func InvAt() int64 {
+ return atomic.AddInt64(&invAtState, 1)
+}
+
+type memo struct {
+ k K
+ sync.Mutex
+ v V
+}
+
+func (m *memo) value() V {
+ if m == nil {
+ return nil
+ }
+ m.Lock()
+ defer m.Unlock()
+
+ return m.v
+}
+
+type M struct {
+ mu sync.Mutex
+ ml []*memo
+}
+
+func (m *M) index(k K) (int, *memo) {
+ for i, p := range m.ml {
+ if p.k == k {
+ return i, p
+ }
+ }
+ return -1, nil
+}
+
+func (m *M) memo(k K) *memo {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ _, p := m.index(k)
+ if p == nil {
+ p = &memo{k: k}
+ m.ml = append(m.ml, p)
+ }
+ return p
+}
+
+func (m *M) Peek(k K) V {
+ if m == nil {
+ return nil
+ }
+
+ m.mu.Lock()
+ _, p := m.index(k)
+ m.mu.Unlock()
+
+ return p.value()
+}
+
+func (m *M) Read(k K, new func() V) V {
+ if m == nil {
+ return new()
+ }
+
+ p := m.memo(k)
+ p.Lock()
+ defer p.Unlock()
+
+ if p.v != nil {
+ return p.v
+ }
+ p.v = new()
+ if p.v != nil {
+ return p.v
+ }
+ m.Del(k)
+ return nil
+}
+
+func (m *M) Del(k K) {
+ if m == nil {
+ return
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ i, _ := m.index(k)
+ if i < 0 {
+ return
+ }
+
+ m.ml[i] = m.ml[len(m.ml)-1]
+ m.ml[len(m.ml)-1] = nil
+ m.ml = m.ml[:len(m.ml)-1]
+}
+
+func (m *M) Clear() {
+ if m == nil {
+ return
+ }
+ invAt := InvAt()
+ stkl := m.clear()
+ for _, stk := range stkl {
+ stk.InvalidateMemo(invAt)
+ }
+}
+
+func (m *M) clear() []Sticky {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ ml := m.ml
+ m.ml = nil
+ stkl := []Sticky{}
+ for _, p := range ml {
+ if stk, ok := p.value().(Sticky); ok {
+ m.ml = append(m.ml, p)
+ stkl = append(stkl, stk)
+ }
+ }
+ return stkl
+}
+
+func (m *M) Values() map[K]V {
+ vals := map[K]V{}
+ m.Range(func(k K, v V) {
+ vals[k] = v
+ })
+ return vals
+}
+
+func (m *M) Range(f func(k K, v V)) {
+ if m == nil {
+ return
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ for _, p := range m.ml {
+ if v := p.value(); v != nil {
+ f(p.k, v)
+ }
+ }
+}
diff --git a/src/margo.sh/mg/action.go b/src/margo.sh/mg/action.go
new file mode 100644
index 00000000..8109dbe6
--- /dev/null
+++ b/src/margo.sh/mg/action.go
@@ -0,0 +1,169 @@
+package mg
+
+import (
+ "margo.sh/mg/actions"
+ "reflect"
+)
+
+var (
+ ActionCreators = (&actions.Registry{}).
+ Register("QueryCompletions", QueryCompletions{}).
+ Register("QueryCmdCompletions", QueryCmdCompletions{}).
+ Register("QueryIssues", QueryIssues{}).
+ Register("Restart", Restart{}).
+ Register("Shutdown", Shutdown{}).
+ Register("ViewActivated", ViewActivated{}).
+ Register("ViewFmt", ViewFmt{}).
+ Register("DisplayIssues", DisplayIssues{}).
+ Register("ViewLoaded", ViewLoaded{}).
+ Register("ViewModified", ViewModified{}).
+ Register("ViewPosChanged", ViewPosChanged{}).
+ Register("ViewPreSave", ViewPreSave{}).
+ Register("ViewSaved", ViewSaved{}).
+ Register("QueryUserCmds", QueryUserCmds{}).
+ Register("QueryTestCmds", QueryTestCmds{}).
+ Register("RunCmd", RunCmd{}).
+ Register("QueryTooltips", QueryTooltips{})
+)
+
+// initAction is dispatched to indicate the start of IPC communication.
+// It's the first action that is dispatched.
+type initAction struct{ ActionType }
+
+type ActionType = actions.ActionType
+
+type Action = actions.Action
+
+type DisplayIssues struct{ ActionType }
+
+func (di DisplayIssues) ClientAction() actions.ClientData {
+ return actions.ClientData{Name: "DisplayIssues", Data: di}
+}
+
+type Activate struct {
+ ActionType
+
+ Path string
+ Name string
+ Row int
+ Col int
+}
+
+func (a Activate) ClientAction() actions.ClientData {
+ return actions.ClientData{Name: "Activate", Data: a}
+}
+
+var Render Action = nil
+
+type QueryCompletions struct{ ActionType }
+
+type QueryCmdCompletions struct {
+ ActionType
+
+ Pos int
+ Src string
+ Name string
+ Args []string
+}
+
+type QueryIssues struct{ ActionType }
+
+// Restart is the action dispatched to initiate a graceful restart of the agent
+type Restart struct{ ActionType }
+
+func (r Restart) ClientAction() actions.ClientData {
+ return actions.ClientData{Name: "Restart"}
+}
+
+// Shutdown is the action dispatched to initiate a graceful shutdown of the agent
+type Shutdown struct{ ActionType }
+
+func (s Shutdown) ClientAction() actions.ClientData {
+ return actions.ClientData{Name: "Shutdown"}
+}
+
+type QueryTooltips struct {
+ ActionType
+
+ Row int
+ Col int
+}
+
+type ViewActivated struct{ ActionType }
+
+type ViewModified struct{ ActionType }
+
+type ViewPosChanged struct{ ActionType }
+
+type ViewFmt struct{ ActionType }
+
+type ViewPreSave struct{ ActionType }
+
+type ViewSaved struct{ ActionType }
+
+type ViewLoaded struct{ ActionType }
+
+type unmount struct{ ActionType }
+
+type ctxActs struct {
+ l []Action
+ i int
+}
+
+func (a *ctxActs) Len() int {
+ return len(a.List())
+}
+
+func (a *ctxActs) Index() int {
+ if a.Len() == 0 {
+ return -1
+ }
+ return a.i
+}
+
+func (a *ctxActs) Current() Action {
+ i := a.Index()
+ if i < 0 || i >= a.Len() {
+ return nil
+ }
+ return a.l[i]
+}
+
+func (a *ctxActs) First() bool {
+ return a.Index() == 0
+}
+
+func (a *ctxActs) Last() bool {
+ return a.Index() == a.Len()-1
+}
+
+func (a *ctxActs) List() []Action {
+ if a == nil {
+ return nil
+ }
+ return a.l
+}
+
+func (a *ctxActs) Include(actions ...Action) bool {
+ for _, p := range a.List() {
+ pt := reflect.TypeOf(p)
+ for _, q := range actions {
+ if reflect.TypeOf(q) == pt {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (a *ctxActs) Set(actPtr interface{}) bool {
+ p := reflect.ValueOf(actPtr).Elem()
+ for _, v := range a.List() {
+ q := reflect.ValueOf(v)
+ if p.Type() == q.Type() {
+ p.Set(q)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/margo.sh/mg/actions/action.go b/src/margo.sh/mg/actions/action.go
new file mode 100644
index 00000000..79988d40
--- /dev/null
+++ b/src/margo.sh/mg/actions/action.go
@@ -0,0 +1,49 @@
+package actions
+
+import (
+ "github.com/ugorji/go/codec"
+)
+
+// Action is an object that's dispatched to update an agent's state.
+type Action interface {
+ actionType()
+
+ // ActionLabel returns the name of an action for display in the editor ui.
+ ActionLabel() string
+}
+
+// ActionType is the base implementation of an action.
+type ActionType struct{}
+
+func (ActionType) actionType() {}
+
+// ActionLabel implemented Action.ActionLabel().
+func (ActionType) ActionLabel() string { return "" }
+
+// ActionData is data coming from the client.
+type ActionData struct {
+ // Name is the name of the action
+ Name string
+
+ // Data is the raw encoded data of the action
+ Data codec.Raw
+
+ // Handle is the handle to use for decoding Data
+ Handle codec.Handle
+}
+
+// Decode decodes the encoded data into the action pointer p.
+func (d ActionData) Decode(p interface{}) error {
+ return codec.NewDecoderBytes(d.Data, d.Handle).Decode(p)
+}
+
+// ClientAction is an action that may be sent to the client.
+type ClientAction interface {
+ ClientAction() ClientData
+}
+
+// ClientData is the marshal-able form of a ClientAction.
+type ClientData struct {
+ Name string
+ Data interface{}
+}
diff --git a/src/margo.sh/mg/actions/registry.go b/src/margo.sh/mg/actions/registry.go
new file mode 100644
index 00000000..dc33af13
--- /dev/null
+++ b/src/margo.sh/mg/actions/registry.go
@@ -0,0 +1,62 @@
+package actions
+
+import (
+ "reflect"
+ "sync"
+)
+
+// ActionCreator creates a new action.
+type ActionCreator func(ActionData) (Action, error)
+
+// Registry is a map of known action creators for actions coming from the client
+type Registry struct {
+ mu sync.RWMutex
+ m map[string]ActionCreator
+}
+
+// Lookup returns the action creator named name, or nil if it doesn't exist.
+func (r *Registry) Lookup(name string) ActionCreator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ return r.m[name]
+}
+
+// Register is the equivalent of RegisterCreator(name, MakeActionCreator(zero)).
+func (r *Registry) Register(name string, zero Action) *Registry {
+ return r.RegisterCreator(name, MakeActionCreator(zero))
+}
+
+// RegisterCreator registers the action creator f.
+//
+// NOTE: If a function is already registered with that name, it panics.
+func (r *Registry) RegisterCreator(name string, f ActionCreator) *Registry {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if _, exists := r.m[name]; exists {
+ panic("ActionCreator " + name + " is already registered")
+ }
+
+ if r.m == nil {
+ r.m = map[string]ActionCreator{}
+ }
+
+ r.m[name] = f
+ return r
+}
+
+// MakeActionCreator returns an action creator that decodes the ActionData into a copy of zero.
+func MakeActionCreator(zero Action) ActionCreator {
+ z := reflect.ValueOf(zero)
+ t := z.Type()
+ return func(d ActionData) (_ Action, err error) {
+ p := reflect.New(t)
+ v := p.Elem()
+ v.Set(z)
+ if len(d.Data) != 0 {
+ err = d.Decode(p.Interface())
+ }
+ return v.Interface().(Action), err
+ }
+}
diff --git a/src/margo.sh/mg/agent.go b/src/margo.sh/mg/agent.go
new file mode 100644
index 00000000..6ad1b510
--- /dev/null
+++ b/src/margo.sh/mg/agent.go
@@ -0,0 +1,353 @@
+package mg
+
+import (
+ "bufio"
+ "fmt"
+ "github.com/ugorji/go/codec"
+ "io"
+ "margo.sh/mg/actions"
+ "margo.sh/mgpf"
+ "margo.sh/mgutil"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ // DefaultCodec is the name of the default codec used for IPC communication
+ DefaultCodec = "json"
+
+ // codecHandles is the map of all valid codec handles
+ codecHandles = func() map[string]codec.Handle {
+ m := map[string]codec.Handle{
+ "cbor": &codec.CborHandle{},
+ "json": &codec.JsonHandle{
+ Indent: 2,
+ TermWhitespace: true,
+ },
+ "msgpack": &codec.MsgpackHandle{},
+ }
+ m[""] = m[DefaultCodec]
+ return m
+ }()
+
+ // CodecNames is the list of names of all valid codec handles
+ CodecNames = func() []string {
+ l := make([]string, 0, len(codecHandles))
+ for k, _ := range codecHandles {
+ if k != "" {
+ l = append(l, k)
+ }
+ }
+ sort.Strings(l)
+ return l
+ }()
+
+ // CodecNamesStr is the list of names of all valid codec handles in the form `a, b or c`
+ CodecNamesStr = func() string {
+ i := len(CodecNames) - 1
+ return strings.Join(CodecNames[:i], ", ") + " or " + CodecNames[i]
+
+ }()
+)
+
+type AgentConfig struct {
+ // the name of the agent as used in the command `margo.sh [start...] $AgentName`
+ AgentName string
+
+ // Codec is the name of the codec to use for IPC
+ // Valid values are json, cbor or msgpack
+ // Default: json
+ Codec string
+
+ // Stdin is the stream through which the client sends encoded request data
+ // It's closed when Agent.Run() returns
+ Stdin io.ReadCloser
+
+ // Stdout is the stream through which the server (the Agent type) sends encoded responses
+ // It's closed when Agent.Run() returns
+ Stdout io.WriteCloser
+
+ // Stderr is used for logging
+ // Clients are encouraged to leave it open until the process exits
+ // to allow for logging to keep working during process shutdown
+ Stderr io.Writer
+}
+
+type agentReq struct {
+ Cookie string
+ Actions []actions.ActionData
+ Props clientProps
+ Sent string
+ Profile *mgpf.Profile
+}
+
+func newAgentReq(kvs KVStore) *agentReq {
+ return &agentReq{
+ Props: makeClientProps(kvs),
+ Profile: mgpf.NewProfile(""),
+ }
+}
+
+func (rq *agentReq) finalize(ag *Agent) {
+ rq.Profile.SetName(rq.Cookie)
+ const layout = "2006-01-02T15:04:05.000000"
+ if t, err := time.ParseInLocation(layout, rq.Sent, time.UTC); err == nil {
+ rq.Profile.Sample("ipc|transport", time.Since(t))
+ }
+ rq.Props.finalize(ag)
+ for i, _ := range rq.Actions {
+ rq.Actions[i].Handle = ag.handle
+ }
+}
+
+type agentRes struct {
+ Cookie string
+ Error string
+ State *State
+}
+
+func (rs agentRes) finalize() interface{} {
+ out := struct {
+ _struct struct{} `codec:",omitempty"`
+
+ agentRes
+ State struct {
+ _struct struct{} `codec:",omitempty"`
+ Profile,
+ Editor,
+ Env struct{}
+
+ State
+ Config interface{}
+ ClientActions []actions.ClientData
+ Status []string
+ Issues IssueSet
+ }
+ }{}
+
+ out.agentRes = rs
+ if rs.State == nil {
+ return out
+ }
+
+ out.State.State = *rs.State
+ inSt := &out.State.State
+ outSt := &out.State
+
+ outSt.Issues = outSt.Issues.merge(outSt.View, inSt.Issues...)
+
+ outSt.Status = make([]string, len(inSt.Status))
+ for i, s := range inSt.Status {
+ outSt.Status[i] = StatusPrefix + s
+ }
+
+ outSt.ClientActions = inSt.clientActions
+
+ if out.Error == "" {
+ out.Error = strings.Join([]string(outSt.Errors), "\n")
+ }
+
+ if outSt.View.changed == 0 {
+ outSt.View = nil
+ }
+
+ if ec := inSt.Config; ec != nil {
+ outSt.Config = ec.EditorConfig()
+ }
+
+ return out
+}
+
+type Agent struct {
+ Name string
+ Done <-chan struct{}
+ Log *Logger
+ Store *Store
+
+ mu sync.Mutex
+
+ stdin io.ReadCloser
+ stdout io.WriteCloser
+ stderr io.Writer
+
+ handle codec.Handle
+ enc *codec.Encoder
+ encWr *bufio.Writer
+ dec *codec.Decoder
+ wg sync.WaitGroup
+
+ sd struct {
+ mu sync.Mutex
+ done chan<- struct{}
+ closed bool
+ }
+ closed bool
+}
+
+// Run starts the Agent's event loop. It returns immediately on the first error.
+func (ag *Agent) Run() error {
+ defer ag.shutdown()
+ return ag.communicate()
+}
+
+func (ag *Agent) communicate() error {
+ sto := ag.Store
+ unsub := sto.Subscribe(ag.sub)
+ defer unsub()
+
+ sto.mount()
+
+ for {
+ rq := newAgentReq(sto)
+ if err := ag.dec.Decode(rq); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return fmt.Errorf("ipc.decode: %s", err)
+ }
+
+ rq.finalize(ag)
+ ag.handleReq(rq)
+ }
+}
+
+func (ag *Agent) handleReq(rq *agentReq) {
+ rq.Profile.Push("queue.wait")
+ ag.wg.Add(1)
+ ag.Store.dsp.hi <- func() {
+ defer ag.wg.Done()
+ rq.Profile.Pop()
+
+ ag.Store.handleReq(rq)
+ }
+}
+
+func (ag *Agent) createAction(d actions.ActionData) (Action, error) {
+ if create := ActionCreators.Lookup(d.Name); create != nil {
+ return create(d)
+ }
+ return nil, fmt.Errorf("Unknown action: %s", d.Name)
+}
+
+func (ag *Agent) sub(mx *Ctx) {
+ err := ag.send(agentRes{
+ State: mx.State,
+ Cookie: mx.Cookie,
+ })
+ if err != nil {
+ ag.Log.Println("agent.send failed. shutting down ipc:", err)
+ go ag.shutdown()
+ }
+}
+
+func (ag *Agent) send(res agentRes) error {
+ ag.mu.Lock()
+ defer ag.mu.Unlock()
+
+ defer ag.encWr.Flush()
+ return ag.enc.Encode(res.finalize())
+}
+
+// shutdown sequence:
+// * stop incoming requests
+// * wait for all reqs to complete
+// * tell reducers to unmount
+// * stop outgoing responses
+// * tell the world we're done
+func (ag *Agent) shutdown() {
+ sd := &ag.sd
+ sd.mu.Lock()
+ defer sd.mu.Unlock()
+
+ if sd.closed {
+ return
+ }
+ sd.closed = true
+
+ // defers because we want *some* guarantee that all these steps will be taken
+ defer close(sd.done)
+ defer ag.stdout.Close()
+ defer ag.Store.unmount()
+ defer ag.wg.Wait()
+ defer ag.stdin.Close()
+}
+
+// NewAgent returns a new Agent, initialised using the settings in cfg.
+// If cfg.Codec is invalid (see CodecNames), `DefaultCodec` will be used as the
+// codec and an error returned.
+// An initialised, usable agent object is always returned.
+//
+// For tests, NewTestingAgent(), NewTestingStore() and NewTestingCtx()
+// are preferred to creating a new agent directly
+func NewAgent(cfg AgentConfig) (*Agent, error) {
+ var err error
+ done := make(chan struct{})
+ ag := &Agent{
+ Name: cfg.AgentName,
+ Done: done,
+ stdin: cfg.Stdin,
+ stdout: cfg.Stdout,
+ stderr: cfg.Stderr,
+ handle: codecHandles[cfg.Codec],
+ }
+ ag.sd.done = done
+ if ag.stdin == nil {
+ ag.stdin = os.Stdin
+ }
+ if ag.stdout == nil {
+ ag.stdout = os.Stdout
+ }
+ if ag.stderr == nil {
+ ag.stderr = os.Stderr
+ }
+ ag.stdin = &mgutil.IOWrapper{
+ Locker: &sync.Mutex{},
+ Reader: ag.stdin,
+ Closer: ag.stdin,
+ }
+ ag.stdout = &mgutil.IOWrapper{
+ Locker: &sync.Mutex{},
+ Writer: ag.stdout,
+ Closer: ag.stdout,
+ }
+ ag.stderr = &mgutil.IOWrapper{
+ Locker: &sync.Mutex{},
+ Writer: ag.stderr,
+ }
+ ag.Log = NewLogger(ag.stderr)
+
+ ag.Store = newStore(ag, ag.sub)
+ dr := DefaultReducers
+ dr.mu.Lock()
+ ag.Store.Before(dr.before...)
+ ag.Store.Use(dr.use...)
+ ag.Store.After(dr.after...)
+ dr.mu.Unlock()
+
+ if e := os.Getenv("MARGO_BUILD_ERROR"); e != "" {
+ ag.Store.Use(NewReducer(func(mx *Ctx) *State {
+ return mx.AddStatus(e)
+ }))
+ }
+
+ if ag.handle == nil {
+ err = fmt.Errorf("Invalid codec '%s'. Expected %s", cfg.Codec, CodecNamesStr)
+ ag.handle = codecHandles[DefaultCodec]
+ }
+ ag.encWr = bufio.NewWriter(ag.stdout)
+ ag.enc = codec.NewEncoder(ag.encWr, ag.handle)
+ ag.dec = codec.NewDecoder(bufio.NewReader(ag.stdin), ag.handle)
+
+ return ag, err
+}
+
+// Args returns a new copy of agent's Args.
+func (ag *Agent) Args() Args {
+ return Args{
+ Store: ag.Store,
+ Log: ag.Log,
+ }
+}
diff --git a/src/margo.sh/mg/agent_test.go b/src/margo.sh/mg/agent_test.go
new file mode 100644
index 00000000..94dd6797
--- /dev/null
+++ b/src/margo.sh/mg/agent_test.go
@@ -0,0 +1,142 @@
+package mg
+
+import (
+ "io"
+ "margo.sh/mgutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+// TestDefaults tries to verify some assumptions that are, or will be, made throughout the code-base
+// the following should hold true regardless of what configuration is exposed in the future
+// * the default codec should be json
+// * logs should go to os.Stderr by default
+// * IPC communication should be done on os.Stdin and os.Stdout by default
+func TestDefaults(t *testing.T) {
+ ag, err := NewAgent(AgentConfig{
+ Codec: "invalidcodec",
+ })
+ if err == nil {
+ t.Error("NewAgent() = (nil); want (error)")
+ }
+ if ag == nil {
+ t.Fatal("ag = (nil); want (*Agent)")
+ }
+ if ag.handle != codecHandles[DefaultCodec] {
+ t.Errorf("ag.handle = (%v), want (%v)", ag.handle, codecHandles[DefaultCodec])
+ }
+
+ ag, err = NewAgent(AgentConfig{})
+ if err != nil {
+ t.Fatalf("agent creation failed: %s", err)
+ }
+
+ var stdin io.Reader = ag.stdin
+ if w, ok := stdin.(*mgutil.IOWrapper); ok {
+ stdin = w.Reader
+ }
+ var stdout io.Writer = ag.stdout
+ if w, ok := stdout.(*mgutil.IOWrapper); ok {
+ stdout = w.Writer
+ }
+ var stderr io.Writer = ag.stderr
+ if w, ok := stderr.(*mgutil.IOWrapper); ok {
+ stderr = w.Writer
+ }
+
+ cases := []struct {
+ name string
+ expect interface{}
+ got interface{}
+ }{
+ {`DefaultCodec == json`, true, DefaultCodec == "json"},
+ {`codecHandles[DefaultCodec] exists`, true, codecHandles[DefaultCodec] != nil},
+ {`codecHandles[""] == codecHandles[DefaultCodec]`, true, codecHandles[""] == codecHandles[DefaultCodec]},
+ {`default Agent.stdin`, os.Stdin, stdin},
+ {`default Agent.stdout`, os.Stdout, stdout},
+ {`default Agent.stderr`, os.Stderr, stderr},
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ if c.expect != c.got {
+ t.Errorf("expected '%v', got '%v'", c.expect, c.got)
+ }
+ })
+ }
+}
+
+// TestFirstAction sets up an agent whose stdin yields a single empty
+// request ("{}\n") and installs a reducer that captures the first
+// dispatched Action into a buffered channel.
+//
+// NOTE(review): the agent is never run and the `actions` channel is
+// never read, so this test currently exercises nothing beyond setup —
+// presumably a call to `ag.Run()` and an assertion on `<-actions` are
+// missing; TODO confirm intent.
+func TestFirstAction(t *testing.T) {
+ nrwc := &mgutil.IOWrapper{
+  Reader: strings.NewReader("{}\n"),
+ }
+ ag, err := NewAgent(AgentConfig{
+  Stdin: nrwc,
+  Stdout: nrwc,
+  Stderr: nrwc,
+ })
+ if err != nil {
+  t.Fatalf("agent creation failed: %s", err)
+ }
+
+ // non-blocking send: only the first Action is recorded
+ actions := make(chan Action, 1)
+ ag.Store.Use(NewReducer(func(mx *Ctx) *State {
+  select {
+  case actions <- mx.Action:
+  default:
+  }
+  return mx.State
+ }))
+}
+
+type readWriteCloseStub struct {
+ mgutil.IOWrapper
+ closed bool
+ CloseFunc func() error
+}
+
+func (r *readWriteCloseStub) Close() error { return r.CloseFunc() }
+
+// TestAgentShutdown verifies that when Run() returns (stdin hits EOF
+// immediately here), the agent closes its stdin and stdout streams and
+// marks its shutdown state as closed.
+func TestAgentShutdown(t *testing.T) {
+ // stubs record whether Close() was called on each stream
+ nrc := &readWriteCloseStub{}
+ nwc := &readWriteCloseStub{}
+ nerrc := &readWriteCloseStub{}
+ nrc.CloseFunc = func() error {
+  nrc.closed = true
+  return nil
+ }
+ nwc.CloseFunc = func() error {
+  nwc.closed = true
+  return nil
+ }
+ nerrc.CloseFunc = func() error {
+  nerrc.closed = true
+  return nil
+ }
+
+ ag, err := NewAgent(AgentConfig{
+  Stdin: nrc,
+  Stdout: nwc,
+  Stderr: nerrc,
+  Codec: "msgpack",
+ })
+ if err != nil {
+  t.Fatalf("agent creation: err = (%#v); want (nil)", err)
+ }
+ ag.Store = newStore(ag, ag.sub)
+ err = ag.Run()
+ if err != nil {
+  t.Fatalf("ag.Run() = (%#v); want (nil)", err)
+ }
+
+ if !nrc.closed {
+  t.Error("nrc.Close() was not called")
+ }
+ if !nwc.closed {
+  t.Error("nwc.Close() was not called")
+ }
+ if !ag.sd.closed {
+  t.Error("ag.sd.closed = (false); want (true)")
+ }
+}
diff --git a/src/margo.sh/mg/builtins.go b/src/margo.sh/mg/builtins.go
new file mode 100644
index 00000000..6b6c0acd
--- /dev/null
+++ b/src/margo.sh/mg/builtins.go
@@ -0,0 +1,259 @@
+package mg
+
+import (
+ "bytes"
+ "fmt"
+ "margo.sh/mgutil"
+ "os"
+ "sort"
+ "sync"
+)
+
+var (
+ // Builtins is the set of pre-defined builtin commands
+ Builtins = &builtins{}
+)
+
+// BuiltinCmdList is a list of BuiltinCmds
+type BuiltinCmdList []BuiltinCmd
+
+// Lookup looks up the builtin command `name` in the list.
+// If the command is not found, it returns `Builtins.Commands().Lookup(".exec")`.
+// In either case, `found` indicates whether or not `name` was actually found.
+func (bcl BuiltinCmdList) Lookup(name string) (cmd BuiltinCmd, found bool) {
+ for _, c := range bcl {
+ if c.Name == name {
+ return c, true
+ }
+ }
+ for _, c := range Builtins.Commands() {
+ if c.Name == ".exec" {
+ return c, false
+ }
+ }
+ panic("internal error: the `.exec` BuiltinCmd is not defined")
+}
+
+// Filter returns a copy of the list consisting only
+// of commands for which filter returns true.
+// The commands in the list itself are considered first,
+// followed by the predefined Builtins.Commands().
+func (bcl BuiltinCmdList) Filter(filter func(BuiltinCmd) bool) BuiltinCmdList {
+ keep := BuiltinCmdList{}
+ appendMatches := func(l BuiltinCmdList) {
+  for _, c := range l {
+   if filter(c) {
+    keep = append(keep, c)
+   }
+  }
+ }
+ appendMatches(bcl)
+ appendMatches(Builtins.Commands())
+ return keep
+}
+
+// builtins implements the predefined builtin commands; it is exposed
+// via the package-level `Builtins` variable and acts as a Reducer.
+type builtins struct{ ReducerType }
+
+// ExecCmd implements the `.exec` builtin.
+func (bc builtins) ExecCmd(cx *CmdCtx) *State {
+ go bc.execCmd(cx)
+ return cx.State
+}
+
+func (bc builtins) nopRun(cx *CmdCtx) *State {
+ defer cx.Output.Close()
+ return cx.State
+}
+
+func (bc builtins) execCmd(cx *CmdCtx) {
+ defer cx.Output.Close()
+
+ if cx.Name == ".exec" {
+ if len(cx.Args) == 0 {
+ return
+ }
+ cx = cx.Copy(func(cx *CmdCtx) {
+ cx.Name = cx.Args[0]
+ cx.Args = cx.Args[1:]
+ })
+ }
+
+ cx.RunProc()
+}
+
+// TypeCmd tries to find the cx.Args in commands, and writes the description of
+// the commands into provided buffer. If the Args is empty, it uses all
+// available commands.
+func (bc builtins) TypeCmd(cx *CmdCtx) *State {
+ defer cx.Output.Close()
+
+ cmds := cx.BuiltinCmds
+ names := cx.Args
+ if len(names) == 0 {
+ names = make([]string, len(cmds))
+ for i, c := range cmds {
+ names[i] = c.Name
+ }
+ }
+
+ buf := &bytes.Buffer{}
+ for _, name := range names {
+ c, _ := cmds.Lookup(name)
+ fmt.Fprintf(buf, "%s: builtin: %s, desc: %s\n", name, c.Name, c.Desc)
+ }
+
+ cx.Output.Write(buf.Bytes())
+ return cx.State
+}
+
+// EnvCmd finds all environment variables corresponding to cx.Args into the
+// cx.Output buffer.
+func (bc builtins) EnvCmd(cx *CmdCtx) *State {
+ defer cx.Output.Close()
+
+ buf := &bytes.Buffer{}
+ names := cx.Args
+ if len(names) == 0 {
+ names = make([]string, 0, len(cx.Env))
+ for k, _ := range cx.Env {
+ names = append(names, k)
+ }
+ sort.Strings(names)
+ }
+ for _, k := range names {
+ v := cx.Env.Get(k, os.Getenv(k))
+ fmt.Fprintf(buf, "%s=%s\n", k, v)
+ }
+ cx.Output.Write(buf.Bytes())
+ return cx.State
+}
+
+// Commands returns a list of predefined commands.
+func (bc builtins) Commands() BuiltinCmdList {
+ return []BuiltinCmd{
+ BuiltinCmd{Name: ".env", Desc: "List env vars", Run: bc.EnvCmd},
+ BuiltinCmd{Name: ".exec", Desc: "Run a command through os/exec", Run: bc.ExecCmd},
+ BuiltinCmd{Name: ".type", Desc: "Lists all builtins or which builtin handles a command", Run: bc.TypeCmd},
+
+ // virtual commands implemented by other reducers
+ // these are fallbacks, so no error is reported for the missing command
+ BuiltinCmd{Name: RcActuate, Desc: "Trigger a mouse-like action at the cursor e.g. goto.definition", Run: bc.nopRun},
+ }
+}
+
+// Reduce adds the list of predefined builtins for the RunCmd.
+func (bc builtins) Reduce(mx *Ctx) *State {
+ if _, ok := mx.Action.(RunCmd); ok {
+ return mx.State.AddBuiltinCmds(bc.Commands()...)
+ }
+ return mx.State
+}
+
+// CmdCtx holds details about a command execution
+type CmdCtx struct {
+ // Ctx is the underlying Ctx for the current reduction
+ *Ctx
+
+ // RunCmd is the action that was dispatched
+ RunCmd
+
+ // Output is the `stdout` of the command.
+ // Commands must close it when they are done.
+ Output OutputStream
+
+ // Verbose if true prints the command being run (prefixed by "# ")
+ Verbose bool
+}
+
+func (cx *CmdCtx) update(updaters ...func(*CmdCtx)) *CmdCtx {
+ for _, f := range updaters {
+ f(cx)
+ }
+ return cx
+}
+
+// Copy returns a shallow copy of the CmdCtx
+func (cx *CmdCtx) Copy(updaters ...func(*CmdCtx)) *CmdCtx {
+ x := *cx
+ return x.update(updaters...)
+}
+
+// WithCmd returns a copy of cx RunCmd updated with Name name and Args args
+func (cx *CmdCtx) WithCmd(name string, args ...string) *CmdCtx {
+ return cx.Copy(func(cx *CmdCtx) {
+ rc := cx.RunCmd
+ rc.Name = name
+ rc.Args = args
+ cx.RunCmd = rc
+ })
+}
+
+// Run runs the list of builtin commands with name CmtCtx.RunCmd.Name.
+// If no commands exist with that name, it calls Builtins.ExecCmd instead.
+func (cx *CmdCtx) Run() *State {
+ if cx.Verbose {
+ fmt.Fprintln(cx.Output, "#", mgutil.QuoteCmd(cx.Name, cx.Args...))
+ }
+
+ cmds := cx.BuiltinCmds.Filter(func(c BuiltinCmd) bool { return c.Name == cx.Name })
+ switch len(cmds) {
+ case 0:
+ return Builtins.ExecCmd(cx)
+ case 1:
+ return cmds[0].Run(cx)
+ }
+
+ stream := cx.Output
+ defer stream.Close()
+ wg := &sync.WaitGroup{}
+ defer wg.Wait()
+
+ st := cx.State
+ for _, c := range cmds {
+ cx = cx.Copy(func(x *CmdCtx) {
+ x.Ctx = x.Ctx.SetState(st)
+ x.Output = newOutputStreamRef(wg, stream)
+ })
+ st = c.Run(cx)
+ }
+ return st
+}
+
+// RunProc is a convenience function that:
+// * calls StartProc()
+// * waits for the process to complete
+// * and logs any returned error to CmdCtx.Output
+func (cx *CmdCtx) RunProc() {
+ p, err := cx.StartProc()
+ if err == nil {
+ err = p.Wait()
+ }
+ if err != nil {
+ fmt.Fprintf(cx.Output, "`%s` exited: %s\n", p.Title, err)
+ }
+}
+
+// StartProc creates a new Proc and starts the underlying process.
+// It always returns an initialised Proc.
+func (cx *CmdCtx) StartProc() (*Proc, error) {
+ if cx.Verbose {
+ fmt.Fprintln(cx.Output, "#", mgutil.QuoteCmd(cx.Name, cx.Args...))
+ }
+
+ p := newProc(cx)
+ return p, p.start()
+}
+
+// BuiltinCmdRunFunc is the BuiltinCmd.Run function
+//
+// Where possible, implementations should prefer to do real work in a goroutine.
+type BuiltinCmdRunFunc func(*CmdCtx) *State
+
+// BuiltinCmd describes a builtin command
+type BuiltinCmd struct {
+ // Name is the name of the command.
+ Name string
+
+ // Desc is a description of what the command does
+ Desc string
+
+ // Run is called to carry out the operation of the command
+ Run BuiltinCmdRunFunc
+}
diff --git a/src/margo.sh/mg/builtins_test.go b/src/margo.sh/mg/builtins_test.go
new file mode 100644
index 00000000..35129d88
--- /dev/null
+++ b/src/margo.sh/mg/builtins_test.go
@@ -0,0 +1,248 @@
+package mg_test
+
+import (
+ "bytes"
+ "io"
+ "margo.sh/mg"
+ "margo.sh/mgutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestBuiltinCmdList_Lookup(t *testing.T) {
+ t.Parallel()
+ exec := func() mg.BuiltinCmd {
+ r, _ := mg.Builtins.Commands().Lookup(".exec")
+ return r
+ }()
+ item := mg.BuiltinCmd{
+ Name: "this name",
+ Desc: "description",
+ Run: func(*mg.CmdCtx) *mg.State { return nil },
+ }
+ tcs := []struct {
+ name string
+ bcl mg.BuiltinCmdList
+ input string
+ wantCmd mg.BuiltinCmd
+ wantFound bool
+ }{
+ {"empty cmd list", mg.BuiltinCmdList{}, "nothing to find", exec, false},
+ {"not found", mg.BuiltinCmdList{item}, "not found", exec, false},
+ {"found", mg.BuiltinCmdList{item}, item.Name, item, true},
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ gotCmd, gotFound := tc.bcl.Lookup(tc.input)
+ // there is no way to compare functions, therefore we just check the names.
+ if gotCmd.Name != tc.wantCmd.Name {
+ t.Errorf("Lookup(): gotCmd = (%v); want (%v)", gotCmd, tc.wantCmd)
+ }
+ if gotFound != tc.wantFound {
+ t.Errorf("Lookup(): gotFound = (%v); want (%v)", gotFound, tc.wantFound)
+ }
+ })
+ }
+}
+
+// tests when the Args is empty, it should pick up the available BuiltinCmd(s).
+func TestTypeCmdEmptyArgs(t *testing.T) {
+ t.Parallel()
+ item1 := mg.BuiltinCmd{Name: "this name", Desc: "this description"}
+ item2 := mg.BuiltinCmd{Name: "another one", Desc: "should appear too"}
+ buf := new(bytes.Buffer)
+ input := &mg.CmdCtx{
+ Ctx: &mg.Ctx{
+ State: &mg.State{
+ BuiltinCmds: mg.BuiltinCmdList{item1, item2},
+ },
+ },
+ Output: &mgutil.IOWrapper{
+ Writer: buf,
+ },
+ }
+
+ if got := mg.Builtins.TypeCmd(input); got != input.State {
+ t.Errorf("TypeCmd() = %v, want %v", got, input.State)
+ }
+ out := buf.String()
+ for _, item := range []mg.BuiltinCmd{item1, item2} {
+ if !strings.Contains(out, item.Name) {
+ t.Errorf("buf.String() = (%s); want (%s) in it", out, item.Name)
+ }
+ if !strings.Contains(out, item.Desc) {
+ t.Errorf("buf.String() = (%s); want (%s) in it", out, item.Desc)
+ }
+ }
+}
+
+func setupBuiltinCmdCtx(cmds mg.BuiltinCmdList, args []string, envMap mg.EnvMap, buf io.Writer) (*mg.CmdCtx, func()) {
+ ctx := mg.NewTestingCtx(nil)
+ ctx.State = ctx.AddBuiltinCmds(cmds...)
+ ctx.Env = envMap
+ rc := mg.RunCmd{Args: args}
+
+ cmd := &mg.CmdCtx{
+ Ctx: ctx,
+ RunCmd: rc,
+ Output: &mgutil.IOWrapper{
+ Writer: buf,
+ },
+ }
+ return cmd, ctx.Cancel
+}
+
+// tests that when a command is named in Args, TypeCmd describes only
+// that command: the requested command's name and description appear in
+// the output, the other command's do not.
+func TestTypeCmdLookupCmd(t *testing.T) {
+ t.Parallel()
+ item1 := mg.BuiltinCmd{Name: "this name", Desc: "this description"}
+ item2 := mg.BuiltinCmd{Name: "another one", Desc: "should appear"}
+ buf := new(bytes.Buffer)
+ input, cleanup := setupBuiltinCmdCtx(mg.BuiltinCmdList{item1, item2}, []string{item2.Name}, nil, buf)
+ defer cleanup()
+
+ if got := mg.Builtins.TypeCmd(input); got != input.State {
+  t.Errorf("TypeCmd() = %v, want %v", got, input.State)
+ }
+ out := buf.String()
+ // item1 was not requested, so neither its name nor desc is listed
+ if strings.Contains(out, item1.Name) {
+  t.Errorf("buf.String() = (%s); didn't expect (%s) in it", out, item1.Name)
+ }
+ if strings.Contains(out, item1.Desc) {
+  t.Errorf("buf.String() = (%s); didn't expect (%s) in it", out, item1.Desc)
+ }
+ // item2 was requested, so both its name and desc are listed
+ if !strings.Contains(out, item2.Name) {
+  t.Errorf("buf.String() = (%s); want (%s) in it", out, item2.Name)
+ }
+ if !strings.Contains(out, item2.Desc) {
+  t.Errorf("buf.String() = (%s); want (%s) in it", out, item2.Desc)
+ }
+}
+
+type envPair struct {
+ key string
+ value string
+}
+
+func setupEnvCmd(t *testing.T) ([]envPair, []mg.BuiltinCmd, func()) {
+ envs := []envPair{
+ {"thiskey", "the value"},
+ {"anotherkey", "another value"},
+ }
+ items := make([]mg.BuiltinCmd, len(envs))
+ for i, e := range envs {
+ if err := os.Setenv(e.key, e.value); err != nil {
+ t.Fatalf("cannot set environment values: %v", err)
+ }
+ items[i] = mg.BuiltinCmd{Name: e.key, Desc: "doesn't matter"}
+ }
+ cleanup := func() {
+ for _, e := range envs {
+ os.Unsetenv(e.key)
+ }
+ }
+ return envs, items, cleanup
+}
+
+func TestEnvCmd(t *testing.T) {
+ t.Parallel()
+ envs, items, cleanup := setupEnvCmd(t)
+ defer cleanup()
+
+ tcs := []struct {
+ name string
+ cmds mg.BuiltinCmdList
+ args []string
+ envs mg.EnvMap
+ wantEnvs []envPair
+ }{
+ {"no cmd no env", mg.BuiltinCmdList{}, []string{}, nil, nil},
+ {
+ "no cmd with env",
+ mg.BuiltinCmdList{},
+ []string{},
+ mg.EnvMap{"tAlbt": "gRuVbi", "wILHI": "XOmsUdw"},
+ []envPair{{"tAlbt", "gRuVbi"}, {"wILHI", "XOmsUdw"}},
+ },
+ {
+ "one env pair", mg.BuiltinCmdList{items[0]},
+ []string{items[0].Name},
+ nil,
+ []envPair{envs[0]},
+ },
+ {
+ "multiple env pairs",
+ mg.BuiltinCmdList{items[0], items[1]},
+ []string{items[0].Name, items[1].Name},
+ nil,
+ []envPair{envs[0], envs[1]},
+ },
+ }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ buf := new(bytes.Buffer)
+ input, cleanup := setupBuiltinCmdCtx(tc.cmds, tc.args, tc.envs, buf)
+ defer cleanup()
+ if got := mg.Builtins.EnvCmd(input); got == nil {
+ t.Error("EnvCmd() = (nil); want (*State)")
+ }
+ out := buf.String()
+ for _, e := range tc.wantEnvs {
+ if !strings.Contains(out, e.key) {
+ t.Errorf("buf.String() = (%s); want (%s) in it", out, e.key)
+ }
+ if !strings.Contains(out, e.value) {
+ t.Errorf("buf.String() = (%s); want (%s) in it", out, e.value)
+ }
+ }
+ })
+ }
+}
+
+func TestBuiltinCmdsReduce(t *testing.T) {
+ t.Parallel()
+ isIn := func(cmd mg.BuiltinCmd, haystack mg.BuiltinCmdList) bool {
+ for _, h := range haystack {
+ if h.Name == cmd.Name && h.Desc == cmd.Desc {
+ return true
+ }
+ }
+ return false
+ }
+
+ item := mg.BuiltinCmd{Name: "qNgEYow", Desc: "YKjYxqMnt"}
+ ctx := &mg.Ctx{
+ State: &mg.State{
+ BuiltinCmds: mg.BuiltinCmdList{item},
+ },
+ }
+
+ bc := mg.Builtins
+ state := bc.Reduce(ctx)
+ if state == nil {
+ t.Fatal("bc.Reduce() = nil, want *State")
+ }
+ for _, cmd := range bc.Commands() {
+ if isIn(cmd, state.BuiltinCmds) {
+ t.Errorf("didn't want %v in %v", cmd, bc.Commands())
+ }
+ }
+ if !isIn(item, state.BuiltinCmds) {
+ t.Errorf("want %v in %v", item, bc.Commands())
+ }
+
+ ctx.Action = mg.RunCmd{}
+ state = bc.Reduce(ctx)
+ if state == nil {
+ t.Fatal("bc.Reduce() = nil, want *State")
+ }
+ for _, cmd := range bc.Commands() {
+ if !isIn(cmd, state.BuiltinCmds) {
+ t.Errorf("want %v in %v", cmd, bc.Commands())
+ }
+ }
+ if !isIn(item, state.BuiltinCmds) {
+ t.Errorf("want %v in %v", item, bc.Commands())
+ }
+}
diff --git a/src/margo.sh/mg/client-actions.go b/src/margo.sh/mg/client-actions.go
new file mode 100644
index 00000000..a14dbe5e
--- /dev/null
+++ b/src/margo.sh/mg/client-actions.go
@@ -0,0 +1,27 @@
+package mg
+
+import (
+ "margo.sh/mg/actions"
+)
+
+var (
+ _ actions.ClientAction = CmdOutput{}
+ _ actions.ClientAction = Activate{}
+ _ actions.ClientAction = Restart{}
+ _ actions.ClientAction = Shutdown{}
+)
+
+type clientActionSupport struct{ ReducerType }
+
+func (cas *clientActionSupport) Reduce(mx *Ctx) *State {
+ if act, ok := mx.Action.(actions.ClientAction); ok {
+ switch act := act.(type) {
+ case Activate:
+ mx.Log.Printf("client action Activate(%s:%d:%d) dispatched\n", act.Path, act.Row, act.Col)
+ case Restart, Shutdown:
+ mx.Log.Printf("client action %s dispatched\n", act.ClientAction().Name)
+ }
+ return mx.addClientActions(act)
+ }
+ return mx.State
+}
diff --git a/src/margo.sh/mg/cmd.go b/src/margo.sh/mg/cmd.go
new file mode 100644
index 00000000..9555c1d3
--- /dev/null
+++ b/src/margo.sh/mg/cmd.go
@@ -0,0 +1,441 @@
+package mg
+
+import (
+ "bytes"
+ "flag"
+ "io"
+ "margo.sh/mg/actions"
+ "margo.sh/mgutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+)
+
+var (
+ // OutputStreamFlushInterval specifies how often to flush command output.
+ OutputStreamFlushInterval = 500 * time.Millisecond
+
+ _ OutputStream = (*CmdOut)(nil)
+ _ OutputStream = (*IssueOut)(nil)
+ _ OutputStream = (OutputStreams)(nil)
+ _ OutputStream = (*mgutil.IOWrapper)(nil)
+)
+
+type ErrorList []error
+
+// First returns the first non-nil error in the list, or nil if the
+// list contains no non-nil errors.
+func (el ErrorList) First() error {
+ for i := range el {
+  if err := el[i]; err != nil {
+   return err
+  }
+ }
+ return nil
+}
+
+func (el ErrorList) Filter() ErrorList {
+ if len(el) == 0 {
+ return nil
+ }
+ res := make(ErrorList, 0, len(el))
+ for _, e := range el {
+ if e != nil {
+ res = append(res, e)
+ }
+ }
+ return res
+}
+
+func (el ErrorList) Error() string {
+ buf := &bytes.Buffer{}
+ for _, e := range el {
+ if e == nil {
+ continue
+ }
+ if buf.Len() != 0 {
+ buf.WriteByte('\n')
+ }
+ buf.WriteString(e.Error())
+ }
+ return buf.String()
+}
+
+// OutputStream describes an object that's capable of dispatching command output.
+//
+// An OutputStream is safe for concurrent use.
+//
+// The main implementation is CmdOut.
+type OutputStream = mgutil.OutputStream
+
+// OutputStreams delegates to a list of OutputStreams.
+//
+// For each method (Write, Close, Flush):
+//
+// If none of the underlying methods return an error, a nil error is returned.
+//
+// Otherwise an ErrorList length == len(OutputStreams) is returned.
+// For each entry OutputStreams[i], ErrorList[i] contains the error
+// returned for the method called on that OutputStream.
+type OutputStreams []OutputStream
+
+// Write calls Write() on all each OutputStream
+func (sl OutputStreams) Write(p []byte) (int, error) {
+ return len(p), sl.forEach(func(s OutputStream) error {
+ n, err := s.Write(p)
+ if err == nil && n != len(p) {
+ return io.ErrShortWrite
+ }
+ return err
+ })
+}
+
+// Close calls Close() on all each OutputStream
+func (sl OutputStreams) Close() error {
+ return sl.forEach(func(s OutputStream) error { return s.Close() })
+}
+
+// Flush calls Flush() on all each OutputStream
+func (sl OutputStreams) Flush() error {
+ return sl.forEach(func(s OutputStream) error { return s.Flush() })
+}
+
+// forEach calls f on each entry in the list
+// it takes care of implementing the documented error returned by OutputStreams' methods
+func (sl OutputStreams) forEach(f func(OutputStream) error) error {
+ var el ErrorList
+ for i, s := range sl {
+ err := f(s)
+ if err == nil {
+ continue
+ }
+ if len(el) == 0 {
+ el = make(ErrorList, len(sl))
+ }
+ el[i] = err
+ }
+ if len(el) == 0 {
+ return nil
+ }
+ return el
+}
+
+type CmdOut struct {
+ Fd string
+ Dispatch Dispatcher
+
+ mu sync.Mutex
+ buf []byte
+ closed bool
+}
+
+func (w *CmdOut) Write(p []byte) (int, error) {
+ return w.write(false, p)
+}
+
+func (w *CmdOut) write(writeIfClosed bool, p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.closed && !writeIfClosed {
+ return 0, os.ErrClosed
+ }
+
+ w.buf = append(w.buf, p...)
+ return len(p), nil
+}
+
+// Close closes the writer.
+// It returns os.ErrClosed if Close has already been called.
+func (w *CmdOut) Close() error {
+ defer w.Flush()
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.closed {
+ return os.ErrClosed
+ }
+
+ w.closed = true
+ return nil
+}
+
+// Flush implements OutputStream.Flush
+//
+// If w.Dispatch is set, it's used to dispatch Output{} actions.
+// It never returns an error.
+func (w *CmdOut) Flush() error {
+ if w.Dispatch == nil || w.Fd == "" {
+ return nil
+ }
+
+ out := w.Output()
+ if len(out.Output) != 0 || out.Close {
+ w.Dispatch(out)
+ }
+
+ return nil
+}
+
+// Output returns the data buffered from previous calls to w.Write() and clears
+// the buffer.
+func (w *CmdOut) Output() CmdOutput {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ out := CmdOutput{Fd: w.Fd, Output: w.buf, Close: w.closed}
+ w.buf = nil
+ return out
+}
+
+type CmdOutput struct {
+ ActionType
+
+ Fd string
+ Output []byte
+ Close bool
+}
+
+func (out CmdOutput) ClientAction() actions.ClientData {
+ return actions.ClientData{Name: "CmdOutput", Data: out}
+}
+
+type cmdSupport struct{ ReducerType }
+
+func (cs *cmdSupport) Reduce(mx *Ctx) *State {
+ switch act := mx.Action.(type) {
+ case RunCmd:
+ return runCmd(mx, act)
+ }
+ return mx.State
+}
+
+func runCmd(mx *Ctx, rc RunCmd) *State {
+ rc = rc.Interpolate(mx)
+ cx := &CmdCtx{
+ Ctx: mx,
+ RunCmd: rc,
+ Output: &CmdOut{Fd: rc.Fd, Dispatch: mx.Store.Dispatch},
+ }
+ defer mx.Profile.Push(cx.Name).Pop()
+ return cx.Run()
+}
+
+type outputStreamRef struct {
+ wg *sync.WaitGroup
+ once sync.Once
+ OutputStream
+}
+
+func newOutputStreamRef(wg *sync.WaitGroup, w OutputStream) OutputStream {
+ wg.Add(1)
+ return &outputStreamRef{
+ wg: wg,
+ OutputStream: w,
+ }
+}
+
+func (osr *outputStreamRef) Close() error {
+ osr.once.Do(osr.wg.Done)
+ return nil
+}
+
+type runCmdData struct {
+ *Ctx
+ RunCmd
+}
+
+type RunCmdFlagSet struct {
+ RunCmd RunCmd
+ *flag.FlagSet
+}
+
+func (fs RunCmdFlagSet) Parse() error {
+ return fs.FlagSet.Parse(fs.RunCmd.Args)
+}
+
+type RunDmc = RunCmd
+type RunCmd struct {
+ ActionType
+
+ Fd string
+ Input bool
+ Name string
+ Dir string
+ Args []string
+ CancelID string
+ Prompts []string
+}
+
+func (rc RunCmd) Flags() RunCmdFlagSet {
+ return RunCmdFlagSet{
+ RunCmd: rc,
+ FlagSet: flag.NewFlagSet(rc.Name, flag.ContinueOnError),
+ }
+}
+
+func (rc RunCmd) StringFlag(name, value string) string {
+ fs := rc.Flags()
+ v := fs.String(name, value, "")
+ fs.Parse()
+ return *v
+}
+
+func (rc RunCmd) BoolFlag(name string, value bool) bool {
+ fs := rc.Flags()
+ v := fs.Bool(name, value, "")
+ fs.Parse()
+ return *v
+}
+
+func (rc RunCmd) IntFlag(name string, value int) int {
+ fs := rc.Flags()
+ v := fs.Int(name, value, "")
+ fs.Parse()
+ return *v
+}
+
+func (rc RunCmd) Interpolate(mx *Ctx) RunCmd {
+ data := runCmdData{
+ Ctx: mx,
+ RunCmd: rc,
+ }
+ tpl := template.New("")
+ buf := &bytes.Buffer{}
+ rc.Name = rc.interp(data, tpl, buf, rc.Name)
+ for i, s := range rc.Args {
+ rc.Args[i] = rc.interp(data, tpl, buf, s)
+ }
+ return rc
+}
+
+func (rc RunCmd) interp(data runCmdData, tpl *template.Template, buf *bytes.Buffer, s string) string {
+ if strings.Contains(s, "{{") && strings.Contains(s, "}}") {
+ if tpl, err := tpl.Parse(s); err == nil {
+ buf.Reset()
+ if err := tpl.Execute(buf, data); err == nil {
+ s = buf.String()
+ }
+ }
+ }
+ return os.Expand(s, func(k string) string {
+ if v, ok := data.Env[k]; ok {
+ return v
+ }
+ return "${" + k + "}"
+ })
+}
+
+// Wd returns rc.Dir if set or v.Dir()
+func (rc RunCmd) Wd(v *View) string {
+ if rc.Dir != "" {
+ return rc.Dir
+ }
+ return v.Dir()
+}
+
+type Proc struct {
+ Title string
+
+ cx *CmdCtx
+ mu sync.RWMutex
+ done chan struct{}
+ closed bool
+ cmd *exec.Cmd
+ task *TaskTicket
+ cid string
+}
+
+func newProc(cx *CmdCtx) *Proc {
+ cmd := exec.Command(cx.Name, cx.Args...)
+ if cx.Input {
+ s, _ := cx.View.ReadAll()
+ cmd.Stdin = bytes.NewReader(s)
+ }
+ cmd.Dir = cx.Wd(cx.View)
+ cmd.Env = cx.Env.Environ()
+ cmd.Stdout = cx.Output
+ cmd.Stderr = cx.Output
+ cmd.SysProcAttr = pgSysProcAttr
+
+ name := filepath.Base(cx.Name)
+ args := make([]string, len(cx.Args))
+ for i, s := range cx.Args {
+ if filepath.IsAbs(s) {
+ s = filepath.Base(s)
+ }
+ args[i] = s
+ }
+
+ return &Proc{
+ Title: "`" + mgutil.QuoteCmd(name, args...) + "`",
+ done: make(chan struct{}),
+ cx: cx,
+ cmd: cmd,
+ cid: cx.CancelID,
+ }
+}
+
+func (p *Proc) Cancel() {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ select {
+ case <-p.done:
+ default:
+ pgKill(p.cmd.Process)
+ }
+}
+
+func (p *Proc) start() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ p.task = p.cx.Begin(Task{
+ CancelID: p.cid,
+ Title: p.Title,
+ Cancel: p.Cancel,
+ })
+ go p.dispatcher()
+
+ if err := p.cmd.Start(); err != nil {
+ p.close()
+ return err
+ }
+ return nil
+}
+
+func (p *Proc) dispatcher() {
+ defer p.task.Done()
+
+ for {
+ select {
+ case <-p.done:
+ return
+ case <-time.After(OutputStreamFlushInterval):
+ p.cx.Output.Flush()
+ }
+ }
+}
+
+// close marks the Proc as done and closes p.done, which stops the
+// output-flushing dispatcher goroutine.
+// Callers (start, Wait) hold p.mu when calling this; the closed flag
+// guards against closing p.done twice.
+func (p *Proc) close() {
+ if p.closed {
+  return
+ }
+ p.closed = true
+ close(p.done)
+}
+
+func (p *Proc) Wait() error {
+ defer func() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ p.close()
+ }()
+
+ return p.cmd.Wait()
+}
diff --git a/src/margo.sh/mg/cmd_internal_test.go b/src/margo.sh/mg/cmd_internal_test.go
new file mode 100644
index 00000000..dad4d331
--- /dev/null
+++ b/src/margo.sh/mg/cmd_internal_test.go
@@ -0,0 +1,69 @@
+package mg
+
+import (
+ "testing"
+)
+
+func TestCmdSupport_Reduce_noCalls(t *testing.T) {
+ type unknown struct{ ActionType }
+ cs := &cmdSupport{}
+ ctx := NewTestingCtx(nil)
+ defer ctx.Cancel()
+
+ if state := cs.Reduce(ctx); state != ctx.State {
+ t.Errorf("cmdSupport.Reduce() = %v, want %v", state, ctx.State)
+ }
+
+ ctx.Action = new(unknown)
+ if state := cs.Reduce(ctx); state != ctx.State {
+ t.Errorf("cmdSupport.Reduce() = %v, want %v", state, ctx.State)
+ }
+}
+
+func TestCmdSupport_Reduce_withRunCmd(t *testing.T) {
+ var called bool
+ cs := &cmdSupport{}
+ ctx := NewTestingCtx(RunCmd{
+ Fd: "rHX23",
+ Name: ".mytest",
+ })
+ defer ctx.Cancel()
+
+ ctx.State = ctx.AddBuiltinCmds(BuiltinCmd{
+ Name: ".mytest",
+ Run: func(cx *CmdCtx) *State {
+ called = true
+ return cx.State
+ },
+ })
+
+ if state := cs.Reduce(ctx); state != ctx.State {
+ t.Errorf("cmdSupport.Reduce() = %v, want %v", state, ctx.State)
+ }
+ if !called {
+ t.Errorf("cs.Reduce(%v): cs.runCmd() wasn't called", ctx)
+ }
+}
+
+func TestCmdSupport_Reduce_withCmdOutput(t *testing.T) {
+ var called bool
+ fd := "CIlZ7zBWHIAL"
+ cs := &clientActionSupport{}
+ ctx := NewTestingCtx(nil)
+ defer ctx.Cancel()
+
+ ctx.Action = CmdOutput{
+ Fd: fd,
+ }
+
+ state := cs.Reduce(ctx)
+ for _, c := range state.clientActions {
+ if d, ok := c.Data.(CmdOutput); ok && d.Fd == fd {
+ called = true
+ break
+ }
+ }
+ if !called {
+ t.Errorf("cs.Reduce(%v): cs.cmdOutput() wasn't called", ctx)
+ }
+}
diff --git a/src/margo.sh/mg/cmd_nix.go b/src/margo.sh/mg/cmd_nix.go
new file mode 100644
index 00000000..fe69a7d7
--- /dev/null
+++ b/src/margo.sh/mg/cmd_nix.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package mg
+
+import (
+ "os"
+ "syscall"
+)
+
+var (
+ pgSysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+)
+
+// pgKill sends SIGINT to the process group led by p.
+// The negative pid targets the whole group — the child is started with
+// Setpgid (see pgSysProcAttr) — so sub-processes are interrupted too.
+func pgKill(p *os.Process) {
+ if p != nil {
+  syscall.Kill(-p.Pid, syscall.SIGINT)
+ }
+}
diff --git a/src/margo.sh/mg/cmd_win.go b/src/margo.sh/mg/cmd_win.go
new file mode 100644
index 00000000..62ba6c8e
--- /dev/null
+++ b/src/margo.sh/mg/cmd_win.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package mg
+
+import (
+ "os"
+ "syscall"
+)
+
+var (
+ pgSysProcAttr *syscall.SysProcAttr
+)
+
+func pgKill(p *os.Process) {
+ if p != nil {
+ p.Kill()
+ }
+}
diff --git a/src/margo.sh/mg/common.go b/src/margo.sh/mg/common.go
new file mode 100644
index 00000000..a64c53f9
--- /dev/null
+++ b/src/margo.sh/mg/common.go
@@ -0,0 +1,19 @@
+package mg
+
+import (
+ "margo.sh/mgutil"
+)
+
+type StrSet = mgutil.StrSet
+
+type EnvMap = mgutil.EnvMap
+
+// PathList is an alias of mgutil.PathList
+func PathList(s string) []string {
+ return mgutil.PathList(s)
+}
+
+// IsParentDir is an alias of mgutil.IsParentDir
+func IsParentDir(parentDir, childPath string) bool {
+ return mgutil.IsParentDir(parentDir, childPath)
+}
diff --git a/src/disposa.blue/margo/mg/completion.go b/src/margo.sh/mg/completion.go
similarity index 100%
rename from src/disposa.blue/margo/mg/completion.go
rename to src/margo.sh/mg/completion.go
diff --git a/src/margo.sh/mg/ctx.go b/src/margo.sh/mg/ctx.go
new file mode 100644
index 00000000..1f131339
--- /dev/null
+++ b/src/margo.sh/mg/ctx.go
@@ -0,0 +1,226 @@
+package mg
+
+import (
+ "context"
+ "github.com/ugorji/go/codec"
+ "margo.sh/mgpf"
+ "margo.sh/vfs"
+ "reflect"
+ "regexp"
+ "sync"
+ "time"
+)
+
+var (
+ // StatusPrefix is the prefix used for all status elements.
+ StatusPrefix = "‣ "
+
+ _ context.Context = (*Ctx)(nil)
+)
+
+// Ctx holds data about the current request/reduction.
+//
+// To create a new instance, use Store.NewCtx()
+//
+// NOTE: Ctx should be treated as readonly and users should not assign to any
+// of its fields or the fields of any of its members.
+// If a field must be updated, you should use one of the methods like Copy
+//
+// Unless a field is tagged with `mg.Nillable:"true"`, it will never be nil
+// and if updated, no field should be set to nil
+type Ctx struct {
+ // State is the current state of the world
+ *State
+
+ // Action is the action that was dispatched.
+ // It's a hint telling reducers about some action that happened,
+ // e.g. that the view is about to be saved or that it was changed.
+ Action Action `mg.Nillable:"true"`
+
+ Acts *ctxActs
+
+ // KVMap is an in-memory cache of data for the lifetime of the Ctx.
+ *KVMap
+
+ // Store is the global store
+ Store *Store
+
+ // Log is the global logger
+ Log *Logger
+
+ Cookie string
+
+ Profile *mgpf.Profile
+
+ VFS *vfs.FS
+
+ doneC chan struct{}
+ cancelOnce *sync.Once
+ handle codec.Handle
+ defr *redFns
+}
+
+// newCtx creates a new Ctx
+// if st is nil, the state will be set to the equivalent of Store.state.new()
+// if p is nil a new Profile will be created with cookie as its name
+func newCtx(sto *Store, st *State, acts *ctxActs, cookie string, p *mgpf.Profile, kv *KVMap) *Ctx {
+ if st == nil {
+ st = sto.state.new()
+ }
+ if st.Config == nil {
+ st = st.SetConfig(sto.cfg)
+ }
+ if p == nil {
+ p = mgpf.NewProfile(cookie)
+ }
+ if kv == nil {
+ kv = &KVMap{}
+ }
+ return &Ctx{
+ State: st,
+ Action: acts.Current(),
+ Acts: acts,
+ KVMap: kv,
+ Store: sto,
+ Log: sto.ag.Log,
+ Cookie: cookie,
+ Profile: p,
+ VFS: VFS,
+ doneC: make(chan struct{}),
+ cancelOnce: &sync.Once{},
+ handle: sto.ag.handle,
+ defr: &redFns{},
+ }
+}
+
+// Deadline implements context.Context.Deadline
+func (*Ctx) Deadline() (time.Time, bool) {
+ return time.Time{}, false
+}
+
+// Cancel cancels the ctx by arranging for the Ctx.Done() channel to be closed.
+// Canceling this Ctx cancels all other Ctxs Copy()ed from it.
+func (mx *Ctx) Cancel() {
+ mx.cancelOnce.Do(func() {
+ close(mx.doneC)
+ })
+}
+
+// Done implements context.Context.Done()
+func (mx *Ctx) Done() <-chan struct{} {
+ return mx.doneC
+}
+
+// Err implements context.Context.Err()
+func (mx *Ctx) Err() error {
+ select {
+ case <-mx.Done():
+ return context.Canceled
+ default:
+ return nil
+ }
+}
+
+// Value implements context.Context.Value() but always returns nil
+func (mx *Ctx) Value(k interface{}) interface{} {
+ return nil
+}
+
+// AgentName returns the name of the agent if set
+// if set, it's usually the agent name as used in the command `margo.sh [run...] $agent`
+func (mx *Ctx) AgentName() string {
+ return mx.Store.ag.Name
+}
+
+// ActionIs returns true if Ctx.Action is the same type as any of those in actions
+// for convenience, it returns true if actions is nil
+func (mx *Ctx) ActionIs(actions ...Action) bool {
+ if actions == nil {
+ return true
+ }
+ typ := reflect.TypeOf(mx.Action)
+ for _, act := range actions {
+ if reflect.TypeOf(act) == typ {
+ return true
+ }
+ }
+ return false
+}
+
+// LangIs is equivalent to View.LangIs(langs...)
+func (mx *Ctx) LangIs(langs ...Lang) bool {
+ return mx.View.LangIs(langs...)
+}
+
+// CommonPatterns is equivalent to View.CommonPatterns()
+func (mx *Ctx) CommonPatterns() []*regexp.Regexp {
+ return mx.View.CommonPatterns()
+}
+
+// Copy creates a shallow copy of the Ctx.
+//
+// It applies the functions in updaters to the new object.
+// Updating the new Ctx via these functions is preferred to assigning to the new object
+func (mx *Ctx) Copy(updaters ...func(*Ctx)) *Ctx {
+ x := *mx
+ mx = &x
+
+ for _, f := range updaters {
+ f(mx)
+ }
+ return mx
+}
+
+func (mx *Ctx) SetState(st *State) *Ctx {
+ if mx.State == st {
+ return mx
+ }
+ mx = mx.Copy()
+ mx.State = st
+ return mx
+}
+
+func (mx *Ctx) SetView(v *View) *Ctx {
+ return mx.SetState(mx.State.SetView(v))
+}
+
+// Begin is a short-hand for Ctx.Store.Begin
+func (mx *Ctx) Begin(t Task) *TaskTicket {
+ return mx.Store.Begin(t)
+}
+
+func (mx *Ctx) Defer(f ReduceFn) *State {
+ mx.defr.prepend(f)
+ return mx.State
+}
+
+type redFns struct {
+ sync.RWMutex
+ l []ReduceFn
+}
+
+func (r *redFns) prepend(f ReduceFn) {
+ r.Lock()
+ defer r.Unlock()
+
+ r.l = append([]ReduceFn{f}, r.l...)
+}
+
+func (r *redFns) append(f ReduceFn) {
+ r.Lock()
+ defer r.Unlock()
+
+ r.l = append(r.l[:len(r.l):len(r.l)], f)
+}
+
+func (r *redFns) reduction(mx *Ctx) *Ctx {
+ r.RLock()
+ l := r.l
+ r.l = nil
+ r.RUnlock()
+
+ for _, reduce := range l {
+ mx = mx.SetState(reduce(mx))
+ }
+ return mx
+}
diff --git a/src/margo.sh/mg/db.go b/src/margo.sh/mg/db.go
new file mode 100644
index 00000000..65a03767
--- /dev/null
+++ b/src/margo.sh/mg/db.go
@@ -0,0 +1,143 @@
+package mg
+
+import (
+ "sync"
+)
+
+var (
+ _ KVStore = (KVStores)(nil)
+ _ KVStore = (*Store)(nil)
+ _ KVStore = (*KVMap)(nil)
+)
+
+// KVStore represents a generic key value store.
+//
+// All operations are safe for concurrent access.
+//
+// The main implementation in this and related packages is Store.
+type KVStore interface {
+ // Put stores the value in the store with identifier key
+ // NOTE: use Del instead of storing nil
+ Put(key, value interface{})
+
+ // Get returns the value stored using identifier key
+ // NOTE: if the value doesn't exist, nil is returned
+ Get(key interface{}) interface{}
+
+ // Del removes the value identified by key from the store
+ Del(key interface{})
+}
+
+// KVStores implements a KVStore that duplicates its operations on a list of k/v stores
+//
+// NOTE: All operations are no-ops for nil KVStores
+type KVStores []KVStore
+
+// Put calls .Put on each of k/v stores in the list
+func (kvl KVStores) Put(k, v interface{}) {
+ for _, kvs := range kvl {
+ if kvs == nil {
+ continue
+ }
+ kvs.Put(k, v)
+ }
+}
+
+// Get returns the first value identified by k found in the list of k/v stores
+func (kvl KVStores) Get(k interface{}) interface{} {
+ for _, kvs := range kvl {
+ if kvs == nil {
+ continue
+ }
+ if v := kvs.Get(k); v != nil {
+ return v
+ }
+ }
+ return nil
+}
+
+// Del removes the value identified by k from all k/v stores in the list
+func (kvl KVStores) Del(k interface{}) {
+ for _, kvs := range kvl {
+ if kvs == nil {
+ continue
+ }
+ kvs.Del(k)
+ }
+}
+
+// KVMap implements a KVStore using a map.
+// The zero-value is safe for use with all operations.
+//
+// NOTE: All operations are no-ops on a nil KVMap
+type KVMap struct {
+ vals map[interface{}]interface{}
+ mu sync.Mutex
+}
+
+// Put implements KVStore.Put
+func (m *KVMap) Put(k interface{}, v interface{}) {
+ if m == nil {
+ return
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if m.vals == nil {
+ m.vals = map[interface{}]interface{}{}
+ }
+ m.vals[k] = v
+}
+
+// Get implements KVStore.Get
+func (m *KVMap) Get(k interface{}) interface{} {
+ if m == nil {
+ return nil
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ return m.vals[k]
+}
+
+// Del implements KVStore.Del
+func (m *KVMap) Del(k interface{}) {
+ if m == nil {
+ return
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ delete(m.vals, k)
+}
+
+// Clear removes all values from the store
+func (m *KVMap) Clear() {
+ if m == nil {
+ return
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ m.vals = nil
+}
+
+// Values returns a copy of all values stored
+func (m *KVMap) Values() map[interface{}]interface{} {
+ if m == nil {
+ return nil
+ }
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ vals := make(map[interface{}]interface{}, len(m.vals))
+ for k, v := range m.vals {
+ vals[k] = v
+ }
+ return vals
+}
diff --git a/src/margo.sh/mg/doc.go b/src/margo.sh/mg/doc.go
new file mode 100644
index 00000000..4c896965
--- /dev/null
+++ b/src/margo.sh/mg/doc.go
@@ -0,0 +1 @@
+package mg // import "margo.sh/mg"
diff --git a/src/disposa.blue/margo/mg/extension.go b/src/margo.sh/mg/extension.go
similarity index 67%
rename from src/disposa.blue/margo/mg/extension.go
rename to src/margo.sh/mg/extension.go
index 90f601c8..daf1d54c 100644
--- a/src/disposa.blue/margo/mg/extension.go
+++ b/src/margo.sh/mg/extension.go
@@ -1,8 +1,8 @@
package mg
type Args struct {
- Store *Store
- Log *Logger
+ *Store
+ Log *Logger
}
type MargoFunc func(Args)
diff --git a/src/margo.sh/mg/hud.go b/src/margo.sh/mg/hud.go
new file mode 100644
index 00000000..ecd8c22c
--- /dev/null
+++ b/src/margo.sh/mg/hud.go
@@ -0,0 +1,18 @@
+package mg
+
+import (
+ "bytes"
+ "margo.sh/htm"
+)
+
+type HUDState struct {
+ Articles []string
+}
+
+func (h HUDState) AddArticle(heading htm.IElement, content ...htm.Element) HUDState {
+ buf := &bytes.Buffer{}
+ htm.Article(heading, content...).FPrintHTML(buf)
+ l := h.Articles
+ h.Articles = append(l[:len(l):len(l)], buf.String())
+ return h
+}
diff --git a/src/margo.sh/mg/issue.go b/src/margo.sh/mg/issue.go
new file mode 100644
index 00000000..7fe5f98d
--- /dev/null
+++ b/src/margo.sh/mg/issue.go
@@ -0,0 +1,526 @@
+package mg
+
+import (
+ "bytes"
+ "fmt"
+ "margo.sh/htm"
+ "margo.sh/mgutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+var (
+ commonPatterns = struct {
+ sync.RWMutex
+ m map[Lang][]*regexp.Regexp
+ }{
+ m: map[Lang][]*regexp.Regexp{
+ "": {
+ regexp.MustCompile(`^\s*(?P.+?\.\w+):(?P\d+:)(?P\d+:?)?(?P.+)$`),
+ regexp.MustCompile(`^\s*(?P.+?\.\w+)\((?P\d+)(?:,(?P\d+))?\):(?P.+)$`),
+ },
+ },
+ }
+)
+
+type commonPattern struct {
+ Lang Lang
+ Patterns []*regexp.Regexp
+}
+
+func AddCommonPatterns(lang Lang, l ...*regexp.Regexp) {
+ p := &commonPatterns
+ p.Lock()
+ defer p.Unlock()
+
+ for _, k := range []Lang{"", lang} {
+ p.m[k] = append(p.m[k], l...)
+ }
+}
+
+func CommonPatterns(langs ...Lang) []*regexp.Regexp {
+ p := &commonPatterns
+ p.RLock()
+ defer p.RUnlock()
+
+ l := p.m[""]
+ l = l[:len(l):len(l)]
+ for _, lang := range langs {
+ l = append(l, p.m[lang]...)
+ }
+ return l
+}
+
+type IssueTag string
+
+const (
+ Error = IssueTag("error")
+ Warning = IssueTag("warning")
+ Notice = IssueTag("notice")
+)
+
+type issueHash struct {
+ loc string
+ row int
+ msg string
+}
+
+type Issue struct {
+ Path string
+ Name string
+ Row int
+ Col int
+ End int
+ Tag IssueTag
+ Label string
+ Message string
+}
+
+func (isu Issue) Error() string {
+ msg := isu.Message
+ pfx := ""
+ if isu.Tag != "" {
+ pfx = "[" + string(isu.Tag) + "]" + pfx
+ }
+ if isu.Label != "" {
+ pfx = "[" + isu.Label + "]" + pfx
+ }
+ if pfx != "" {
+ pfx = pfx + ": "
+ }
+ fn := isu.Path
+ if fn == "" {
+ fn = isu.Name
+ }
+ return fmt.Sprintf("%s:%d:%d: %s%s", fn, isu.Row+1, isu.Col+1, pfx, msg)
+}
+
+func (isu *Issue) finalize(view *View) Issue {
+ v := *isu
+ if v.Tag == "" {
+ v.Tag = Error
+ }
+ if isu.InView(view) {
+ v.Path = ""
+ v.Name = view.Name
+ }
+ return v
+}
+
+func (isu *Issue) hash() issueHash {
+ h := issueHash{
+ loc: isu.Path,
+ row: isu.Row,
+ msg: isu.Message,
+ }
+ if h.loc == "" {
+ h.loc = isu.Name
+ }
+ return h
+}
+
+func (isu *Issue) Equal(p Issue) bool {
+ return isu.hash() == p.hash()
+}
+
+func (isu *Issue) SameFile(p Issue) bool {
+ if isu.Path != "" {
+ return isu.Path == p.Path
+ }
+ return isu.Name == p.Name
+}
+
+func (isu *Issue) InView(v *View) bool {
+ return (isu.Name != "" && isu.Name == v.Name) ||
+ (isu.Path != "" && isu.Path == v.Path) ||
+ (isu.Path != "" && filepath.Base(isu.Path) == v.Name)
+}
+
+func (isu *Issue) Valid() bool {
+ return (isu.Name != "" || isu.Path != "") && isu.Message != ""
+}
+
+type IssueSet []Issue
+
+func (s IssueSet) Equal(issues IssueSet) bool {
+ if len(s) != len(issues) {
+ return false
+ }
+ for _, p := range s {
+ if !issues.Has(p) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s IssueSet) Add(l ...Issue) IssueSet {
+ return s.merge(nil, l...)
+}
+
+func (s IssueSet) merge(view *View, l ...Issue) IssueSet {
+ if len(l) == 0 {
+ return s
+ }
+ res := make(IssueSet, 0, len(s)+len(l))
+ seen := make(map[issueHash]bool, cap(res))
+ for _, isus := range [][]Issue{s, l} {
+ for _, isu := range isus {
+ if view != nil {
+ isu = isu.finalize(view)
+ }
+ ish := isu.hash()
+ if seen[ish] {
+ continue
+ }
+ seen[ish] = true
+ res = append(res, isu)
+ }
+ }
+ return res
+}
+
+func (s IssueSet) Remove(l ...Issue) IssueSet {
+ res := make(IssueSet, 0, len(s)+len(l))
+ q := IssueSet(l)
+ for _, p := range s {
+ if !q.Has(p) {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+func (s IssueSet) Has(p Issue) bool {
+ for _, q := range s {
+ if p.Equal(q) {
+ return true
+ }
+ }
+ return false
+}
+
+func (is IssueSet) AllInView(v *View) IssueSet {
+ issues := make(IssueSet, 0, len(is))
+ for _, i := range is {
+ if i.InView(v) {
+ issues = append(issues, i)
+ }
+ }
+ return issues
+}
+
+type StoreIssues struct {
+ ActionType
+
+ IssueKey
+ Issues IssueSet
+}
+
+type IssueKey struct {
+ Key interface{}
+ Name string
+ Path string
+ Dir string
+}
+
+type issueKeySupport struct {
+ ReducerType
+ issues map[IssueKey]IssueSet
+}
+
+func (iks *issueKeySupport) RMount(mx *Ctx) {
+ iks.issues = map[IssueKey]IssueSet{}
+}
+
+func (iks *issueKeySupport) Reduce(mx *Ctx) *State {
+ switch act := mx.Action.(type) {
+ case StoreIssues:
+ if len(act.Issues) == 0 {
+ delete(iks.issues, act.IssueKey)
+ } else {
+ iks.issues[act.IssueKey] = act.Issues
+ }
+ }
+
+ issues := IssueSet{}
+ norm := filepath.Clean
+ name := norm(mx.View.Name)
+ path := norm(mx.View.Path)
+ dir := norm(mx.View.Dir())
+ match := func(k IssueKey) bool {
+ // no restrictions were set
+ k.Key = nil
+ if k == (IssueKey{}) {
+ return true
+ }
+
+ if path != "" && path == k.Path {
+ return true
+ }
+ if name != "" && name == k.Name {
+ return true
+ }
+ // if the view doesn't exist on disk, the dir is unreliable
+ if path != "" && dir != "" && dir == k.Dir {
+ return true
+ }
+ return false
+ }
+ for k, v := range iks.issues {
+ if match(k) {
+ issues = append(issues, v...)
+ }
+ }
+
+ return mx.State.AddIssues(issues...)
+}
+
+type issueStatusSupport struct {
+ ReducerType
+ buf bytes.Buffer
+}
+
+func (re *issueStatusSupport) Reduce(mx *Ctx) *State {
+ if len(mx.Issues) == 0 {
+ return mx.State
+ }
+
+ type Cfg struct {
+ title string
+ loc int
+ rem int
+ }
+ cfgs := map[IssueTag]*Cfg{
+ Error: {title: "Error"},
+ Warning: {title: "Warning"},
+ Notice: {title: "Notice"},
+ }
+
+ msg := ""
+ els := []htm.Element{}
+ for _, isu := range mx.Issues {
+ cfg, ok := cfgs[isu.Tag]
+ if !ok {
+ cfg = cfgs[Error]
+ }
+
+ if !isu.InView(mx.View) {
+ cfg.rem++
+ continue
+ }
+ cfg.loc++
+
+ if isu.Message == "" || isu.Row != mx.View.Row {
+ continue
+ }
+
+ s := ""
+ if isu.Label == "" {
+ s = isu.Message
+ } else {
+ s = isu.Label + ": " + isu.Message
+ }
+ els = append(els, htm.Text(s))
+ if len(msg) <= 1 {
+ msg = s
+ }
+ }
+
+ status := make([]string, 0, len(cfgs)+1)
+ for _, k := range []IssueTag{Error, Warning, Notice} {
+ cfg := cfgs[k]
+ if cfg.loc == 0 && cfg.rem == 0 {
+ continue
+ }
+ re.buf.Reset()
+ loc, rem := mgutil.PrimaryDigits, mgutil.SecondaryDigits
+ if cfg.loc == 0 {
+ loc, rem = rem, loc
+ }
+ re.buf.WriteString(cfg.title)
+ re.buf.WriteByte(' ')
+ loc.DrawInto(cfg.loc, &re.buf)
+ re.buf.WriteRune('ꞏ')
+ rem.DrawInto(cfg.rem, &re.buf)
+ status = append(status, re.buf.String())
+ }
+ st := mx.State.AddHUD(
+ htm.Span(nil,
+ htm.A(&htm.AAttrs{Action: DisplayIssues{}}, htm.Text("Issues")),
+ htm.Textf(" ( %s )", strings.Join(status, ", ")),
+ ),
+ els...,
+ )
+ if msg != "" {
+ status = append(status, msg)
+ }
+ return st.AddStatus(status...)
+}
+
+type IssueOut struct {
+ Patterns []*regexp.Regexp
+ Base Issue
+ Dir string
+ Done chan<- struct{}
+
+ buf []byte
+ mu sync.Mutex
+ issues IssueSet
+ isu *Issue
+ pfx []byte
+ closed bool
+}
+
+func (w *IssueOut) Write(p []byte) (n int, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.closed {
+ return 0, os.ErrClosed
+ }
+
+ w.buf = append(w.buf, p...)
+ w.scan(false)
+ return len(p), nil
+}
+
+func (w *IssueOut) Close() error {
+ defer w.Flush()
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.closed {
+ return os.ErrClosed
+ }
+
+ w.closed = true
+ if w.Done != nil {
+ close(w.Done)
+ }
+
+ return nil
+}
+
+func (w *IssueOut) Flush() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.flush()
+ return nil
+}
+
+func (w *IssueOut) Issues() IssueSet {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ w.scan(true)
+ issues := make(IssueSet, len(w.issues))
+ copy(issues, w.issues)
+ return issues
+}
+
+func (w *IssueOut) scan(scanTail bool) {
+ lines := bytes.Split(w.buf, []byte{'\n'})
+ var tail []byte
+ if !scanTail {
+ n := len(lines) - 1
+ tail, lines = lines[n], lines[:n]
+ }
+
+ for _, ln := range lines {
+ w.scanLine(bytes.TrimRight(ln, "\r"))
+ }
+
+ w.buf = append(w.buf[:0], tail...)
+}
+
+func (w *IssueOut) scanLine(ln []byte) {
+ pfx := ln[:len(ln)-len(bytes.TrimLeft(ln, " \t"))]
+ ind := bytes.TrimPrefix(pfx, w.pfx)
+ if n := len(ind); n > 0 && w.isu != nil {
+ w.isu.Message += "\n" + string(ln[len(pfx)-n:])
+ return
+ }
+ w.flush()
+
+ w.pfx = pfx
+ ln = ln[len(pfx):]
+ w.isu = w.match(ln)
+}
+
+func (w *IssueOut) flush() {
+ if w.isu == nil {
+ return
+ }
+ isu := *w.isu
+ w.isu = nil
+ if isu.Valid() && !w.issues.Has(isu) {
+ w.issues = append(w.issues, isu)
+ }
+}
+
+func (w *IssueOut) match(s []byte) *Issue {
+ for _, p := range w.Patterns {
+ if isu := w.matchOne(p, s); isu != nil {
+ return isu
+ }
+ }
+ return nil
+}
+
+func (w *IssueOut) matchOne(p *regexp.Regexp, s []byte) *Issue {
+ submatch := p.FindSubmatch(s)
+ if submatch == nil {
+ return nil
+ }
+
+ str := func(s []byte) string {
+ return string(bytes.Trim(s, ": \t\r\n"))
+ }
+ num := func(s []byte) int {
+ if n, _ := strconv.Atoi(str(s)); n > 0 {
+ return n - 1
+ }
+ return 0
+ }
+
+ isu := w.Base
+ for i, k := range p.SubexpNames() {
+ v := submatch[i]
+ switch k {
+ case "path":
+ isu.Path = str(v)
+ if isu.Path != "" && w.Dir != "" && !filepath.IsAbs(isu.Path) {
+ isu.Path = filepath.Join(w.Dir, isu.Path)
+ }
+ case "line":
+ isu.Row = num(v)
+ case "column":
+ isu.Col = num(v)
+ case "end":
+ isu.End = num(v)
+ case "label":
+ lbl := str(v)
+ if lbl != "" {
+ isu.Label = lbl
+ }
+ case "error", "warning", "notice":
+ isu.Tag = IssueTag(k)
+ isu.Message = str(v)
+ case "message":
+ isu.Message = str(v)
+ case "tag":
+ tag := IssueTag(str(v))
+ if tag == Warning || tag == Error || tag == Notice {
+ isu.Tag = tag
+ }
+ }
+ }
+ return &isu
+}
diff --git a/src/margo.sh/mg/issue_test.go b/src/margo.sh/mg/issue_test.go
new file mode 100644
index 00000000..5d7c07eb
--- /dev/null
+++ b/src/margo.sh/mg/issue_test.go
@@ -0,0 +1,56 @@
+// +build !windows
+
+package mg
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestIssueWriter(t *testing.T) {
+ base := Issue{Label: "lbl", Tag: Warning}
+ w := &IssueOut{
+ Dir: "/abc",
+ Base: base,
+ Patterns: CommonPatterns(),
+ }
+ fmt.Fprintln(w, "abc.go:555:666: hello world")
+ fmt.Fprintln(w, "no match")
+ fmt.Fprint(w, "abc.go:555:")
+ fmt.Fprint(w, "666: hello\n")
+ fmt.Fprintln(w, " world")
+ fmt.Fprintln(w, "no match")
+ w.Flush()
+
+ expect := IssueSet{
+ Issue{Path: "/abc/abc.go", Row: 555 - 1, Col: 666 - 1, Tag: base.Tag, Label: base.Label, Message: "hello world"},
+ Issue{Path: "/abc/abc.go", Row: 555 - 1, Col: 666 - 1, Tag: base.Tag, Label: base.Label, Message: "hello\n world"},
+ }
+ issues := w.Issues()
+ if !expect.Equal(issues) {
+ t.Errorf("IssueWriter parsing failed. Expected %#v, got %#v", expect, issues)
+ }
+}
+
+func BenchmarkIssueSetAdd(b *testing.B) {
+ // if we make a syntax error at the top of a large file
+ // we can end up with thousands of errors
+ large := make(IssueSet, 2000)
+ for i, _ := range large {
+ large[i] = Issue{Row: i, Col: i}
+ }
+ small := large[:100]
+
+ run := func(b *testing.B, s, add IssueSet) {
+ b.Helper()
+ for i := 0; i < b.N; i++ {
+ s.Add(add...)
+ }
+ }
+ b.Run("empty, large", func(b *testing.B) { run(b, IssueSet{}, large) })
+ b.Run("small, large", func(b *testing.B) { run(b, small, large) })
+ b.Run("large, large", func(b *testing.B) { run(b, large, large) })
+ b.Run("empty, small", func(b *testing.B) { run(b, IssueSet{}, small) })
+ b.Run("small, small", func(b *testing.B) { run(b, small, small) })
+ b.Run("large, small", func(b *testing.B) { run(b, large, small) })
+}
diff --git a/src/margo.sh/mg/langs.go b/src/margo.sh/mg/langs.go
new file mode 100644
index 00000000..a9763656
--- /dev/null
+++ b/src/margo.sh/mg/langs.go
@@ -0,0 +1,60 @@
+package mg
+
+const (
+ AllLangs Lang = "*"
+
+ ActionScript Lang = "actionscript"
+ AppleScript Lang = "applescript"
+ ASP Lang = "asp"
+ C Lang = "c"
+ Clojure Lang = "clojure"
+ CPP Lang = "c++"
+ CSharp Lang = "cs"
+ CSS Lang = "css"
+ D Lang = "d"
+ Diff Lang = "diff"
+ DosBatch Lang = "dosbatch"
+ Dot Lang = "dot"
+ Empty Lang = "empty"
+ Erl Lang = "erl"
+ Erlang Lang = "erlang"
+ Go Lang = "go"
+ GoMod Lang = "go.mod"
+ GoSum Lang = "go.sum"
+ Groovy Lang = "groovy"
+ Haskell Lang = "haskell"
+ HTML Lang = "html"
+ Java Lang = "java"
+ JS Lang = "js"
+ JSON Lang = "json"
+ JSX Lang = "JSX"
+ LaTeX Lang = "latex"
+ LISP Lang = "lisp"
+ Lua Lang = "lua"
+ Makefile Lang = "makefile"
+ Matlab Lang = "matlab"
+ ObjC Lang = "objc"
+ Ocaml Lang = "ocaml"
+ Octave Lang = "octave"
+ Pascal Lang = "pascal"
+ Perl Lang = "perl"
+ PHP Lang = "php"
+ Plist Lang = "plist"
+ Python Lang = "python"
+ Rlang Lang = "r"
+ Ruby Lang = "ruby"
+ Rust Lang = "rust"
+ Scala Lang = "scala"
+ ShellScript Lang = "shell"
+ SQL Lang = "sql"
+ SVG Lang = "svg"
+ Tcl Lang = "tcl"
+ TS Lang = "ts"
+ TSX Lang = "tsx"
+ XML Lang = "xml"
+ Yaml Lang = "yaml"
+)
+
+// Lang is the lower-case name of a language i.e. "go", not "Go"
+// where possible, the predefined instances should be used
+type Lang string
diff --git a/src/margo.sh/mg/lint.go b/src/margo.sh/mg/lint.go
new file mode 100644
index 00000000..ca190069
--- /dev/null
+++ b/src/margo.sh/mg/lint.go
@@ -0,0 +1,129 @@
+package mg
+
+import (
+ "margo.sh/mgutil"
+ "os"
+ "os/exec"
+)
+
+type Linter struct {
+ ReducerType
+
+ Langs []Lang
+ Actions []Action
+
+ Name string
+ Args []string
+
+ IssueKey func(*Ctx) IssueKey
+ Tag IssueTag
+ Label string
+ TempDir []string
+
+ q *mgutil.ChanQ
+}
+
+func (lt *Linter) RCond(mx *Ctx) bool {
+ return mx.LangIs(lt.Langs...) &&
+ (mx.ActionIs(lt.userActs()...) || mx.ActionIs(lt.auxActs()...))
+}
+
+func (lt *Linter) userActs() []Action {
+ if acts := lt.Actions; len(acts) != 0 {
+ return acts
+ }
+ return []Action{ViewSaved{}}
+}
+
+func (lt *Linter) auxActs() []Action {
+ return []Action{QueryUserCmds{}}
+}
+
+func (lt *Linter) Reduce(mx *Ctx) *State {
+ // keep non-default actions in sync with auxActs()
+ switch mx.Action.(type) {
+ case QueryUserCmds:
+ return lt.userCmds(mx)
+ default:
+ lt.q.Put(mx)
+ return mx.State
+ }
+}
+
+func (lt *Linter) RMount(mx *Ctx) {
+ lt.q = mgutil.NewChanQ(1)
+ go lt.loop()
+}
+
+func (lt *Linter) RUnmount(mx *Ctx) {
+ lt.q.Close()
+}
+
+func (lt *Linter) userCmds(mx *Ctx) *State {
+ lbl := lt.Label
+ if lbl == "" {
+ lbl = lt.Name
+ }
+ return mx.AddUserCmds(UserCmd{
+ Name: lt.Name,
+ Args: lt.Args,
+ Title: "Linter: " + lbl,
+ })
+}
+
+func (lt *Linter) loop() {
+ for v := range lt.q.C() {
+ lt.lint(v.(*Ctx))
+ }
+}
+
+func (lt *Linter) key(mx *Ctx) IssueKey {
+ if ik := lt.IssueKey; ik != nil {
+ return ik(mx)
+ }
+ return IssueKey{Key: lt}
+}
+
+func (lt *Linter) lint(mx *Ctx) {
+ res := StoreIssues{}
+ res.Key = lt.key(mx)
+ // make sure to clear any old issues, even if we return early
+ defer func() { mx.Store.Dispatch(res) }()
+
+ cmdStr := mgutil.QuoteCmd(lt.Name, lt.Args...)
+ if len(lt.TempDir) != 0 {
+ tmpDir, err := MkTempDir(lt.Label)
+ if err != nil {
+ mx.Log.Printf("cannot create tempDir for linter `%s`: %s\n", cmdStr, err)
+ return
+ }
+ defer os.RemoveAll(tmpDir)
+
+ m := mx.Env
+ for _, k := range lt.TempDir {
+ m = m.Add(k, tmpDir)
+ }
+ mx = mx.SetState(mx.State.SetEnv(m))
+ }
+
+ dir := mx.View.Dir()
+ iw := &IssueOut{
+ Dir: dir,
+ Patterns: mx.CommonPatterns(),
+ Base: Issue{Label: lt.Label, Tag: lt.Tag},
+ }
+
+ cmd := exec.Command(lt.Name, lt.Args...)
+ cmd.Stdout = iw
+ cmd.Stderr = iw
+ cmd.Env = mx.Env.Environ()
+ cmd.Dir = dir
+
+ if err := cmd.Start(); err != nil {
+ mx.Log.Printf("cannot start linter `%s`: %s", cmdStr, err)
+ return
+ }
+ cmd.Wait()
+ iw.Close()
+ res.Issues = iw.Issues()
+}
diff --git a/src/margo.sh/mg/log.go b/src/margo.sh/mg/log.go
new file mode 100644
index 00000000..2e7edb44
--- /dev/null
+++ b/src/margo.sh/mg/log.go
@@ -0,0 +1,18 @@
+package mg
+
+import (
+ "io"
+ "log"
+)
+
+type Logger struct {
+ *log.Logger
+ Dbg *log.Logger
+}
+
+func NewLogger(w io.Writer) *Logger {
+ return &Logger{
+ Logger: log.New(w, "", log.Lshortfile),
+ Dbg: log.New(w, "DBG: ", log.Lshortfile),
+ }
+}
diff --git a/src/margo.sh/mg/motd.go b/src/margo.sh/mg/motd.go
new file mode 100644
index 00000000..76af7948
--- /dev/null
+++ b/src/margo.sh/mg/motd.go
@@ -0,0 +1,250 @@
+package mg
+
+import (
+ "encoding/json"
+ "fmt"
+ "margo.sh/bolt"
+ "margo.sh/mgpf"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+var (
+ motdK = motdKey{K: "motdState"}
+)
+
+type motdAct struct {
+ ActionType
+ msg string
+}
+
+type motdKey struct{ K string }
+
+type motdState struct {
+ LastUpdate time.Time
+ Result struct {
+ Message string
+ Error string
+ ANN struct {
+ Tag struct {
+ Y int
+ M int
+ D int
+ N int
+ }
+ Content string
+ }
+ Tag string
+ }
+}
+
+// MOTD keeps you updated about new versions and important announcements
+//
+// It adds a new command `motd.sync` available via the UserCmd palette as `Sync MOTD (check for updates)`
+//
+// Interval can be set in order to enable automatic update fetching.
+//
+// When new updates are found, it displays the message in the status bar
+// e.g. `★ margo.sh/cl/18.09.14 ★` a url where you see the upcoming changes before updating
+//
+// It sends the following data to the url https://api.margo.sh/motd.json:
+// * current editor plugin name e.g. `?client=gosublime`
+// this tells us which editor plugin's changelog to check
+// * current editor plugin version e.g. `?tag=r18.09.14-1`
+// this allows us to determine if there any updates
+// * whether or not this is the first request of the day e.g. `?firstHit=1`
+// this allows us to get an estimated count of active users without storing
+// any personally identifiable data
+//
+// No other data is sent. For more info contact privacy at kuroku.io
+//
+type MOTD struct {
+ ReducerType
+
+ // Endpoint is the URL to check for new messages
+ // By default it's https://api.margo.sh/motd.json
+ Endpoint string
+
+ // Interval, if set, specifies how often to automatically fetch messages from Endpoint
+ Interval time.Duration
+
+ htc http.Client
+ msg string
+
+ mu sync.Mutex
+}
+
+func (m *MOTD) RInit(mx *Ctx) {
+ if m.Endpoint == "" {
+ m.Endpoint = "https://api.margo.sh/motd.json"
+ }
+}
+
+func (m *MOTD) RCond(mx *Ctx) bool {
+ return mx.Editor.Ready()
+}
+
+func (m *MOTD) RMount(mx *Ctx) {
+ go m.proc(mx)
+}
+
+func (m *MOTD) Reduce(mx *Ctx) *State {
+ st := mx.State
+ switch act := mx.Action.(type) {
+ case RunCmd:
+ st = st.AddBuiltinCmds(BuiltinCmd{Name: "motd.sync", Run: m.motdSyncCmd})
+ case QueryUserCmds:
+ st = st.AddUserCmds(UserCmd{Title: "Sync MOTD (check for updates)", Name: "motd.sync"})
+ case motdAct:
+ m.msg = act.msg
+ }
+ if m.msg != "" {
+ st = st.AddStatus(m.msg)
+ }
+ return st
+}
+
+func (m *MOTD) motdSyncCmd(bx *CmdCtx) *State {
+ go func() {
+ defer bx.Output.Close()
+
+ err := m.sync(bx.Ctx)
+ ms, _ := m.loadState()
+ if err != nil {
+ fmt.Fprintln(bx.Output, "Error:", err)
+ } else {
+ fmt.Fprintln(bx.Output, "MOTD:", ms.Result.Message)
+ fmt.Fprintln(bx.Output, ms.Result.ANN.Content)
+ }
+ }()
+ return bx.State
+}
+
+func (m *MOTD) loadState() (motdState, error) {
+ ms := motdState{}
+ err := bolt.DS.Load(motdK, &ms)
+ return ms, err
+}
+
+func (m *MOTD) storeState(ms motdState) error {
+ return bolt.DS.Store(motdK, ms)
+}
+
+func (m *MOTD) sync(mx *Ctx) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ dest, err := url.Parse(m.Endpoint)
+ if err != nil {
+ return fmt.Errorf("sync: cannot parse endpoint: %s: %s", m.Endpoint, err)
+ }
+ qry := dest.Query()
+ now := time.Now().UTC()
+ qry.Set("client", mx.Editor.Client.Name)
+ qry.Set("tag", mx.Editor.Client.Tag)
+ curr, _ := m.loadState()
+ if layout := "2006-01-02"; curr.LastUpdate.Format(layout) != now.Format(layout) {
+ qry.Set("firstHit", "1")
+ } else {
+ qry.Set("firstHit", "0")
+ }
+ dest.RawQuery = qry.Encode()
+
+ req, err := http.NewRequest("GET", dest.String(), nil)
+ if err != nil {
+ return fmt.Errorf("sync: cannot create request: %s", err)
+ }
+ req.Header.Set("User-Agent", "margo.motd")
+
+ mx.Log.Println("motd: sync: fetching", dest)
+
+ res, err := m.htc.Do(req)
+ if err != nil {
+ return fmt.Errorf("sync: cannot fetch updates: %s", err)
+ }
+ defer res.Body.Close()
+
+ next := motdState{LastUpdate: now}
+ if err := json.NewDecoder(res.Body).Decode(&next.Result); err != nil {
+ return fmt.Errorf("sync: cannot decode request: %s", err)
+ }
+ m.dispatchMsg(mx, next)
+
+ if err := m.storeState(next); err != nil {
+ return fmt.Errorf("sync: cannot store state: %s", err)
+ }
+
+ return nil
+}
+
+func (_ *MOTD) fmtTag(s string) (string, error) {
+ var y, m, d, n int
+ scanned, err := fmt.Sscanf(s, "%02d.%02d.%02d-%d", &y, &m, &d, &n)
+ if scanned < 4 {
+ _, err = fmt.Sscanf(s, "%02d.%02d.%02d", &y, &m, &d)
+ }
+ if err != nil {
+ return "", err
+ }
+ if n <= 0 {
+ n = 1
+ }
+ return fmt.Sprintf("%02d.%02d.%02d-%d", y, m, d, n), nil
+}
+
+func (m *MOTD) dispatchMsg(mx *Ctx, ms motdState) {
+ res := ms.Result
+ act := motdAct{}
+ ctag := mx.Editor.Client.Tag
+ srvTag, srvTagErr := m.fmtTag(res.Tag)
+ cliTag, cliTagErr := m.fmtTag(ctag)
+ switch {
+ case ctag == "":
+ mx.Log.Println("motd: client tag is undefined; you might need to restart the editor")
+ case srvTagErr != nil:
+ mx.Log.Println("motd: cannot parse server tag:", srvTagErr)
+ case cliTagErr != nil:
+ mx.Log.Println("motd: cannot parse client tag:", cliTagErr)
+ case cliTag < srvTag:
+ act.msg = res.Message
+ }
+ mx.Store.Dispatch(act)
+}
+
+func (m *MOTD) proc(mx *Ctx) {
+ printl := func(a ...interface{}) {
+ mx.Log.Println(append([]interface{}{"motd:"}, a...)...)
+ }
+
+ wait := func(d time.Duration) {
+ if d <= time.Second {
+ return
+ }
+
+ printl("waiting for", mgpf.D(d))
+ time.Sleep(d)
+ }
+
+ ms, _ := m.loadState()
+ m.dispatchMsg(mx, ms)
+
+ iv := m.Interval
+ if iv <= 0 {
+ return
+ }
+
+ if m := 30 * time.Second; iv < m {
+ iv = m
+ }
+ printl("auto-update enabled... checking every", mgpf.D(iv))
+ wait(iv - time.Since(ms.LastUpdate))
+
+ for {
+ if err := m.sync(mx); err != nil {
+ printl(err)
+ }
+ wait(iv)
+ }
+}
diff --git a/src/margo.sh/mg/oom.go b/src/margo.sh/mg/oom.go
new file mode 100644
index 00000000..a232a7bb
--- /dev/null
+++ b/src/margo.sh/mg/oom.go
@@ -0,0 +1,5 @@
+package mg
+
+const (
+ DefaultMemoryLimit = 2 << 30
+)
diff --git a/src/margo.sh/mg/oom_nix.go b/src/margo.sh/mg/oom_nix.go
new file mode 100644
index 00000000..1d0ed414
--- /dev/null
+++ b/src/margo.sh/mg/oom_nix.go
@@ -0,0 +1,26 @@
+//+build !windows
+
+package mg
+
+import (
+ "syscall"
+)
+
+func SetMemoryLimit(logs interface {
+ Printf(string, ...interface{})
+}, b uint64) {
+ rlim := &syscall.Rlimit{Cur: b, Max: b}
+ if err := syscall.Getrlimit(syscall.RLIMIT_DATA, rlim); err != nil {
+ logs.Printf("SetMemoryLimit: cannot get RLIMIT_DATA: %s\n", err)
+ return
+ }
+ rlim.Cur = b
+ mib := b / (1 << 20)
+ if err := syscall.Setrlimit(syscall.RLIMIT_DATA, rlim); err != nil {
+ logs.Printf("SetMemoryLimit: limit=%dMiB, cannot set RLIMIT_DATA: %s\n", rlim.Cur/(1<<20), err)
+ return
+ }
+ // re-read it so we see what it was actually set to
+ syscall.Getrlimit(syscall.RLIMIT_DATA, rlim)
+ logs.Printf("SetMemoryLimit: limit=%dMiB, RLIMIT_DATA={Cur: %dMiB, Max:%dMiB}\n", mib, rlim.Cur/(1<<20), rlim.Max/(1<<20))
+}
diff --git a/src/margo.sh/mg/oom_win.go b/src/margo.sh/mg/oom_win.go
new file mode 100644
index 00000000..f3eb175f
--- /dev/null
+++ b/src/margo.sh/mg/oom_win.go
@@ -0,0 +1,9 @@
+//+build windows
+
+package mg
+
+// SetMemoryLimit is a no-op on Windows; it only logs that the feature is unsupported.
+// The signature matches the unix implementation in oom_nix.go, whose log lines are
+// newline-terminated, so the message here is terminated with \n for consistency.
+func SetMemoryLimit(logs interface {
+ Printf(string, ...interface{})
+}, b uint64) {
+ logs.Printf("SetMemoryLimit: not supported on Windows\n")
+}
diff --git a/src/margo.sh/mg/reducers.go b/src/margo.sh/mg/reducers.go
new file mode 100644
index 00000000..c810bb70
--- /dev/null
+++ b/src/margo.sh/mg/reducers.go
@@ -0,0 +1,443 @@
+package mg
+
+import (
+ "reflect"
+ "runtime"
+ "sync"
+)
+
+var (
+ // DefaultReducers enables the automatic registration of reducers to the Agent's store
+ //
+ // This can be used to register reducers without user-interaction
+ // but where possible, it should not be used.
+ //
+ // its methods should only be called during init()
+ // any calls after this may be ignored
+ DefaultReducers = &defaultReducers{
+ before: reducerList{
+ &issueKeySupport{},
+ Builtins,
+ },
+ after: reducerList{
+ &issueStatusSupport{},
+ &cmdSupport{},
+ &restartSupport{},
+ &clientActionSupport{},
+ },
+ }
+
+ nopReducer = NewReducer(func(mx *Ctx) *State { return mx.State })
+)
+
+type defaultReducers struct {
+ mu sync.Mutex
+ before, use, after reducerList
+}
+
+// Before arranges for the reducers in l to be registered when the agent starts
+// it's the equivalent of the user manually calling Store.Before(l...)
+func (dr *defaultReducers) Before(l ...Reducer) {
+ dr.mu.Lock()
+ defer dr.mu.Unlock()
+
+ dr.before = dr.before.Add(l...)
+}
+
+// Use arranges for the reducers in l to be registered when the agent starts
+// it's the equivalent of the user manually calling Store.Use(l...)
+func (dr *defaultReducers) Use(l ...Reducer) {
+ dr.mu.Lock()
+ defer dr.mu.Unlock()
+
+ dr.use = dr.use.Add(l...)
+}
+
+// After arranges for the reducers in l to be registered when the agent starts
+// it's the equivalent of the user manually calling Store.After(l...)
+func (dr *defaultReducers) After(l ...Reducer) {
+ dr.mu.Lock()
+ defer dr.mu.Unlock()
+
+ dr.after = dr.after.Add(l...)
+}
+
+// A Reducer is the main method of state transitions in margo.
+//
+// The methods are called in the order listed below:
+//
+// * RInit
+// this is called during the first action (initAction{} FKA Started{})
+//
+// * RConfig
+// this is called on each reduction
+//
+// * RCond
+// this is called on each reduction
+// if it returns false, no other method is called
+//
+// * RMount
+// this is called once, after the first time RCond returns true
+//
+// * Reduce
+// this is called on each reduction until the agent begins shutting down
+//
+// * RUnmount
+// this is called once when the agent is shutting down,
+// iff (if, and only if) RMount was called
+//
+// For simplicity and the ability to extend the interface in the future,
+// users should embed `ReducerType` in their types to complete the interface.
+//
+// For convenience, it also implements all optional (non-Reduce()) methods.
+//
+// The method prefix `^R[A-Z]\w+` and name `Reduce` are reserved, and should not be used.
+//
+// For backwards compatibility the legacy methods:
+// ReducerInit, ReducerConfig, ReducerCond, ReducerMount and ReducerUnmount
+// will be called if the reducer does *not* define the corresponding lifecycle method.
+// i.e. if a reducer defines `ReducerInit` but not `RInit`, `ReducerInit` will be called.
+//
+// NewReducer() can be used to convert a function to a reducer.
+//
+// For reducers that are backed by goroutines that are only interested
+// in the *last* of some value e.g. *Ctx, mgutil.ChanQ might be of use.
+type Reducer interface {
+ // Reduce takes as input a Ctx describing the current state of the world
+ // and an Action describing some action that happened.
+ // Based on this action, the reducer returns a new state of the world.
+ //
+ // Reducers are called sequentially in the order they were registered
+ // with Store.Before(), Store.Use() or Store.After().
+ //
+ // A reducer should not call Store.State().
+ //
+ // Reducers should complete their work as quickly as possible,
+ // ideally only updating the state and not doing any work in the reducer itself.
+ //
+ // If a reducer is slow it might block the editor UI because some actions like
+ // fmt'ing the view must wait for the new src before the user
+ // can continue editing or saving the file.
+ //
+ // e.g. during the ViewFmt or ViewPreSave action, a reducer that knows how to
+ // fmt the file might update the state to hold a fmt'd copy of the view's src.
+ //
+ // or it can implement a linter that kicks off a goroutine to try to compile
+ // a package when one of its files is saved, i.e. when the ViewSaved action is dispatched.
+ Reduce(*Ctx) *State
+
+ // RLabel returns a string that can be used to name the reducer
+ // in pf.Profile and other display scenarios
+ RLabel() string
+ ReducerLabel() string
+
+ // RInit is called for the first reduction
+ // * it's only called once and can be used to initialise reducer state
+ // e.g. for initialising an embedded type
+ // * it's called before RConfig()
+ RInit(*Ctx)
+ ReducerInit(*Ctx)
+
+ // RConfig is called on each reduction, before RCond
+ // if it returns a new EditorConfig, it's equivalent to State.SetConfig()
+ // but is always run before RCond() so is useful for making sure
+ // configuration changes are always applied, even if Reduce() isn't called
+ RConfig(*Ctx) EditorConfig
+ ReducerConfig(*Ctx) EditorConfig
+
+ // RCond is called before Reduce and RMount is called
+ // if it returns false, no other methods are called
+ //
+ // It can be used as a pre-condition in combination with Reducer(Un)Mount
+ RCond(*Ctx) bool
+ ReducerCond(*Ctx) bool
+
+ // RMount is called once, after the first time that RCond returns true
+ RMount(*Ctx)
+ ReducerMount(*Ctx)
+
+ // RUnmount is called when communication with the client will stop
+ // it is only called if RMount was called
+ //
+ // It can be used to clean up any resources created in RMount
+ //
+ // After this method is called, Reduce will never be called again
+ RUnmount(*Ctx)
+ ReducerUnmount(*Ctx)
+
+ reducerType() *ReducerType
+}
+
+// ReducerType implements all optional methods of a reducer
+type ReducerType struct {
+ parent Reducer
+ mounted bool
+ unmounted bool
+}
+
+// RLabel implements Reducer.RLabel
+func (rt *ReducerType) RLabel() string { return rt.r().ReducerLabel() }
+
+// ReducerLabel implements Reducer.ReducerLabel
+func (rt *ReducerType) ReducerLabel() string { return "" }
+
+// RInit implements Reducer.RInit
+func (rt *ReducerType) RInit(mx *Ctx) { rt.r().ReducerInit(mx) }
+
+// ReducerInit implements Reducer.ReducerInit
+func (rt *ReducerType) ReducerInit(*Ctx) {}
+
+// RCond implements Reducer.RCond
+func (rt *ReducerType) RCond(mx *Ctx) bool { return rt.r().ReducerCond(mx) }
+
+// ReducerCond implements Reducer.ReducerCond
+func (rt *ReducerType) ReducerCond(*Ctx) bool { return true }
+
+// RConfig implements Reducer.RConfig
+func (rt *ReducerType) RConfig(mx *Ctx) EditorConfig {
+ return rt.r().ReducerConfig(mx)
+}
+
+// ReducerConfig implements Reducer.ReducerConfig
+func (rt *ReducerType) ReducerConfig(*Ctx) EditorConfig { return nil }
+
+// RMount implements Reducer.RMount
+func (rt *ReducerType) RMount(mx *Ctx) { rt.r().ReducerMount(mx) }
+
+// ReducerMount implements Reducer.ReducerMount
+func (rt *ReducerType) ReducerMount(*Ctx) {}
+
+// RUnmount implements Reducer.RUnmount
+func (rt *ReducerType) RUnmount(mx *Ctx) { rt.r().ReducerUnmount(mx) }
+
+// ReducerUnmount implements Reducer.ReducerUnmount
+func (rt *ReducerType) ReducerUnmount(*Ctx) {}
+
+func (rt *ReducerType) r() Reducer {
+ if rt.parent != nil {
+ return rt.parent
+ }
+ return nopReducer
+}
+
+func (rt *ReducerType) reducerType() *ReducerType { return rt }
+
+func (rt *ReducerType) bootstrap(parent Reducer) {
+ switch {
+ case rt.parent == nil:
+ rt.parent = parent
+ case rt.parent != parent:
+ panic("impossibru!")
+ }
+}
+
+func (rt *ReducerType) reduction(mx *Ctx, r Reducer) *Ctx {
+ rt.bootstrap(r)
+
+ defer mx.Profile.Push(ReducerLabel(r)).Pop()
+
+ rt.init(mx)
+
+ if c := rt.config(mx); c != nil {
+ mx = mx.SetState(mx.State.SetConfig(c))
+ }
+
+ if !rt.cond(mx) {
+ // if mount was called, unmount must be called, even if cond returns false
+ rt.unmount(mx)
+ return mx
+ }
+
+ rt.mount(mx)
+
+ if rt.unmount(mx) {
+ return mx
+ }
+
+ return rt.reduce(mx)
+}
+
+func (rt *ReducerType) init(mx *Ctx) {
+ if _, ok := mx.Action.(initAction); !ok {
+ return
+ }
+
+ defer mx.Profile.Push("Init").Pop()
+ rt.r().RInit(mx)
+}
+
+func (rt *ReducerType) config(mx *Ctx) EditorConfig {
+ defer mx.Profile.Push("Config").Pop()
+ return rt.r().RConfig(mx)
+}
+
+func (rt *ReducerType) cond(mx *Ctx) bool {
+ defer mx.Profile.Push("Cond").Pop()
+ return rt.r().RCond(mx)
+}
+
+func (rt *ReducerType) mount(mx *Ctx) {
+ if rt.mounted {
+ return
+ }
+
+ defer mx.Profile.Push("Mount").Pop()
+ rt.mounted = true
+ rt.r().RMount(mx)
+}
+
+func (rt *ReducerType) unmount(mx *Ctx) bool {
+ if !rt.mounted || rt.unmounted || !mx.ActionIs(unmount{}) {
+ return false
+ }
+
+ defer mx.Profile.Push("Unmount").Pop()
+ rt.unmounted = true
+ rt.r().RUnmount(mx)
+ return true
+}
+
+func (rt *ReducerType) reduce(mx *Ctx) *Ctx {
+ defer mx.Profile.Push("Reduce").Pop()
+ return mx.SetState(rt.r().Reduce(mx))
+}
+
+// Add adds new reducers to the list. It returns a new list.
+func (rl reducerList) Add(reducers ...Reducer) reducerList {
+ return append(rl[:len(rl):len(rl)], reducers...)
+}
+
+// reducerList is a slice of reducers
+type reducerList []Reducer
+
+func (rl reducerList) reduction(mx *Ctx) *Ctx {
+ for _, r := range rl {
+ mx = r.reducerType().reduction(mx, r)
+ }
+ return mx
+}
+
+// RFunc wraps a function to be used as a reducer
+// New instances should ideally be created using the global NewReducer() function
+type RFunc struct {
+ ReducerType
+
+ // Label is an optional string that may be used as a name for the reducer.
+ // If unset, a name based on the Func type will be used.
+ Label string
+
+ // Func is the equivalent of Reducer.Reduce
+ // If Func is nil, the current state is returned as-is
+ Func ReduceFn
+
+ // The following optional fields correspond with the Reducer lifecycle methods
+
+ // Init is the equivalent of Reducer.RInit
+ Init func(mx *Ctx)
+
+ // Cond is the equivalent of Reducer.RCond
+ Cond func(mx *Ctx) bool
+
+ // Config is the equivalent of Reducer.RConfig
+ Config func(mx *Ctx) EditorConfig
+
+ // Mount is the equivalent of Reducer.RMount
+ Mount func(mx *Ctx)
+
+ // Unmount is the equivalent of Reducer.RUnmount
+ Unmount func(mx *Ctx)
+}
+
+// ReduceFunc is an alias for RFunc
+type ReduceFunc = RFunc
+
+// RLabel implements Reducer.RLabel
+// It returns RFunc.Label if set, otherwise a label derived from Func's runtime name.
+func (rf *RFunc) RLabel() string {
+ if s := rf.Label; s != "" {
+ return s
+ }
+ nm := ""
+ if rf.Func != nil {
+ p := runtime.FuncForPC(reflect.ValueOf(rf.Func).Pointer())
+ if p != nil {
+ nm = p.Name()
+ }
+ }
+ return "mg.Reduce(" + nm + ")"
+}
+
+// RInit delegates to RFunc.Init if it's not nil
+func (rf *RFunc) RInit(mx *Ctx) {
+ if rf.Init != nil {
+ rf.Init(mx)
+ } else {
+ rf.ReducerType.RInit(mx)
+ }
+}
+
+// RCond delegates to RFunc.Cond if it's not nil
+func (rf *RFunc) RCond(mx *Ctx) bool {
+ if rf.Cond != nil {
+ return rf.Cond(mx)
+ }
+ return rf.ReducerType.RCond(mx)
+}
+
+// RConfig delegates to RFunc.Config if it's not nil
+func (rf *RFunc) RConfig(mx *Ctx) EditorConfig {
+ if rf.Config != nil {
+ return rf.Config(mx)
+ }
+ return rf.ReducerType.RConfig(mx)
+}
+
+// RMount delegates to RFunc.Mount if it's not nil
+func (rf *RFunc) RMount(mx *Ctx) {
+ if rf.Mount != nil {
+ rf.Mount(mx)
+ } else {
+ rf.ReducerType.RMount(mx)
+ }
+}
+
+// RUnmount delegates to RFunc.Unmount if it's not nil
+func (rf *RFunc) RUnmount(mx *Ctx) {
+ if rf.Unmount != nil {
+ rf.Unmount(mx)
+ } else {
+ rf.ReducerType.RUnmount(mx)
+ }
+}
+
+// Reduce implements the Reducer interface, delegating to RFunc.Func if it's not nil
+func (rf *RFunc) Reduce(mx *Ctx) *State {
+ if rf.Func != nil {
+ return rf.Func(mx)
+ }
+ return mx.State
+}
+
+// NewReducer creates a new RFunc
+// reduce can be nil, in which case RFunc.Reduce method will simply return the current state
+// each function in options is called on the newly created RFunc
+func NewReducer(reduce ReduceFn, options ...func(*RFunc)) *RFunc {
+ rf := &RFunc{Func: reduce}
+ for _, o := range options {
+ o(rf)
+ }
+ return rf
+}
+
+// ReducerLabel returns a label for the reducer r.
+// It takes into account the Reducer.RLabel method.
+func ReducerLabel(r Reducer) string {
+ if lbl := r.RLabel(); lbl != "" {
+ return lbl
+ }
+ if t := reflect.TypeOf(r); t != nil {
+ return t.String()
+ }
+ return "mg.Reducer"
+}
+
+type ReduceFn func(*Ctx) *State
diff --git a/src/margo.sh/mg/restart.go b/src/margo.sh/mg/restart.go
new file mode 100644
index 00000000..b5ad7614
--- /dev/null
+++ b/src/margo.sh/mg/restart.go
@@ -0,0 +1,156 @@
+package mg
+
+import (
+ "bytes"
+ "go/build"
+ "margo.sh/mgutil"
+ youtsuba "margo.sh/why_would_you_make_yotsuba_cry"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+type rsIssues struct {
+ ActionType
+ issues IssueSet
+}
+
+type restartSupport struct {
+ ReducerType
+ q *mgutil.ChanQ
+ issues IssueSet
+}
+
+func (rs *restartSupport) RLabel() string {
+ return "Mg/Restart"
+}
+
+func (rs *restartSupport) RInit(mx *Ctx) {
+ go rs.onInit(mx)
+}
+
+func (rs *restartSupport) RCond(mx *Ctx) bool {
+ if len(rs.issues) != 0 || mx.ActionIs(rsIssues{}) {
+ return true
+ }
+ if mx.LangIs(Go) && mx.ActionIs(ViewSaved{}) {
+ return true
+ }
+ return false
+}
+
+func (rs *restartSupport) RMount(mx *Ctx) {
+ rs.q = mgutil.NewChanQ(1)
+ go rs.loop()
+}
+
+func (rs *restartSupport) RUnmount(mx *Ctx) {
+ rs.q.Close()
+}
+
+func (rs *restartSupport) Reduce(mx *Ctx) *State {
+ switch act := mx.Action.(type) {
+ case rsIssues:
+ rs.issues = act.issues
+ case ViewSaved:
+ rs.q.Put(mx)
+ }
+ return mx.State.AddIssues(rs.issues...)
+}
+
+func (rs *restartSupport) loop() {
+ for v := range rs.q.C() {
+ rs.onSave(v.(*Ctx))
+ }
+}
+
+func (rs *restartSupport) mgPkg(mx *Ctx) *build.Package {
+ pkg, _ := youtsuba.AgentBuildContext.ImportDir(mx.View.Dir(), 0)
+ if pkg == nil || pkg.ImportPath == "" {
+ return nil
+ }
+ imp := pkg.ImportPath + "/"
+ if strings.Contains(imp, "/cmd/") {
+ return nil
+ }
+ if !strings.HasPrefix(imp, "margo/") && !strings.HasPrefix(imp, "margo.sh/") {
+ return nil
+ }
+ return pkg
+}
+
+func (rs *restartSupport) onInit(mx *Ctx) {
+ if os.Getenv("MARGO_BUILD_ERROR") == "" {
+ return
+ }
+
+ res := rsIssues{issues: rs.slowLint(mx, nil)}
+ if len(res.issues) != 0 {
+ mx.Store.Dispatch(res)
+ }
+}
+
+func (rs *restartSupport) onSave(mx *Ctx) {
+ v := mx.View
+ if !strings.HasSuffix(v.Path, ".go") || strings.HasSuffix(v.Path, "_test.go") {
+ return
+ }
+ pkg := rs.mgPkg(mx)
+ if pkg == nil {
+ return
+ }
+ res := rsIssues{issues: rs.slowLint(mx, pkg)}
+ mx.Store.Dispatch(res)
+ if mx.Env.Get("MARGO_NO_RESTART", "") == "1" {
+ return
+ }
+ src, _ := mx.View.ReadAll()
+ if s := []byte(`//margo:no-restart`); bytes.Contains(src, s) {
+ return
+ }
+ if len(res.issues) == 0 {
+ mx.Log.Println(pkg.ImportPath, "saved with no issues, restarting")
+ mx.Store.Dispatch(Restart{})
+ }
+}
+
+func (rs *restartSupport) slowLint(mx *Ctx, pkg *build.Package) IssueSet {
+ defer mx.Begin(Task{Title: "prepping margo restart"}).Done()
+
+ cmds := []*exec.Cmd{
+ exec.Command("margo.sh", "build", mx.AgentName()),
+ }
+ if pkg != nil && pkg.ImportPath != "margo" {
+ cmds = append([]*exec.Cmd{
+ exec.Command("margo.sh", "ci", "-quick"),
+ }, cmds...)
+ }
+
+ buf := &bytes.Buffer{}
+ var err error
+ for _, cmd := range cmds {
+ cmd.Dir = mx.View.Dir()
+ cmd.Env = mx.Env.Environ()
+ cmd.Stdout = buf
+ cmd.Stderr = buf
+ err = cmd.Run()
+ if err != nil {
+ break
+ }
+ }
+
+ output := buf.Bytes()
+ isuOut := &IssueOut{
+ Dir: mx.View.Dir(),
+ Patterns: mx.CommonPatterns(),
+ Base: Issue{Label: rs.RLabel()},
+ }
+ isuOut.Write(output)
+ isuOut.Close()
+ issues := isuOut.Issues()
+
+ if err != nil {
+ mx.Log.Printf(rs.RLabel()+": %s\n%s\n", err, output)
+ }
+ return issues
+}
diff --git a/src/margo.sh/mg/state.go b/src/margo.sh/mg/state.go
new file mode 100644
index 00000000..f3ab7165
--- /dev/null
+++ b/src/margo.sh/mg/state.go
@@ -0,0 +1,338 @@
+package mg
+
+import (
+ "fmt"
+ "github.com/ugorji/go/codec"
+ "margo.sh/htm"
+ "margo.sh/mg/actions"
+ "reflect"
+)
+
+var (
+ // ErrNoSettings is the error returned from EditorProps.Settings()
+ // when there was no settings sent from the editor
+ ErrNoSettings = fmt.Errorf("no editor settings")
+)
+
+type EditorClientProps struct {
+ // Name is the name of the client
+ Name string
+
+ // Tag is the client's version
+ Tag string
+}
+
+// EditorProps holds data about the text editor
+type EditorProps struct {
+ // Name is the name of the editor
+ Name string
+
+ // Version is the editor's version
+ Version string
+
+ // Client hold details about client (the editor plugin)
+ Client EditorClientProps
+
+ handle codec.Handle `mg.Nillable:"true"`
+ settings codec.Raw
+}
+
+// Ready returns true if the editor state has synced
+//
+// Reducers can call Ready in their RCond method to avoid mounting until
+// the editor has communicated its state.
+// Before the editor is ready, State.View, State.Editor, etc. might not contain usable data.
+func (ep *EditorProps) Ready() bool {
+ return ep.Name != ""
+}
+
+// Settings unmarshals the internal settings sent from the editor into v.
+// If no settings were sent, it returns ErrNoSettings,
+// otherwise it returns any error from unmarshalling.
+func (ep *EditorProps) Settings(v interface{}) error {
+ if ep.handle == nil || len(ep.settings) == 0 {
+ return ErrNoSettings
+ }
+ return codec.NewDecoderBytes(ep.settings, ep.handle).Decode(v)
+}
+
+// EditorConfig is the common interface between internally supported editors.
+//
+// The main implementation is `sublime.Config`
+type EditorConfig interface {
+ // EditorConfig returns data to be sent to the editor.
+ EditorConfig() interface{}
+
+ // EnabledForLangs is a hint to the editor listing the languages
+ // for which actions should be dispatched.
+ //
+ // To request actions for all languages, use `"*"` (the default)
+ EnabledForLangs(langs ...Lang) EditorConfig
+}
+
+// StickyState is state that's persisted from one reduction to the next.
+// It holds the current state of the editor.
+//
+// All fields are readonly and should only be assigned to during a call to State.Copy().
+// Child fields esp. View should not be assigned to.
+type StickyState struct {
+ // View describes the current state of the view.
+ // When constructed correctly (through Store.NewCtx()), View is never nil.
+ View *View
+
+ // Env holds environment variables sent from the editor.
+ // For "go" views in the "margo.sh" tree and "margo" package,
+ // "GOPATH" is set to the GOPATH that was used to build the agent.
+ Env EnvMap
+
+ // Editor holds data about the editor
+ Editor EditorProps
+
+ // Config holds config data for the editor to use
+ Config EditorConfig `mg.Nillable:"true"`
+}
+
+// State holds data about the state of the editor, and transformations made by reducers
+//
+// All fields are readonly and should only be assigned to during a call to State.Copy()
+// Methods on this object that return *State, return a new object.
+// As an optimization/implementation detail, the methods may choose to return
+// the input state object if no updates are done.
+//
+// New instances can be obtained through Store.NewCtx()
+//
+// Except StickyState, all fields are cleared at the start of a new dispatch.
+// Fields that need to be present for some time, e.g. Status and Issues,
+// should be populated at each call to the reducer
+// even if the action is not its primary action.
+// e.g. for linters, they should kick off a goroutine to do a compilation
+// after the file has been saved (ViewSaved) but always return its cached issues.
+//
+// If a reducer fails to return its state unless its primary action is dispatched
+// it could result in flickering in the editor for visible elements like the status
+type State struct {
+ // StickyState holds the current state of the editor
+ StickyState
+
+ // Status holds the list of status messages to show in the view
+ Status StrSet
+
+ // Errors holds the list of errors to display to the user
+ Errors StrSet
+
+ // Completions holds the list of completions to show to the user
+ Completions []Completion
+
+ // Issues holds the list of issues to present to the user
+ Issues IssueSet
+
+ // BuiltinCmds holds the list of builtin commands.
+ // It's usually populated during the RunCmd action.
+ BuiltinCmds BuiltinCmdList
+
+ // UserCmds holds the list of user commands.
+ // It's usually populated during the QueryUserCmds and QueryTestCmds actions.
+ UserCmds UserCmdList
+
+ // Tooltips is a list of tips to show the user
+ Tooltips []Tooltip
+
+ // HUD contains information to be displayed to the user
+ HUD HUDState
+
+ // clientActions is a list of client actions to dispatch in the editor
+ clientActions []actions.ClientData
+}
+
+// ActionLabel returns a label for the actions act.
+// It takes into account mg.Render being an alias for nil.
+func ActionLabel(act Action) string {
+ t := reflect.TypeOf(act)
+ if t != nil {
+ if s := act.ActionLabel(); s != "" {
+ return s
+ }
+ return t.String()
+ }
+ return "mg.Render"
+}
+
+// new creates a new State sharing State.StickyState
+func (st *State) new() *State {
+ return &State{StickyState: st.StickyState}
+}
+
+// Copy create a shallow copy of the State.
+//
+// It applies the functions in updaters to the new object.
+// Updating the new State via these functions is preferred to assigning to the new object
+func (st *State) Copy(updaters ...func(*State)) *State {
+ x := *st
+ st = &x
+
+ for _, f := range updaters {
+ f(st)
+ }
+ return st
+}
+
+// AddHUD adds a new article to State.HUD
+func (st *State) AddHUD(heading htm.IElement, content ...htm.Element) *State {
+ return st.Copy(func(st *State) {
+ st.HUD = st.HUD.AddArticle(heading, content...)
+ })
+}
+
+// AddTooltips add the list of tooltips l to State.Tooltips
+func (st *State) AddTooltips(l ...Tooltip) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ st.Tooltips = append(st.Tooltips[:len(st.Tooltips):len(st.Tooltips)], l...)
+ })
+}
+
+// AddStatusf is equivalent to State.AddStatus(fmt.Sprintf())
+func (st *State) AddStatusf(format string, a ...interface{}) *State {
+ return st.AddStatus(fmt.Sprintf(format, a...))
+}
+
+// AddStatus adds the list of messages in l to State.Status.
+func (st *State) AddStatus(l ...string) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ st.Status = st.Status.Add(l...)
+ })
+}
+
+// AddErrorf is equivalent to State.AddError(fmt.Sprintf())
+func (st *State) AddErrorf(format string, a ...interface{}) *State {
+ return st.AddError(fmt.Errorf(format, a...))
+}
+
+// AddError adds the non-nil errors in l to State.Errors.
+func (st *State) AddError(l ...error) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ for _, e := range l {
+ if e != nil {
+ st.Errors = st.Errors.Add(e.Error())
+ }
+ }
+ })
+}
+
+// SetConfig updates the State.Config.
+func (st *State) SetConfig(c EditorConfig) *State {
+ return st.Copy(func(st *State) {
+ st.Config = c
+ })
+}
+
+// SetEnv updates State.Env.
+func (st *State) SetEnv(m EnvMap) *State {
+ return st.Copy(func(st *State) {
+ st.Env = m
+ })
+}
+
+// SetSrc is a wrapper around View.SetSrc().
+// If `len(src) == 0` it does nothing because this is almost always a bug.
+func (st *State) SetViewSrc(src []byte) *State {
+ if len(src) == 0 {
+ return st
+ }
+ return st.SetView(st.View.SetSrc(src))
+}
+
+func (st *State) SetView(v *View) *State {
+ if st.View == v {
+ return st
+ }
+ st = st.Copy()
+ st.View = v
+ return st
+}
+
+// AddCompletions adds the completions in l to State.Completions
+func (st *State) AddCompletions(l ...Completion) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ st.Completions = append(st.Completions[:len(st.Completions):len(st.Completions)], l...)
+ })
+}
+
+// AddIssues adds the list of issues in l to State.Issues
+func (st *State) AddIssues(l ...Issue) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ st.Issues = st.Issues.Add(l...)
+ })
+}
+
+// AddBuiltinCmds adds the list of builtin commands in l to State.BuiltinCmds
+func (st *State) AddBuiltinCmds(l ...BuiltinCmd) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ st.BuiltinCmds = append(st.BuiltinCmds[:len(st.BuiltinCmds):len(st.BuiltinCmds)], l...)
+ })
+}
+
+// AddUserCmds adds the list of user commands in l to State.userCmds
+func (st *State) AddUserCmds(l ...UserCmd) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ st.UserCmds = append(st.UserCmds[:len(st.UserCmds):len(st.UserCmds)], l...)
+ })
+}
+
+// addClientActions adds the list of client actions in l to State.clientActions
+func (st *State) addClientActions(l ...actions.ClientAction) *State {
+ if len(l) == 0 {
+ return st
+ }
+ return st.Copy(func(st *State) {
+ el := make([]actions.ClientData, 0, len(st.clientActions)+len(l))
+ el = append(el, st.clientActions...)
+ for _, ca := range l {
+ el = append(el, ca.ClientAction())
+ }
+ st.clientActions = el
+ })
+}
+
+type clientProps struct {
+ Editor struct {
+ EditorProps
+ Settings codec.Raw
+ }
+ Env EnvMap
+ View *View
+}
+
+func (cp *clientProps) finalize(ag *Agent) {
+ ce := &cp.Editor
+ ep := &cp.Editor.EditorProps
+ ep.handle = ag.handle
+ ep.settings = ce.Settings
+}
+
+func makeClientProps(kvs KVStore) clientProps {
+ return clientProps{
+ Env: EnvMap{},
+ View: newView(kvs),
+ }
+}
diff --git a/src/margo.sh/mg/state_test.go b/src/margo.sh/mg/state_test.go
new file mode 100644
index 00000000..c385f171
--- /dev/null
+++ b/src/margo.sh/mg/state_test.go
@@ -0,0 +1,60 @@
+package mg
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func checkNonNil(v interface{}, handler func(sel string)) {
+ checkNonNilVal(reflect.ValueOf(v), "", handler)
+}
+
+func checkNonNilVal(v reflect.Value, sel string, handler func(sel string)) {
+ switch kind := v.Kind(); {
+ case kind == reflect.Ptr && !v.IsNil():
+ checkNonNilVal(v.Elem(), sel, handler)
+ case kind == reflect.Struct:
+ typ := v.Type()
+ switch typ {
+ case reflect.TypeOf(time.Time{}):
+ return
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Field(i)
+ t := typ.Field(i)
+ switch f.Kind() {
+ case reflect.Struct:
+ checkNonNilVal(f, sel+"."+t.Name, handler)
+ case reflect.Ptr, reflect.Interface:
+ if f.IsNil() && t.Tag.Get(`mg.Nillable`) != "true" {
+ handler(sel + "." + t.Name)
+ }
+ }
+ }
+ }
+}
+
+// TestNewAgentNillableFields checks that NewAgent() doesn't return an Agent, Store,
+// State, Ctx or View that has nil fields that are not tagged `mg.Nillable:"true"`
+func TestNewAgentNillableFields(t *testing.T) {
+ ag := NewTestingAgent(nil, nil, nil)
+ mx := ag.Store.NewCtx(nil)
+ cases := []interface{}{
+ ag,
+ ag.Store,
+ mx,
+ mx.State,
+ mx.State.View,
+ }
+
+ for _, c := range cases {
+ name := reflect.TypeOf(c).String()
+ t.Run(name, func(t *testing.T) {
+ checkNonNil(c, func(sel string) {
+ t.Errorf("(%s)%s is nil but is not tagged mg.Nillable", name, sel)
+ })
+ })
+ }
+}
diff --git a/src/margo.sh/mg/store.go b/src/margo.sh/mg/store.go
new file mode 100644
index 00000000..9c74b7c8
--- /dev/null
+++ b/src/margo.sh/mg/store.go
@@ -0,0 +1,356 @@
+package mg
+
+import (
+ "margo.sh/mgpf"
+ yotsuba "margo.sh/why_would_you_make_yotsuba_cry"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+var _ Dispatcher = (&Store{}).Dispatch
+
+// Dispatcher is the signature of the Store.Dispatch method
+type Dispatcher func(Action)
+
+// Subscriber is the signature of the function accepted by Store.Subscribe
+type Subscriber func(*Ctx)
+
+type dispatchHandler func()
+
+type storeReducers struct {
+ before reducerList
+ use reducerList
+ after reducerList
+}
+
+func (sr storeReducers) reduction(mx *Ctx) *Ctx {
+ mx.Profile.Do("Before", func() {
+ mx = sr.before.reduction(mx)
+ })
+ mx.Profile.Do("Use", func() {
+ mx = sr.use.reduction(mx)
+ })
+ mx.Profile.Do("After", func() {
+ mx = sr.after.reduction(mx)
+ })
+ return mx.defr.reduction(mx)
+}
+
+func (sr storeReducers) Copy(updaters ...func(*storeReducers)) storeReducers {
+ for _, f := range updaters {
+ f(&sr)
+ }
+ return sr
+}
+
+// Store holds global, shared state
+type Store struct {
+ // KVMap is an in-memory cache of data with automatic eviction.
+ // Eviction might happen if the active view changes.
+ //
+ // NOTE: it's not safe to store values with *Ctx objects here; use *Ctx.KVMap instead
+ KVMap
+
+ mu sync.Mutex
+ state *State
+ subs []*struct{ Subscriber }
+ sub Subscriber
+ reducers struct {
+ sync.Mutex
+ storeReducers
+ }
+ cfg EditorConfig `mg.Nillable:"true"`
+ ag *Agent
+ tasks *taskTracker
+ cache struct {
+ sync.RWMutex
+ vName string
+ vHash string
+ }
+
+ dsp struct {
+ sync.RWMutex
+ lo chan dispatchHandler
+ hi chan dispatchHandler
+ unmounted bool
+ }
+}
+
+func (sto *Store) mount() {
+ go sto.dispatcher()
+}
+
+func (sto *Store) unmount() {
+ done := make(chan struct{})
+ sto.dsp.hi <- func() {
+ defer close(done)
+
+ sto.dsp.Lock()
+ defer sto.dsp.Unlock()
+
+ if sto.dsp.unmounted {
+ return
+ }
+ sto.dsp.unmounted = true
+
+ sto.handleAct(unmount{}, nil)
+ }
+ <-done
+}
+
+// Dispatch schedules a new reduction with Action act
+//
+// * actions coming from the editor has a higher priority
+// * as a result, if Shutdown is dispatched, the action might be dropped
+func (sto *Store) Dispatch(act Action) {
+ c := sto.dsp.lo
+ f := func() { sto.handleAct(act, nil) }
+ select {
+ case c <- f:
+ default:
+ go func() { c <- f }()
+ }
+}
+
+func (sto *Store) nextDispatcher() dispatchHandler {
+ var h dispatchHandler
+ select {
+ case h = <-sto.dsp.hi:
+ default:
+ select {
+ case h = <-sto.dsp.hi:
+ case h = <-sto.dsp.lo:
+ }
+ }
+
+ sto.dsp.RLock()
+ defer sto.dsp.RUnlock()
+
+ if sto.dsp.unmounted {
+ return nil
+ }
+ return h
+}
+
+func (sto *Store) dispatcher() {
+ sto.ag.Log.Println("started")
+ sto.handleAct(initAction{}, nil)
+
+ for {
+ if f := sto.nextDispatcher(); f != nil {
+ f()
+ } else {
+ return
+ }
+ }
+}
+
+func (sto *Store) handleReduction(mx *Ctx, cookie string, pf *mgpf.Profile) *Ctx {
+ for mx.Acts.i = 0; mx.Acts.i < len(mx.Acts.l); mx.Acts.i++ {
+ st := mx.State.new()
+ st.Errors = mx.State.Errors
+ mx = newCtx(sto, st, mx.Acts, cookie, pf, mx.KVMap)
+ mx.Profile.Do("action|"+ActionLabel(mx.Action), func() {
+ mx = sto.reducers.reduction(mx)
+ })
+ }
+ return mx
+}
+
+func (sto *Store) handle(h func() *Ctx, p *mgpf.Profile) {
+ p.Push("handleRequest")
+ sto.mu.Lock()
+
+ mx := h()
+ sto.state = mx.State
+ subs := sto.subs
+
+ sto.mu.Unlock()
+ p.Pop()
+
+ for _, p := range subs {
+ p.Subscriber(mx)
+ }
+}
+
+func (sto *Store) handleAct(act Action, p *mgpf.Profile) {
+ if p == nil {
+ p = mgpf.NewProfile("")
+ }
+ sto.handle(func() *Ctx {
+ mx := newCtx(sto, nil, &ctxActs{l: []Action{act}}, "", p, nil)
+ return sto.handleReduction(mx, "", p)
+ }, p)
+}
+
+func (sto *Store) handleReq(rq *agentReq) {
+ sto.handle(func() *Ctx {
+ mx := sto.handleReqInit(rq, newCtx(sto, nil, nil, rq.Cookie, rq.Profile, nil))
+ return sto.handleReduction(mx, rq.Cookie, rq.Profile)
+ }, rq.Profile)
+}
+
+func (sto *Store) handleReqInit(rq *agentReq, mx *Ctx) *Ctx {
+ defer mx.Profile.Push("init").Pop()
+
+ if mx.Acts == nil {
+ mx.Acts = &ctxActs{l: make([]Action, 0, len(rq.Actions))}
+ }
+ for _, ra := range rq.Actions {
+ act, err := sto.ag.createAction(ra)
+ if err != nil {
+ mx.State = mx.AddErrorf("createAction(%s): %s", ra.Name, err)
+ } else {
+ mx.Acts.l = append(mx.Acts.l, act)
+ }
+ }
+
+ if cfg := sto.cfg; cfg != nil {
+ mx.Config = cfg
+ }
+ props := rq.Props
+ if ep := props.Editor.EditorProps; ep.Name != "" {
+ mx.Editor = ep
+ }
+ if v := props.View; v != nil && v.Name != "" {
+ mx.View = v
+ sto.initCache(v)
+ v.finalize()
+ }
+ if len(props.Env) != 0 {
+ mx.Env = props.Env
+ }
+ mx.Env = sto.autoSwitchInternalGOPATH(mx)
+ return mx
+}
+
+// autoSwitchInternalGOPATH returns mx.Env with GOPATH set to the agent's GOPATH
+// if mx.View.Filename is a child of said GOPATH
+func (sto *Store) autoSwitchInternalGOPATH(mx *Ctx) EnvMap {
+ fn := mx.View.Path
+ if fn == "" {
+ return mx.Env
+ }
+ gp := yotsuba.AgentBuildContext.GOPATH
+ for _, dir := range strings.Split(gp, string(filepath.ListSeparator)) {
+ if IsParentDir(dir, fn) {
+ return mx.Env.Add("GOPATH", gp)
+ }
+ }
+ return mx.Env
+}
+
+// NewCtx returns a new Ctx initialized using the internal StickyState.
+// The caller is responsible for calling Ctx.Cancel() when done with the Ctx
+func (sto *Store) NewCtx(act Action) *Ctx {
+ sto.mu.Lock()
+ defer sto.mu.Unlock()
+
+ return newCtx(sto, nil, &ctxActs{l: []Action{act}}, "", nil, nil)
+}
+
+func newStore(ag *Agent, sub Subscriber) *Store {
+ sto := &Store{
+ sub: sub,
+ ag: ag,
+ }
+ sto.state = &State{
+ StickyState: StickyState{View: newView(sto)},
+ }
+ sto.tasks = &taskTracker{}
+ sto.After(sto.tasks)
+
+ // 640 slots ought to be enough for anybody
+ sto.dsp.lo = make(chan dispatchHandler, 640)
+ sto.dsp.hi = make(chan dispatchHandler, 640)
+
+ return sto
+}
+
+// Subscribe arranges for sub to be called after each reduction takes place
+// the function returned can be used to unsubscribe from further notifications
+func (sto *Store) Subscribe(sub Subscriber) (unsubscribe func()) {
+ sto.mu.Lock()
+ defer sto.mu.Unlock()
+
+ p := &struct{ Subscriber }{sub}
+ sto.subs = append(sto.subs[:len(sto.subs):len(sto.subs)], p)
+
+ return func() {
+ sto.mu.Lock()
+ defer sto.mu.Unlock()
+
+ subs := make([]*struct{ Subscriber }, 0, len(sto.subs)-1)
+ for _, q := range sto.subs {
+ if p != q {
+ subs = append(subs, q)
+ }
+ }
+ sto.subs = subs
+ }
+}
+
+func (sto *Store) updateReducers(updaters ...func(*storeReducers)) *Store {
+ sto.reducers.Lock()
+ defer sto.reducers.Unlock()
+
+ sto.reducers.storeReducers = sto.reducers.Copy(updaters...)
+ return sto
+}
+
+// Before adds reducers to the list of reducers
+// they're are called before normal (Store.Use) reducers
+func (sto *Store) Before(reducers ...Reducer) *Store {
+ return sto.updateReducers(func(sr *storeReducers) {
+ sr.before = sr.before.Add(reducers...)
+ })
+}
+
+// Use adds reducers to the list of reducers
+// they're called after reducers added with Store.Before
+// and before reducers added with Store.After
+func (sto *Store) Use(reducers ...Reducer) *Store {
+ return sto.updateReducers(func(sr *storeReducers) {
+ sr.use = sr.use.Add(reducers...)
+ })
+}
+
+// After adds reducers to the list of reducers
+// they're are called after normal (Store.Use) reducers
+func (sto *Store) After(reducers ...Reducer) *Store {
+ return sto.updateReducers(func(sr *storeReducers) {
+ sr.after = sr.after.Add(reducers...)
+ })
+}
+
+// SetBaseConfig sets the EditorConfig on which State.Config is based
+//
+// this method is made available for use by editor/client integration
+// normal users should use State.SetConfig instead
+func (sto *Store) SetBaseConfig(cfg EditorConfig) *Store {
+ sto.mu.Lock()
+ defer sto.mu.Unlock()
+
+ sto.cfg = cfg
+ return sto
+}
+
+// Begin starts a new task and returns its ticket
+func (sto *Store) Begin(t Task) *TaskTicket {
+ return sto.tasks.Begin(t)
+}
+
+func (sto *Store) initCache(v *View) {
+ cc := &sto.cache
+ cc.Lock()
+ defer cc.Unlock()
+
+ if cc.vHash == v.Hash && cc.vName == v.Name {
+ return
+ }
+
+ sto.KVMap.Clear()
+ cc.vHash = v.Hash
+ cc.vName = v.Name
+}
diff --git a/src/margo.sh/mg/tasks.go b/src/margo.sh/mg/tasks.go
new file mode 100644
index 00000000..492b6c81
--- /dev/null
+++ b/src/margo.sh/mg/tasks.go
@@ -0,0 +1,291 @@
+package mg
+
+import (
+ "bytes"
+ "fmt"
+ "margo.sh/mgpf"
+ "margo.sh/mgutil"
+ "sync"
+ "time"
+)
+
+type Task struct {
+ Title string
+ Cancel func()
+ CancelID string
+ ShowNow bool
+ NoEcho bool
+}
+
+type TaskTicket struct {
+ Task
+ ID string
+ Start time.Time
+
+ tracker *taskTracker
+}
+
+func (ti *TaskTicket) Done() {
+ if ti.tracker != nil {
+ ti.tracker.done(ti.ID)
+ }
+}
+
+func (ti *TaskTicket) Cancel() {
+ if f := ti.Task.Cancel; f != nil {
+ f()
+ }
+}
+
+func (ti *TaskTicket) Cancellable() bool {
+ return ti.Task.Cancel != nil
+}
+
+type taskTracker struct {
+ ReducerType
+ mu sync.Mutex
+ id uint64
+ tickets []*TaskTicket
+ buf bytes.Buffer
+ dispatch Dispatcher
+ status string
+ timer *time.Timer
+}
+
+func (tr *taskTracker) RInit(mx *Ctx) {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ tr.dispatch = mx.Store.Dispatch
+}
+
+func (tr *taskTracker) RUnmount(*Ctx) {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ for _, t := range tr.tickets {
+ t.Cancel()
+ }
+}
+
+func (tr *taskTracker) Reduce(mx *Ctx) *State {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ st := mx.State
+ switch mx.Action.(type) {
+ case RunCmd:
+ st = tr.runCmd(st)
+ case QueryUserCmds:
+ st = tr.userCmds(st)
+ }
+ if tr.status != "" {
+ st = st.AddStatus(tr.status)
+ }
+ return st
+}
+
+func (tr *taskTracker) resetTimer() {
+ d := 1 * time.Second
+ if tr.timer == nil {
+ tr.timer = time.NewTimer(d)
+ go tr.ticker()
+ } else {
+ tr.timer.Reset(d)
+ }
+}
+
+func (tr *taskTracker) ticker() {
+ for range tr.timer.C {
+ tr.tick()
+ }
+}
+
+func (tr *taskTracker) tick() {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ status := tr.render()
+ if status != tr.status {
+ tr.status = status
+ if disp := tr.dispatch; disp != nil {
+ disp(Render)
+ }
+ }
+ if len(tr.tickets) != 0 {
+ tr.resetTimer()
+ }
+}
+
+func (tr *taskTracker) userCmds(st *State) *State {
+ cl := make([]UserCmd, len(tr.tickets))
+ now := time.Now()
+ for i, t := range tr.tickets {
+ c := UserCmd{Name: ".kill"}
+ dur := mgpf.D(now.Sub(t.Start))
+ if t.Cancellable() {
+ c.Args = []string{t.CancelID}
+ c.Title = "Task: Cancel " + t.Title
+ c.Desc = fmt.Sprintf("elapsed: %s, cmd: `%s`", dur, mgutil.QuoteCmd(c.Name, c.Args...))
+ } else {
+ c.Title = "Task: " + t.Title
+ c.Desc = fmt.Sprintf("elapsed: %s", dur)
+ }
+ cl[i] = c
+ }
+ return st.AddUserCmds(cl...)
+}
+
+func (tr *taskTracker) runCmd(st *State) *State {
+ return st.AddBuiltinCmds(
+ BuiltinCmd{
+ Name: ".kill",
+ Desc: "List and cancel active tasks",
+ Run: tr.killBuiltin,
+ },
+ )
+}
+
+// Cancel cancels the task tid.
+// true is returned if the task exists and was canceled
+func (tr *taskTracker) Cancel(tid string) bool {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ return tr.cancel(tid)
+}
+
+func (tr *taskTracker) cancel(tid string) bool {
+ for _, t := range tr.tickets {
+ if t.ID == tid || t.CancelID == tid {
+ t.Cancel()
+ return t.Cancellable()
+ }
+ }
+ return false
+}
+
+func (tr *taskTracker) killBuiltin(cx *CmdCtx) *State {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ defer cx.Output.Close()
+ if len(cx.Args) == 0 {
+ tr.listAll(cx)
+ } else {
+ tr.killAll(cx)
+ }
+
+ return cx.State
+}
+
+func (tr *taskTracker) killAll(cx *CmdCtx) {
+ buf := &bytes.Buffer{}
+ for _, tid := range cx.Args {
+ fmt.Fprintf(buf, "%s: %v\n", tid, tr.cancel(tid))
+ }
+ cx.Output.Write(buf.Bytes())
+}
+
+func (tr *taskTracker) listAll(cx *CmdCtx) {
+ buf := &bytes.Buffer{}
+ for _, t := range tr.tickets {
+ id := t.ID
+ if t.CancelID != "" {
+ id += "|" + t.CancelID
+ }
+
+ dur := time.Since(t.Start)
+ if dur < time.Second {
+ dur = dur.Round(time.Millisecond)
+ } else {
+ dur = dur.Round(time.Second)
+ }
+
+ fmt.Fprintf(buf, "ID: %s, Dur: %s, Title: %s\n", id, dur, t.Title)
+ }
+ cx.Output.Write(buf.Bytes())
+}
+
+func (tr *taskTracker) render() string {
+ if len(tr.tickets) == 0 {
+ return ""
+ }
+ now := time.Now()
+ visible := false
+ showAnim := false
+ title := ""
+ for _, t := range tr.tickets {
+ dur := now.Sub(t.Start)
+ if dur < 1*time.Second {
+ continue
+ }
+ visible = true
+ if t.NoEcho || t.Title == "" {
+ continue
+ }
+ if dur < 16*time.Second {
+ showAnim = true
+ }
+ if dur < 8*time.Second {
+ title = t.Title
+ break
+ }
+ }
+ if !visible {
+ return ""
+ }
+ tr.buf.Reset()
+ tr.buf.WriteString("Tasks ")
+ digits := mgutil.SecondaryDigits
+ if now.Second()%2 == 0 || !showAnim {
+ digits = mgutil.PrimaryDigits
+ }
+ digits.DrawInto(len(tr.tickets), &tr.buf)
+ if title != "" {
+ tr.buf.WriteByte(' ')
+ tr.buf.WriteString(title)
+ }
+ return tr.buf.String()
+}
+
+func (tr *taskTracker) done(id string) {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ l := make([]*TaskTicket, 0, len(tr.tickets)-1)
+ for _, t := range tr.tickets {
+ if t.ID != id {
+ l = append(l, t)
+ }
+ }
+ tr.tickets = l
+}
+
+func (tr *taskTracker) Begin(o Task) *TaskTicket {
+ tr.mu.Lock()
+ defer tr.mu.Unlock()
+
+ if cid := o.CancelID; cid != "" {
+ for _, t := range tr.tickets {
+ if t.CancelID == cid {
+ t.Cancel()
+ }
+ }
+ }
+
+ tr.id++
+ id := fmt.Sprintf("@%d", tr.id)
+ if o.CancelID == "" {
+ o.CancelID = id
+ }
+ t := &TaskTicket{
+ Task: o,
+ ID: id,
+ Start: time.Now(),
+ tracker: tr,
+ }
+ tr.tickets = append(tr.tickets, t)
+ tr.resetTimer()
+ return t
+}
diff --git a/src/disposa.blue/margo/mg/tempdir.go b/src/margo.sh/mg/tempdir.go
similarity index 100%
rename from src/disposa.blue/margo/mg/tempdir.go
rename to src/margo.sh/mg/tempdir.go
diff --git a/src/margo.sh/mg/testutil.go b/src/margo.sh/mg/testutil.go
new file mode 100644
index 00000000..1b702d5e
--- /dev/null
+++ b/src/margo.sh/mg/testutil.go
@@ -0,0 +1,53 @@
+package mg
+
+import (
+ "go/build"
+ "io"
+ "margo.sh/mgutil"
+)
+
+// NewTestingAgent creates a new agent for testing
+//
+// The agent config used is equivalent to:
+// * Codec: DefaultCodec
+// * Stdin: stdin or &mgutil.IOWrapper{} if nil
+// * Stdout: stdout or &mgutil.IOWrapper{} if nil
+// * Stderr: stderr or &mgutil.IOWrapper{} if nil
+//
+// * State.Env is set to mgutil.EnvMap{
+// * "GOROOT": build.Default.GOROOT,
+// * "GOPATH": build.Default.GOPATH,
+// * }
+func NewTestingAgent(stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) *Agent {
+ if stdin == nil {
+ stdin = &mgutil.IOWrapper{}
+ }
+ if stdout == nil {
+ stdout = &mgutil.IOWrapper{}
+ }
+ if stderr == nil {
+ stderr = &mgutil.IOWrapper{}
+ }
+ ag, _ := NewAgent(AgentConfig{
+ Stdin: stdin,
+ Stdout: stdout,
+ Stderr: stderr,
+ })
+ ag.Store.state = ag.Store.state.SetEnv(mgutil.EnvMap{
+ "GOROOT": build.Default.GOROOT,
+ "GOPATH": build.Default.GOPATH,
+ })
+ return ag
+}
+
+// NewTestingStore creates a new Store for testing
+// It's equivalent to NewTestingAgent().Store
+func NewTestingStore() *Store {
+ return NewTestingAgent(nil, nil, nil).Store
+}
+
+// NewTestingCtx creates a new Ctx for testing
+// It's equivalent to NewTestingStore().NewCtx()
+func NewTestingCtx(act Action) *Ctx {
+ return NewTestingStore().NewCtx(act)
+}
diff --git a/src/margo.sh/mg/tooltips.go b/src/margo.sh/mg/tooltips.go
new file mode 100644
index 00000000..c4ac3039
--- /dev/null
+++ b/src/margo.sh/mg/tooltips.go
@@ -0,0 +1,5 @@
+package mg
+
+type Tooltip struct {
+ Content string
+}
diff --git a/src/margo.sh/mg/usercmds.go b/src/margo.sh/mg/usercmds.go
new file mode 100644
index 00000000..29d53441
--- /dev/null
+++ b/src/margo.sh/mg/usercmds.go
@@ -0,0 +1,48 @@
+package mg
+
+// QueryUserCmds is the action dispatched to get a list of UserCmds.
+type QueryUserCmds struct{ ActionType }
+
+// QueryTestCmds is the action dispatched to get a list of UserCmds for testing, benchmarking, etc.
+type QueryTestCmds struct{ ActionType }
+
+// UserCmdList is a list of UserCmd
+type UserCmdList []UserCmd
+
+// Len implements sort.Interface
+func (uc UserCmdList) Len() int {
+ return len(uc)
+}
+
+// Less implements sort.Interface using UserCmd.Title for comparison
+func (uc UserCmdList) Less(i, j int) bool {
+ return uc[i].Title < uc[j].Title
+}
+
+// Swap implements sort.Interface
+func (uc UserCmdList) Swap(i, j int) {
+ uc[i], uc[j] = uc[j], uc[i]
+}
+
+// UserCmd represents a command that may be displayed in the editor ui.
+type UserCmd struct {
+ // Title is the name of the command displayed to the user
+ Title string
+
+ // Desc describes what the command invocation does
+ Desc string
+
+ // Name is the name of the command to run
+ Name string
+
+ // Args is a list of args to pass to the command
+ Args []string
+
+	// Dir is the preferred directory in which the command should run.
+ Dir string
+
+ // Prompts is a list of titles for prompting the user for input before running the command.
+ // The user is prompted once for each entry.
+ // The inputs are assigned directly to RunCmd.Prompts for command consumption.
+ Prompts []string
+}
diff --git a/src/margo.sh/mg/vfs.go b/src/margo.sh/mg/vfs.go
new file mode 100644
index 00000000..0325ce70
--- /dev/null
+++ b/src/margo.sh/mg/vfs.go
@@ -0,0 +1,98 @@
+package mg
+
+import (
+ "fmt"
+ hmnz "github.com/dustin/go-humanize"
+ "margo.sh/vfs"
+ "path/filepath"
+ "strings"
+)
+
+var (
+ VFS = vfs.New()
+)
+
+type vfsCmd struct{ ReducerType }
+
+func (vc *vfsCmd) Reduce(mx *Ctx) *State {
+ v := mx.View
+ switch mx.Action.(type) {
+ case ViewModified, ViewLoaded:
+ mx.VFS.Invalidate(v.Name)
+ case ViewSaved:
+ mx.VFS.Invalidate(v.Name)
+ mx.VFS.Invalidate(v.Filename())
+ case RunCmd:
+ return mx.AddBuiltinCmds(
+ BuiltinCmd{
+ Name: ".vfs",
+ Desc: "Print a tree representing the default VFS",
+ Run: func(cx *CmdCtx) *State {
+ go vc.cmdVfs(cx)
+ return cx.State
+ },
+ },
+ BuiltinCmd{
+ Name: ".vfs-blobs",
+ Desc: "Print a list, and summary of, blobs (file contents) cached in the VFS.",
+ Run: func(cx *CmdCtx) *State {
+ go vc.cmdVfsBlobs(cx)
+ return cx.State
+ },
+ },
+ )
+ }
+ return mx.State
+}
+
+func (vc *vfsCmd) cmdVfs(cx *CmdCtx) {
+ defer cx.Output.Close()
+
+ if len(cx.Args) == 0 {
+ cx.VFS.Print(cx.Output)
+ return
+ }
+
+ for _, p := range cx.Args {
+ nd, pat := &cx.VFS.Node, p
+ if filepath.IsAbs(p) {
+ nd, pat = cx.VFS.Peek(filepath.Dir(p)), filepath.Base(p)
+ }
+ nd.PrintWithFilter(cx.Output, func(nd *vfs.Node) string {
+ if nd.IsBranch() {
+ return nd.String()
+ }
+ if ok, _ := filepath.Match(pat, nd.Name()); ok {
+ return nd.String()
+ }
+ return ""
+ })
+ }
+}
+
+func (vc *vfsCmd) cmdVfsBlobs(cx *CmdCtx) {
+ defer cx.Output.Close()
+
+ files := int64(0)
+ size := uint64(0)
+ cx.VFS.PrintWithFilter(cx.Output, func(nd *vfs.Node) string {
+ if nd.IsBranch() {
+ return nd.String()
+ }
+ nm := []string{}
+ for _, b := range vfs.Blobs(nd) {
+ files++
+ sz := uint64(b.Len())
+ size += sz
+ nm = append(nm, fmt.Sprintf("%s (%s)", nd.String(), hmnz.IBytes(sz)))
+ }
+ return strings.Join(nm, ", ")
+ })
+ fmt.Fprintf(cx.Output, "\n%s files (%s) cached in memory.",
+ hmnz.Comma(files), hmnz.IBytes(size),
+ )
+}
+
+func init() {
+ DefaultReducers.Before(&vfsCmd{})
+}
diff --git a/src/margo.sh/mg/view.go b/src/margo.sh/mg/view.go
new file mode 100644
index 00000000..858d55a0
--- /dev/null
+++ b/src/margo.sh/mg/view.go
@@ -0,0 +1,198 @@
+package mg
+
+import (
+ "bytes"
+ "encoding/base64"
+ "golang.org/x/crypto/blake2b"
+ "io"
+ "io/ioutil"
+ "margo.sh/mgutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "unicode/utf8"
+)
+
+type View struct {
+ Path string
+ Wd string
+ Name string
+ Hash string
+ Src []byte
+ Pos int
+ Row int
+ Col int
+ Dirty bool
+ Ext string
+ Lang Lang
+
+ changed int
+ kvs KVStore
+}
+
+func newView(kvs KVStore) *View {
+ return &View{kvs: kvs}
+}
+
+func (v *View) Copy(updaters ...func(*View)) *View {
+ x := *v
+ for _, f := range updaters {
+ f(&x)
+ }
+ return &x
+}
+
+func (v *View) LangIs(langs ...Lang) bool {
+ if langs == nil {
+ return true
+ }
+ for _, l := range langs {
+ if l == v.Lang {
+ return true
+ }
+ }
+ return false
+}
+
+func (v *View) Dir() string {
+ if v.Path != "" {
+ return filepath.Dir(v.Path)
+ }
+ return v.Wd
+}
+
+func (v *View) Basename() string {
+ if v.Path != "" {
+ return filepath.Base(v.Path)
+ }
+ return v.Name
+}
+
+func (v *View) ShortFilename() string {
+ return v.ShortFn(nil)
+}
+
+func (v *View) ShortFn(env mgutil.EnvMap) string {
+ return mgutil.ShortFn(v.Filename(), env)
+}
+
+func (v *View) Filename() string {
+ if v.Path != "" {
+ return v.Path
+ }
+ return filepath.Join(v.Wd, v.Name)
+}
+
+func (v *View) key() interface{} {
+ type Key struct{ Hash string }
+ return Key{v.Hash}
+}
+
+func (v *View) src() (src []byte, ok bool) {
+ src = v.Src
+ if len(src) != 0 {
+ return src, true
+ }
+
+ if v.kvs != nil {
+ src, _ = v.kvs.Get(v.key()).([]byte)
+ }
+
+ if v.Path == "" || v.Dirty || len(src) != 0 {
+ return src, true
+ }
+
+ return nil, false
+}
+
+func (v *View) ReadAll() ([]byte, error) {
+ if src, ok := v.src(); ok {
+ return src, nil
+ }
+
+ r, err := v.Open()
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+
+ src, err := ioutil.ReadAll(r)
+ if err == nil && v.kvs != nil {
+ v.kvs.Put(v.key(), src)
+ }
+
+ return src, err
+}
+
+// SrcPos returns the view's src and pos taking care of reading src and clamping pos
+func (v *View) SrcPos() ([]byte, int) {
+ src, _ := v.ReadAll()
+ return src, mgutil.ClampPos(src, v.Pos)
+}
+
+func (v *View) Valid() bool {
+ return v.Name != ""
+}
+
+func (v *View) Open() (r io.ReadCloser, err error) {
+ if src, ok := v.src(); ok {
+ return ioutil.NopCloser(bytes.NewReader(src)), nil
+ }
+
+ if v.Path == "" {
+ return nil, os.ErrNotExist
+ }
+
+ return os.Open(v.Path)
+}
+
+func (v *View) finalize() {
+ src, err := v.ReadAll()
+ if err != nil {
+ return
+ }
+
+ v.Src = src
+ v.Pos = BytePos(src, v.Pos)
+ lines := bytes.Split(src[:v.Pos], []byte{'\n'})
+ v.Row = len(lines) - 1
+ v.Col = len(lines[len(lines)-1])
+ v.Hash = SrcHash(src)
+ v.Ext = filepath.Ext(v.Filename())
+ v.kvs.Put(v.key(), src)
+}
+
+func (v *View) SetSrc(s []byte) *View {
+ return v.Copy(func(v *View) {
+ v.Pos = 0
+ v.Row = 0
+ v.Col = 0
+ v.Src = s
+ v.Hash = SrcHash(s)
+ v.Dirty = true
+ v.changed++
+ })
+}
+
+func SrcHash(s []byte) string {
+ hash := blake2b.Sum512(s)
+ return "hash:blake2b/Sum512;base64url," + base64.URLEncoding.EncodeToString(hash[:])
+}
+
+// CommonPatterns returns the package-level CommonPatterns(v.Lang)
+func (v *View) CommonPatterns() []*regexp.Regexp {
+ return CommonPatterns(v.Lang)
+}
+
+func BytePos(src []byte, charPos int) int {
+ for i, c := range src {
+ if !utf8.RuneStart(c) {
+ continue
+ }
+ charPos--
+ if charPos < 0 {
+ return i
+ }
+ }
+ return len(src)
+}
diff --git a/src/disposa.blue/margo/mg/view_test.go b/src/margo.sh/mg/view_test.go
similarity index 100%
rename from src/disposa.blue/margo/mg/view_test.go
rename to src/margo.sh/mg/view_test.go
diff --git a/src/margo.sh/mg/virtual-cmds.go b/src/margo.sh/mg/virtual-cmds.go
new file mode 100644
index 00000000..32395a6c
--- /dev/null
+++ b/src/margo.sh/mg/virtual-cmds.go
@@ -0,0 +1,12 @@
+package mg
+
+const (
+	// RcActuate is the command that's run when a user triggers an action
+ // at the cursor, primarily via the mouse e.g. goto.definition
+ //
+ // Args:
+ // -button="": the action wasn't triggered by a mouse click
+	// -button="left": the action was triggered by a left-click
+	// -button="right": the action was triggered by a right-click
+ RcActuate = ".actuate"
+)
diff --git a/src/disposa.blue/margo/mgcli/mgcli.go b/src/margo.sh/mgcli/mgcli.go
similarity index 89%
rename from src/disposa.blue/margo/mgcli/mgcli.go
rename to src/margo.sh/mgcli/mgcli.go
index cd9bb1ce..83e741a5 100644
--- a/src/disposa.blue/margo/mgcli/mgcli.go
+++ b/src/margo.sh/mgcli/mgcli.go
@@ -6,6 +6,12 @@ import (
"os"
)
+type Commands struct {
+ Name string
+ Build *cli.Command
+ Run *cli.Command
+}
+
type App struct{ cli.App }
func (a *App) RunAndExitOnError() {
diff --git a/src/margo.sh/mgpf/mgpf.go b/src/margo.sh/mgpf/mgpf.go
new file mode 100644
index 00000000..a38e2ba7
--- /dev/null
+++ b/src/margo.sh/mgpf/mgpf.go
@@ -0,0 +1,212 @@
+package mgpf
+
+import (
+ "fmt"
+ "io"
+ "margo.sh/mgutil"
+ "sync"
+ "time"
+)
+
+var (
+ enabled = &mgutil.AtomicBool{}
+
+ DefaultPrintOpts = PrintOpts{
+ Indent: " ",
+ }
+)
+
+func Enabled() bool {
+ return enabled.IsSet()
+}
+
+func Enable() {
+ enabled.Set(true)
+}
+
+func Disable() {
+ enabled.Set(false)
+}
+
+type Dur struct {
+ time.Duration
+}
+
+func (d Dur) String() string {
+ p := d.Duration
+ switch {
+ case p < time.Millisecond:
+ return p.Round(time.Microsecond).String()
+ case p < time.Minute:
+ return p.Round(time.Millisecond).String()
+ default:
+ return p.Round(time.Second).String()
+ }
+}
+
+func D(d time.Duration) Dur {
+ return Dur{Duration: d}
+}
+
+type PrintOpts struct {
+ Prefix string
+ Indent string
+ MinDuration time.Duration
+}
+
+type Node struct {
+ Name string
+ Duration time.Duration
+ Samples int
+ Children []*Node
+
+ start time.Time
+}
+
+func (n *Node) Dur() Dur {
+ return D(n.duration())
+}
+
+func (n *Node) child(name string) *Node {
+ for _, c := range n.Children {
+ if c.Name == name {
+ return c
+ }
+ }
+ c := &Node{Name: name}
+ n.Children = append(n.Children, c)
+ return c
+}
+
+func (n *Node) duration() time.Duration {
+ d := n.Duration
+ if d > 0 {
+ return d
+ }
+
+ for _, c := range n.Children {
+ d += c.Duration
+ }
+ return d
+}
+
+func (n *Node) fprint(w io.Writer, o PrintOpts) {
+ subTitle := ""
+ if i := n.Samples; i >= 2 {
+ subTitle = fmt.Sprintf("*%d", i)
+ }
+ fmt.Fprintf(w, "%s%s%s %s\n", o.Prefix, n.Name, subTitle, n.Dur())
+
+ o.Prefix += o.Indent
+ for _, c := range n.Children {
+ if c.Duration >= o.MinDuration {
+ c.fprint(w, o)
+ }
+ }
+}
+
+type Profile struct {
+ root *Node
+ stack []*Node
+ mu sync.RWMutex
+}
+
+func (p *Profile) Dur() Dur {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ return p.root.Dur()
+}
+
+func (p *Profile) Do(name string, f func()) {
+ defer p.Push(name).Pop()
+ f()
+}
+
+func (p *Profile) Push(name string) *Profile {
+ p.update(func() {
+ n := p.stack[len(p.stack)-1].child(name)
+ n.start = time.Now()
+ p.stack = append(p.stack, n)
+ })
+ return p
+}
+
+func (p *Profile) Pop() {
+ p.update(func() {
+ n := p.stack[len(p.stack)-1]
+ n.Duration += time.Since(n.start)
+ n.Samples++
+ p.stack = p.stack[:len(p.stack)-1]
+ })
+}
+
+func (p *Profile) Start(name string) *Sample {
+ s := &Sample{t: time.Now(), p: p}
+ p.update(func() {
+ s.n = p.stack[len(p.stack)-1].child(name)
+ })
+ return s
+}
+
+func (p *Profile) Sample(name string, d time.Duration) {
+ p.update(func() {
+ n := p.stack[len(p.stack)-1].child(name)
+ n.Duration += d
+ n.Samples++
+ })
+}
+
+func (p *Profile) Fprint(w io.Writer, opts *PrintOpts) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ o := DefaultPrintOpts
+ if opts != nil {
+ o = *opts
+ if o.Indent == "" {
+ o.Indent = DefaultPrintOpts.Indent
+ }
+ }
+
+ p.root.fprint(w, o)
+}
+
+func (p *Profile) SetName(name string) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ p.root.Name = name
+}
+
+func (p *Profile) update(f func()) {
+ if !Enabled() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ f()
+}
+
+func NewProfile(name string) *Profile {
+ n := &Node{Name: name}
+ return &Profile{root: n, stack: []*Node{n}}
+}
+
+func Since(t time.Time) Dur {
+ return D(time.Since(t))
+}
+
+type Sample struct {
+ t time.Time
+ p *Profile
+ n *Node
+}
+
+func (s *Sample) Stop() {
+ s.p.update(func() {
+ s.n.Duration += time.Since(s.t)
+ s.n.Samples++
+ })
+}
diff --git a/src/disposa.blue/margo/misc/pprof/pprofdo/pprofdo-fallback.go b/src/margo.sh/mgpf/pprof/pprofdo/pprofdo-fallback.go
similarity index 100%
rename from src/disposa.blue/margo/misc/pprof/pprofdo/pprofdo-fallback.go
rename to src/margo.sh/mgpf/pprof/pprofdo/pprofdo-fallback.go
diff --git a/src/disposa.blue/margo/misc/pprof/pprofdo/pprofdo.go b/src/margo.sh/mgpf/pprof/pprofdo/pprofdo.go
similarity index 100%
rename from src/disposa.blue/margo/misc/pprof/pprofdo/pprofdo.go
rename to src/margo.sh/mgpf/pprof/pprofdo/pprofdo.go
diff --git a/src/disposa.blue/margo/misc/pprof/pprofhttp/pprofhttp.go b/src/margo.sh/mgpf/pprof/pprofhttp/pprofhttp.go
similarity index 100%
rename from src/disposa.blue/margo/misc/pprof/pprofhttp/pprofhttp.go
rename to src/margo.sh/mgpf/pprof/pprofhttp/pprofhttp.go
diff --git a/src/margo.sh/mgutil/chanq.go b/src/margo.sh/mgutil/chanq.go
new file mode 100644
index 00000000..11ebc53e
--- /dev/null
+++ b/src/margo.sh/mgutil/chanq.go
@@ -0,0 +1,62 @@
+package mgutil
+
+import (
+ "sync"
+)
+
+// ChanQ is a bounded queue
+type ChanQ struct {
+ c chan interface{}
+ mu sync.Mutex
+ closed bool
+}
+
+// Put puts v into the queue.
+// It removes the oldest value if no space is available.
+func (q *ChanQ) Put(v interface{}) {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ if q.closed {
+ return
+ }
+
+ for {
+ select {
+ case q.c <- v:
+ return
+ case _, open := <-q.c:
+ if !open {
+ return
+ }
+ }
+ }
+}
+
+// C returns a channel on which values are sent
+func (q *ChanQ) C() <-chan interface{} {
+ return q.c
+}
+
+// Close closes the queue and the channel returned by C().
+// closing a closed queue has no effect.
+func (q *ChanQ) Close() {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ if q.closed {
+ return
+ }
+
+ q.closed = true
+ close(q.c)
+}
+
+// NewChanQ creates a new ChanQ
+// if cap is less than 1, it panics
+func NewChanQ(cap int) *ChanQ {
+ if cap < 1 {
+ panic("ChanQ cap must be greater than, or equal to, one")
+ }
+ return &ChanQ{c: make(chan interface{}, cap)}
+}
diff --git a/src/margo.sh/mgutil/chanq_test.go b/src/margo.sh/mgutil/chanq_test.go
new file mode 100644
index 00000000..9f41f261
--- /dev/null
+++ b/src/margo.sh/mgutil/chanq_test.go
@@ -0,0 +1,30 @@
+package mgutil
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestChanQ(t *testing.T) {
+ for _, i := range []int{0, -1} {
+ name := fmt.Sprintf("NewChanQ(%d)", i)
+ t.Run(name, func(t *testing.T) {
+ defer func() {
+ if v := recover(); v == nil {
+ t.Errorf("%s does not result in a panic", name)
+ }
+ }()
+ NewChanQ(i)
+ })
+ }
+
+ cq := NewChanQ(1)
+ lastVal := -1
+ for i := 0; i < 3; i++ {
+ lastVal = i
+ cq.Put(lastVal)
+ }
+ if v := <-cq.C(); v != lastVal {
+ t.Error("CtxQ.Put does not appear to clear the old value")
+ }
+}
diff --git a/src/margo.sh/mgutil/concurrency.go b/src/margo.sh/mgutil/concurrency.go
new file mode 100644
index 00000000..7f67f268
--- /dev/null
+++ b/src/margo.sh/mgutil/concurrency.go
@@ -0,0 +1,10 @@
+package mgutil
+
+import (
+ "runtime"
+)
+
+// MinNumCPU calls Min(runtime.NumCPU(), q...).
+func MinNumCPU(q ...int) int {
+ return Min(runtime.NumCPU(), q...)
+}
diff --git a/src/margo.sh/mgutil/digits.go b/src/margo.sh/mgutil/digits.go
new file mode 100644
index 00000000..922e8d7a
--- /dev/null
+++ b/src/margo.sh/mgutil/digits.go
@@ -0,0 +1,27 @@
+package mgutil
+
+var (
+ PrimaryDigits = DigitDisplay{'🄌', '➊', '➋', '➌', '➍', '➎', '➏', '➐', '➑', '➒'}
+ SecondaryDigits = DigitDisplay{'🄋', '➀', '➁', '➂', '➃', '➄', '➅', '➆', '➇', '➈'}
+)
+
+type RuneWriter interface {
+ WriteRune(rune) (int, error)
+}
+
+type DigitDisplay []rune
+
+func (p DigitDisplay) Draw(n int, f func(rune)) {
+ base := len(p)
+ if n < base {
+ f(p[n])
+ return
+ }
+ m := n / base
+ p.Draw(m, f)
+ f(p[n-m*base])
+}
+
+func (p DigitDisplay) DrawInto(n int, w RuneWriter) {
+ p.Draw(n, func(r rune) { w.WriteRune(r) })
+}
diff --git a/src/margo.sh/mgutil/iowrapper.go b/src/margo.sh/mgutil/iowrapper.go
new file mode 100644
index 00000000..a2709c42
--- /dev/null
+++ b/src/margo.sh/mgutil/iowrapper.go
@@ -0,0 +1,105 @@
+package mgutil
+
+import (
+ "io"
+ "sync"
+)
+
+var (
+ _ io.Reader = (*IOWrapper)(nil)
+ _ io.Writer = (*IOWrapper)(nil)
+ _ io.Closer = (*IOWrapper)(nil)
+ _ io.Reader = (ReaderFunc)(nil)
+ _ io.Writer = (WriterFunc)(nil)
+)
+
+type OutputStream interface {
+ io.Writer
+ io.Closer
+ Flush() error
+}
+
+// IOWrapper implements various optional io interfaces.
+// It delegates to the interface fields that are not nil
+type IOWrapper struct {
+ // If Locker is not nil, all methods are called while holding the lock
+ Locker sync.Locker
+
+ // If Reader is not nil, it will be called to handle reads
+ Reader io.Reader
+
+ // If Writer is not nil, it will be called to handle writes
+ Writer io.Writer
+
+ // If Closer is not nil, it will be called to handle closes
+ Closer io.Closer
+
+ // If Flusher is not nil, it will be called to handle flushes
+ Flusher interface{ Flush() error }
+}
+
+// lockUnlock locks Locker if it's not nil and returns Locker.Unlock
+// otherwise it returns a nop unlock function
+func (iow *IOWrapper) lockUnlock() func() {
+ if mu := iow.Locker; mu != nil {
+ mu.Lock()
+ return mu.Unlock
+ }
+ return func() {}
+}
+
+// Read calls Reader.Read() if Reader is not nil
+// otherwise it returns `0, io.EOF`
+func (iow *IOWrapper) Read(p []byte) (int, error) {
+ defer iow.lockUnlock()()
+
+ if r := iow.Reader; r != nil {
+ return r.Read(p)
+ }
+ return 0, io.EOF
+}
+
+// Write calls Writer.Write() if Writer is not nil
+// otherwise it returns `len(p), nil`
+func (iow *IOWrapper) Write(p []byte) (int, error) {
+ defer iow.lockUnlock()()
+
+ if w := iow.Writer; w != nil {
+ return w.Write(p)
+ }
+ return len(p), nil
+}
+
+// Close calls Closer.Close() if Closer is not nil
+// otherwise it returns `nil`
+func (iow *IOWrapper) Close() error {
+ defer iow.lockUnlock()()
+
+ if c := iow.Closer; c != nil {
+ return c.Close()
+ }
+ return nil
+}
+
+// Flush calls Flusher.Flush() if Flusher is not nil
+// otherwise it returns `nil`
+func (iow *IOWrapper) Flush() error {
+ defer iow.lockUnlock()()
+
+ if f := iow.Flusher; f != nil {
+ return f.Flush()
+ }
+ return nil
+}
+
+// WriterFunc implements io.Writer using a function.
+type WriterFunc func([]byte) (int, error)
+
+// Write calls f(p)
+func (f WriterFunc) Write(p []byte) (int, error) { return f(p) }
+
+// ReaderFunc implements io.Reader using a function.
+type ReaderFunc func([]byte) (int, error)
+
+// Read calls f(p)
+func (f ReaderFunc) Read(p []byte) (int, error) { return f(p) }
diff --git a/src/margo.sh/mgutil/memo.go b/src/margo.sh/mgutil/memo.go
new file mode 100644
index 00000000..d9b8e724
--- /dev/null
+++ b/src/margo.sh/mgutil/memo.go
@@ -0,0 +1,7 @@
+package mgutil
+
+import (
+ "margo.sh/memo"
+)
+
+type Memo = memo.M
diff --git a/src/margo.sh/mgutil/mgutil.go b/src/margo.sh/mgutil/mgutil.go
new file mode 100644
index 00000000..93ecd2ce
--- /dev/null
+++ b/src/margo.sh/mgutil/mgutil.go
@@ -0,0 +1,78 @@
+// Package mgutil is a collections of utility types and functions with no dependency on margo.sh/mg
+package mgutil
+
+import (
+ "strconv"
+ "strings"
+)
+
+// QuoteCmdArg uses strconv.Quote to quote the command arg s.
+// NOTE: the result is for display only, and should not be used for shell security.
+// e.g.
+// `a b c` -> `"a b c"`
+// `abc` -> `abc`
+// `-abc=123` -> `-abc=123`
+// `-abc=1 2 3` -> `-abc="1 2 3"`
+func QuoteCmdArg(s string) string {
+ eqPos := strings.Index(s, "=")
+ switch {
+ case s == "":
+ return `""`
+ case !strings.Contains(s, " "):
+ return s
+ case strings.HasPrefix(s, "-") && eqPos > 0:
+ return s[:eqPos+1] + strconv.Quote(s[eqPos+1:])
+ default:
+ return strconv.Quote(s)
+ }
+}
+
+// QuoteCmd joins `name [args]` with name and each arg quoted with QuoteCmdArg
+// NOTE: the result is for display only, and should not be used for shell security.
+func QuoteCmd(name string, args ...string) string {
+ a := append([]string{name}, args...)
+ for i, s := range a {
+ a[i] = QuoteCmdArg(s)
+ }
+ return strings.Join(a, " ")
+}
+
+// Clamp limits n to the interval [ lo, hi ]
+func Clamp(lo, hi int, n int) int {
+ switch {
+ case n <= lo:
+ return lo
+ case n >= hi:
+ return hi
+ default:
+ return n
+ }
+}
+
+// ClampPos limits pos to the interval [ 0, len(s)-1 ]
+func ClampPos(s []byte, pos int) int {
+ if len(s) == 0 {
+ return 0
+ }
+ return Clamp(0, len(s)-1, pos)
+}
+
+// Max returns the largest of p or q.
+func Max(p int, q ...int) int {
+ for _, q := range q {
+ if q > p {
+ p = q
+ }
+ }
+ return p
+}
+
+// Min returns the smallest of p or q.
+func Min(p int, q ...int) int {
+ for _, q := range q {
+ if q < p {
+ p = q
+ }
+ }
+ return p
+}
diff --git a/src/margo.sh/mgutil/mgutil_test.go b/src/margo.sh/mgutil/mgutil_test.go
new file mode 100644
index 00000000..a88fe180
--- /dev/null
+++ b/src/margo.sh/mgutil/mgutil_test.go
@@ -0,0 +1,63 @@
+package mgutil
+
+import (
+ "testing"
+)
+
+func TestClamp(t *testing.T) {
+ type Case struct{ lo, hi, n, res int }
+
+ test := func(c Case) {
+ t.Helper()
+
+ if got := Clamp(c.lo, c.hi, c.n); got != c.res {
+ t.Errorf("Clamp(%d,%d, %d) should be %d, not %d", c.lo, c.hi, c.n, c.res, got)
+ }
+ }
+
+ test(Case{-1, -1, 0, -1})
+ test(Case{-1, 0, 0, 0})
+ test(Case{-1, 1, 0, 0})
+ test(Case{-1, 1, 1, 1})
+ test(Case{-1, 1, 2, 1})
+}
+
+func TestClampPos(t *testing.T) {
+ type Case struct{ len, pos, res int }
+
+ test := func(c Case) {
+ t.Helper()
+
+ var s []byte
+ if c.len >= 0 {
+ s = make([]byte, c.len)
+ }
+ if got := ClampPos(s, c.pos); got != c.res {
+ t.Errorf("ClampPos(%d, %d) should be %d, not %d", c.len, c.pos, c.res, got)
+ }
+ }
+
+ test(Case{-1, -1, 0})
+ test(Case{-1, 0, 0})
+ test(Case{-1, 1, 0})
+
+ test(Case{0, -1, 0})
+ test(Case{0, 0, 0})
+ test(Case{0, 1, 0})
+
+ test(Case{1, -1, 0})
+ test(Case{1, 0, 0})
+ test(Case{1, 1, 0})
+
+ test(Case{2, -1, 0})
+ test(Case{2, 0, 0})
+ test(Case{2, 1, 1})
+
+ test(Case{3, -1, 0})
+ test(Case{3, 3, 2})
+ test(Case{3, 4, 2})
+
+ test(Case{4, -10, 0})
+ test(Case{4, 0, 0})
+ test(Case{4, 10, 3})
+}
diff --git a/src/margo.sh/mgutil/nocopy.go b/src/margo.sh/mgutil/nocopy.go
new file mode 100644
index 00000000..13c06a70
--- /dev/null
+++ b/src/margo.sh/mgutil/nocopy.go
@@ -0,0 +1,11 @@
+package mgutil
+
+// NoCopy is used to prevent struct copying through `go vet`s -copylocks checker.
+// It's an export of the type `sync.noCopy`
+// See https://golang.org/issues/8005#issuecomment-190753527
+//
+// To prevent struct copying, add a field `noCopy NoCopy` to the struct
+type NoCopy struct{}
+
+// Lock is a no-op used by the `go vet -copylocks` checker.
+func (*NoCopy) Lock() {}
diff --git a/src/margo.sh/mgutil/os.go b/src/margo.sh/mgutil/os.go
new file mode 100644
index 00000000..e595fcaf
--- /dev/null
+++ b/src/margo.sh/mgutil/os.go
@@ -0,0 +1,125 @@
+package mgutil
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// EnvMap is a map of environment variables
+type EnvMap map[string]string
+
+// copy returns a copy of the map
+// sizeHint is a hint about the expected new size of the map
+// if sizeHint is less than 0, it's assumed to be 0
+func (e EnvMap) copy(sizeHint int) EnvMap {
+ n := len(e) + sizeHint
+ if n < 0 {
+ n = 0
+ }
+ m := make(EnvMap, n)
+ for k, v := range e {
+ m[k] = v
+ }
+ return m
+}
+
+// Add is an alias of Set
+func (e EnvMap) Add(k, v string) EnvMap {
+ return e.Set(k, v)
+}
+
+// Set sets the key k in the map to the value v and returns the new map
+func (e EnvMap) Set(k, v string) EnvMap {
+ m := e.copy(1)
+ m[k] = v
+ return m
+}
+
+// Unset removes the list of keys from the map and returns the new map
+func (e EnvMap) Unset(keys ...string) EnvMap {
+ m := e.copy(0)
+ for _, k := range keys {
+ delete(m, k)
+ }
+ return m
+}
+
+// Merge merges p into the map and returns the new map
+func (e EnvMap) Merge(p map[string]string) EnvMap {
+ if len(p) == 0 {
+ return e
+ }
+
+ m := e.copy(len(p))
+ for k, v := range p {
+ m[k] = v
+ }
+ return m
+}
+
+// Environ returns a copy of os.Environ merged with the values in the map
+func (e EnvMap) Environ() []string {
+ el := os.Environ()
+ l := make([]string, 0, len(e)+len(el))
+ for _, s := range el {
+ k := strings.SplitN(s, "=", 2)[0]
+ if _, exists := e[k]; !exists {
+ l = append(l, s)
+ }
+ }
+ for k, v := range e {
+ l = append(l, k+"="+v)
+ }
+ return l
+}
+
+// Get returns the value for k if it exists in the map.
+// If it doesn't exist or is an empty string, def is returned.
+func (e EnvMap) Get(k, def string) string {
+ if v := e[k]; v != "" {
+ return v
+ }
+ return def
+}
+
+// Getenv returns the value for k if it exists in the map or via os.Getenv.
+// If it doesn't exist or is an empty string, def is returned.
+func (e EnvMap) Getenv(k, def string) string {
+ if v := e[k]; v != "" {
+ return v
+ }
+ if v := os.Getenv(k); v != "" {
+ return v
+ }
+ return def
+}
+
+// List is an alias of EnvMap.PathList
+func (e EnvMap) List(k string) []string {
+ return e.PathList(k)
+}
+
+// PathList is the equivalent of PathList(e[k])
+func (e EnvMap) PathList(k string) []string {
+ return PathList(e[k])
+}
+
+// PathList splits s by filepath.ListSeparator and returns the list with empty and duplicate components removed
+func PathList(s string) []string {
+ l := strings.Split(s, string(filepath.ListSeparator))
+ j := 0
+ for i, p := range l {
+ if p != "" && !StrSet(l[:i]).Has(p) {
+ l[j] = p
+ j++
+ }
+ }
+ return l[:j:j]
+}
+
+// IsParentDir returns true if parentDir is a parent of childPath
+func IsParentDir(parentDir, childPath string) bool {
+ p, err := filepath.Rel(parentDir, childPath)
+ return err == nil && p != "." && !strings.HasPrefix(p, ".."+string(filepath.Separator))
+}
diff --git a/src/margo.sh/mgutil/path.go b/src/margo.sh/mgutil/path.go
new file mode 100644
index 00000000..a8ca92bb
--- /dev/null
+++ b/src/margo.sh/mgutil/path.go
@@ -0,0 +1,100 @@
+package mgutil
+
+import (
+ "path"
+ "path/filepath"
+ "strings"
+ "unicode"
+)
+
+var (
+ // ShortFnEnv is the list of envvars that are used in ShortFn.
+ ShortFnEnv = []string{
+ "GOPATH",
+ "GOROOT",
+ }
+)
+
+// FilePathParent returns the parent(filepath.Dir) of fn if it has a parent.
+// If fn has no parent, an empty string is returned instead of ".", "/" or fn itself.
+func FilePathParent(fn string) string {
+ fn = filepath.Clean(fn)
+ dir := filepath.Dir(fn)
+ if dir == "." || dir == fn || dir == string(filepath.Separator) {
+ return ""
+ }
+ return dir
+}
+
+// PathParent returns the parent(path.Dir) of fn if it has a parent.
+// If fn has no parent, an empty string is returned instead of ".", "/" or fn itself.
+func PathParent(fn string) string {
+ fn = path.Clean(fn)
+ dir := path.Dir(fn)
+ if dir == "." || dir == fn || dir == "/" {
+ return ""
+ }
+ return dir
+}
+
+// ShortFn returns a shortened form of filename fn for display in UIs.
+//
+// If env is set, it's used to override os.Getenv.
+//
+// The following prefix/ replacements are made (in the listed order):
+// * Envvar names listed in ShortFnEnv are replaced with `$name/`.
+// * `HOME` or `USERPROFILE` envvars are replaced with `~/`.
+//
+// All other (non-prefix) path components except the last 2 are replaced with their first letter and preceding dots.
+//
+// This mimics the similar path display feature in shells like Fish.
+//
+// e.g. Given a long path like `/home/user/.config/sublime-text-3/Packages/User/GoSublime/pkg/mod/github.com/DisposaBoy/pkg@v1.2.3/go.mod`:
+// * Given `$GOPATH=/home/user/.config/sublime-text-3/Packages/User/GoSublime`,
+// `$GOPATH/p/m/g/D/pkg@v1.2.3/go.mod`
+// * Otherwise, `~/.c/s/P/U/G/p/m/g/D/pkg@v1.2.3/go.mod` is returned.
+func ShortFn(fn string, env EnvMap) string {
+ return shortFn(fn, env.Getenv)
+}
+
+func shortFn(fn string, getenv func(k string, def string) string) string {
+ repl := shortFnRepl(getenv)
+ fn = repl.Replace(filepath.Clean(fn))
+ l := strings.Split(fn, string(filepath.Separator))
+ if len(l) <= 3 {
+ return fn
+ }
+ for i, s := range l[:len(l)-2] {
+ if strings.HasPrefix(s, "~") || strings.HasPrefix(s, "$") {
+ continue
+ }
+ for j, r := range s {
+ if unicode.IsLetter(r) {
+ l[i] = s[:j] + string(r)
+ break
+ }
+ }
+ }
+ return strings.Join(l, string(filepath.Separator))
+}
+
+// ShortFilename calls ShortFn(fn, nil)
+func ShortFilename(fn string) string {
+ return ShortFn(fn, nil)
+}
+
+func shortFnRepl(getenv func(k string, def string) string) *strings.Replacer {
+ const sep = string(filepath.Separator)
+ l := []string{}
+ for _, k := range ShortFnEnv {
+ for _, s := range PathList(getenv(k, "")) {
+ l = append(l, s+sep, "$"+k+sep)
+ }
+ }
+ for _, k := range []string{"HOME", "USERPROFILE"} {
+ if s := getenv(k, ""); s != "" {
+ l = append(l, s+sep, "~"+sep)
+ }
+ }
+ return strings.NewReplacer(l...)
+}
diff --git a/src/margo.sh/mgutil/path_test.go b/src/margo.sh/mgutil/path_test.go
new file mode 100644
index 00000000..08674bcf
--- /dev/null
+++ b/src/margo.sh/mgutil/path_test.go
@@ -0,0 +1,49 @@
+// +build !windows
+
+package mgutil
+
+import (
+ "path/filepath"
+ "testing"
+)
+
+func TestShortFn(t *testing.T) {
+	home := "/home/user"
+	gp := home + "/.config/sublime-text-3/Packages/User/GoSublime"
+	fn := gp + "/pkg/mod/github.com/DisposaBoy/pkg@v1.2.3/go.mod"
+	tbl := []struct {
+		nm  string
+		fn  string
+		res string
+		env func(string, string) string
+	}{
+		{
+			nm:  "With $HOME",
+			fn:  fn,
+			res: `~/.c/s/P/U/G/p/m/g/D/pkg@v1.2.3/go.mod`,
+			env: (EnvMap{"HOME": home}).Get,
+		},
+		{
+			nm:  "Without $HOME",
+			fn:  fn,
+			res: `/h/u/.c/s/P/U/G/p/m/g/D/pkg@v1.2.3/go.mod`,
+			env: (EnvMap{"HOME": ""}).Get,
+		},
+		{
+			nm:  "With $GOPATH",
+			fn:  fn,
+			res: `$GOPATH/p/m/g/D/pkg@v1.2.3/go.mod`,
+			env: (EnvMap{"GOPATH": gp}).Get,
+		},
+	}
+	for _, r := range tbl {
+		r.fn = filepath.FromSlash(r.fn)
+		r.res = filepath.FromSlash(r.res)
+		t.Run(r.nm, func(t *testing.T) {
+			res := shortFn(r.fn, r.env) // fix: was `fn`, which discarded the FromSlash conversion above
+			if res != r.res {
+				t.Errorf("ShortFn(`%s`) = `%s`. Expected `%s`.", r.fn, res, r.res)
+			}
+		})
+	}
+}
diff --git a/src/margo.sh/mgutil/pos.go b/src/margo.sh/mgutil/pos.go
new file mode 100644
index 00000000..c31477ca
--- /dev/null
+++ b/src/margo.sh/mgutil/pos.go
@@ -0,0 +1,29 @@
+package mgutil
+
+import (
+ "unicode/utf8"
+)
+
+// RepositionLeft moves pos left-wards through src until cond returns false
+func RepositionLeft(src []byte, pos int, cond func(rune) bool) int {
+ for 0 <= pos && pos < len(src) {
+ r, n := utf8.DecodeLastRune(src[:pos])
+ if n < 1 || !cond(r) {
+ break
+ }
+ pos -= n
+ }
+ return pos
+}
+
+// RepositionRight moves pos right-wards through src until cond returns false
+func RepositionRight(src []byte, pos int, cond func(rune) bool) int {
+ for 0 <= pos && pos < len(src) {
+ r, n := utf8.DecodeRune(src[pos:])
+ if n < 1 || !cond(r) {
+ break
+ }
+ pos += n
+ }
+ return pos
+}
diff --git a/src/margo.sh/mgutil/splitwriter.go b/src/margo.sh/mgutil/splitwriter.go
new file mode 100644
index 00000000..f5a4e659
--- /dev/null
+++ b/src/margo.sh/mgutil/splitwriter.go
@@ -0,0 +1,126 @@
+package mgutil
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+type SplitFunc func(buf []byte) (next, rest []byte, ok bool)
+
+var ErrSplitWriterClosed = errors.New("SplitWriter: closed")
+
+func SplitLine(s []byte) (line, rest []byte, ok bool) {
+ i := bytes.IndexByte(s, '\n')
+ if i < 0 {
+ return nil, s, false
+ }
+ i++
+ return s[:i], s[i:], true
+}
+
+func SplitLineOrCR(s []byte) (line, rest []byte, ok bool) {
+ i := bytes.IndexByte(s, '\n')
+ if i < 0 {
+ i = bytes.IndexByte(s, '\r')
+ if i < 0 {
+ return nil, s, false
+ }
+ }
+ i++
+ return s[:i], s[i:], true
+}
+
+type SplitWriter struct {
+ split SplitFunc
+ write func([]byte) (int, error)
+ close func() error
+ fflush func() error
+
+ mu sync.Mutex
+ err error
+ buf []byte
+}
+
+func (w *SplitWriter) Write(p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ w.buf = append(w.buf, p...)
+ rest := w.buf
+ for len(rest) != 0 {
+ p, s, ok := w.split(rest)
+ rest = s
+ if !ok {
+ break
+ }
+ if _, w.err = w.write(p[:len(p):len(p)]); w.err != nil {
+ return len(p), w.err
+ }
+ }
+ if len(rest) == 0 {
+ w.buf = nil
+ } else if len(rest) < len(w.buf) {
+ w.buf = append(w.buf[:0], rest...)
+ }
+ return len(p), nil
+}
+
+func (w *SplitWriter) Flush() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ return w.flush(false)
+}
+
+func (w *SplitWriter) flush(closing bool) error {
+ if w.err != nil {
+ return w.err
+ }
+ if closing && len(w.buf) != 0 {
+ _, w.err = w.write(w.buf)
+ w.buf = nil
+ }
+ if err := w.fflush(); err != nil && w.err == nil {
+ w.err = err
+ }
+ return w.err
+}
+
+func (w *SplitWriter) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.err == ErrSplitWriterClosed {
+ return w.err
+ }
+ flushErr := w.flush(true)
+ w.err = ErrSplitWriterClosed
+ if err := w.close(); err != nil {
+ return err
+ }
+ return flushErr
+}
+
+func NewSplitWriter(split SplitFunc, w io.WriteCloser) *SplitWriter {
+ return &SplitWriter{
+ split: split,
+ write: w.Write,
+ close: w.Close,
+ fflush: func() error { return nil },
+ }
+}
+
+func NewSplitStream(split SplitFunc, w OutputStream) *SplitWriter {
+ return &SplitWriter{
+ split: split,
+ write: w.Write,
+ close: w.Close,
+ fflush: w.Flush,
+ }
+}
diff --git a/src/margo.sh/mgutil/str.go b/src/margo.sh/mgutil/str.go
new file mode 100644
index 00000000..f7377d84
--- /dev/null
+++ b/src/margo.sh/mgutil/str.go
@@ -0,0 +1,32 @@
+package mgutil
+
+// StrSet holds a set of strings
+type StrSet []string
+
+// NewStrSet returns a new StrSet initialised with the strings in l
+func NewStrSet(l ...string) StrSet {
+ return StrSet{}.Add(l...)
+}
+
+// Add adds the list of strings l to the set and returns the new set
+func (s StrSet) Add(l ...string) StrSet {
+ res := make(StrSet, 0, len(s)+len(l))
+ for _, lst := range [][]string{[]string(s), l} {
+ for _, p := range lst {
+ if !res.Has(p) {
+ res = append(res, p)
+ }
+ }
+ }
+ return res
+}
+
+// Has returns true if p is in the set
+func (s StrSet) Has(p string) bool {
+ for _, q := range s {
+ if p == q {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/margo.sh/mgutil/sync.go b/src/margo.sh/mgutil/sync.go
new file mode 100644
index 00000000..75c4b3a1
--- /dev/null
+++ b/src/margo.sh/mgutil/sync.go
@@ -0,0 +1,49 @@
+package mgutil
+
+import (
+ "sync/atomic"
+)
+
+type AtomicBool struct{ n int32 }
+
+func (a *AtomicBool) Set(v bool) {
+ if v {
+ atomic.StoreInt32(&a.n, 1)
+ } else {
+ atomic.StoreInt32(&a.n, 0)
+ }
+}
+
+func (a *AtomicBool) IsSet() bool {
+ return atomic.LoadInt32(&a.n) != 0
+}
+
+type AtomicInt int64
+
+func (i *AtomicInt) N() int64 {
+ return atomic.LoadInt64((*int64)(i))
+}
+
+func (i *AtomicInt) Set(n int64) {
+ atomic.StoreInt64((*int64)(i), n)
+}
+
+func (i *AtomicInt) Swap(old, new int64) {
+ atomic.CompareAndSwapInt64((*int64)(i), old, new)
+}
+
+func (i *AtomicInt) Inc() int64 {
+ return atomic.AddInt64((*int64)(i), 1)
+}
+
+func (i *AtomicInt) Dec() int64 {
+ return atomic.AddInt64((*int64)(i), -1)
+}
+
+func (i *AtomicInt) Add(n int64) int64 {
+ return atomic.AddInt64((*int64)(i), n)
+}
+
+func (i *AtomicInt) Sub(n int64) int64 {
+ return atomic.AddInt64((*int64)(i), -n)
+}
diff --git a/src/disposa.blue/margo/sublime/config.go b/src/margo.sh/sublime/config.go
similarity index 68%
rename from src/disposa.blue/margo/sublime/config.go
rename to src/margo.sh/sublime/config.go
index 5299edad..03f3a9c4 100644
--- a/src/disposa.blue/margo/sublime/config.go
+++ b/src/margo.sh/sublime/config.go
@@ -1,22 +1,24 @@
package sublime
import (
- "disposa.blue/margo/mg"
+ "margo.sh/mg"
)
var (
- DefaultConfig = Config{}
+ DefaultConfig Config = Config{}.EnabledForLangs("*").(Config)
_ mg.EditorConfig = DefaultConfig
)
+type ConfigValues struct {
+ EnabledForLangs []mg.Lang
+ InhibitExplicitCompletions bool
+ InhibitWordCompletions bool
+ OverrideSettings map[string]interface{}
+}
+
type Config struct {
- Values struct {
- EnabledForLangs []string
- InhibitExplicitCompletions bool
- InhibitWordCompletions bool
- OverrideSettings map[string]interface{}
- }
+ Values ConfigValues
}
func (c Config) EditorConfig() interface{} {
@@ -27,7 +29,7 @@ func (c Config) Config() mg.EditorConfig {
return c
}
-func (c Config) EnabledForLangs(langs ...string) mg.EditorConfig {
+func (c Config) EnabledForLangs(langs ...mg.Lang) mg.EditorConfig {
c.Values.EnabledForLangs = langs
return c
}
@@ -60,6 +62,10 @@ func (c Config) DisableGsComplete() Config {
return c.overrideSetting("gscomplete_enabled", false)
}
+func (c Config) DisableCalltips() Config {
+ return c.overrideSetting("calltips", false)
+}
+
func (c Config) DisableGsLint() Config {
return c.overrideSetting("gslint_enabled", false)
}
diff --git a/src/margo.sh/sublime/config_test.go b/src/margo.sh/sublime/config_test.go
new file mode 100644
index 00000000..67beb1a2
--- /dev/null
+++ b/src/margo.sh/sublime/config_test.go
@@ -0,0 +1,11 @@
+package sublime
+
+import (
+ "testing"
+)
+
+func TestDefaultConfig(t *testing.T) {
+ if len(DefaultConfig.Values.EnabledForLangs) == 0 {
+ t.Fatalf("DefaultConfig.Values.EnabledForLangs is empty")
+ }
+}
diff --git a/src/margo.sh/sublime/ext.go b/src/margo.sh/sublime/ext.go
new file mode 100644
index 00000000..1b4ef954
--- /dev/null
+++ b/src/margo.sh/sublime/ext.go
@@ -0,0 +1,16 @@
+package sublime
+
+import (
+ "margo.sh/mg"
+ "runtime"
+)
+
+func Margo(ma mg.Args) {
+ ma.Store.Use(mg.NewReducer(func(mx *mg.Ctx) *mg.State {
+ ctrl := "ctrl"
+ if runtime.GOOS == "darwin" {
+ ctrl = "super"
+ }
+ return mx.AddStatusf("press ` %s+. `,` %s+x ` to configure margo", ctrl, ctrl)
+ }))
+}
diff --git a/src/margo.sh/sublime/sublime.go b/src/margo.sh/sublime/sublime.go
new file mode 100644
index 00000000..fb95f754
--- /dev/null
+++ b/src/margo.sh/sublime/sublime.go
@@ -0,0 +1,149 @@
+package sublime
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/urfave/cli"
+ "go/build"
+ "io/ioutil"
+ "margo.sh/cmdpkg/margo/cmdrunner"
+ "margo.sh/mg"
+ "margo.sh/mgcli"
+ yotsuba "margo.sh/why_would_you_make_yotsuba_cry"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+const (
+ AgentName = "margo.sublime"
+)
+
+var (
+ Commands = mgcli.Commands{
+ Name: AgentName,
+ Build: &cli.Command{
+ Action: mgcli.Action(buildAction),
+ },
+ Run: &cli.Command{
+ SkipFlagParsing: true,
+ SkipArgReorder: true,
+ Action: mgcli.Action(runAction),
+ },
+ }
+
+ logger = mg.NewLogger(os.Stderr)
+ agentBuildCtx = yotsuba.AgentBuildContext
+ agentBuildEnv = yotsuba.AgentBuildEnv
+)
+
+func buildAction(c *cli.Context) error {
+ tags := "margo"
+ errs := []string{}
+
+ modWas, modSet := os.LookupEnv("GO111MODULE")
+ // guess: dotless module names are reserved,
+ // so the import fails because `margo` is not in GOROOT
+ // I <3 Go modules!
+ os.Setenv("GO111MODULE", "off")
+ pkg, err := extensionPkg()
+ if modSet {
+ os.Setenv("GO111MODULE", modWas)
+ } else {
+ os.Unsetenv("GO111MODULE")
+ }
+ if err == nil {
+ fixExtPkg(pkg)
+ tags = "margo margo_extension"
+ fmt.Fprintln(os.Stderr, "Using margo extension:", pkg.Dir)
+ } else {
+ errs = append(errs,
+ fmt.Sprintf("*Not* using margo extension: Error: %s", err),
+ fmt.Sprintf("agent GOPATH is %s", agentBuildCtx.GOPATH),
+ )
+ }
+
+ if err := goInstallAgent(tags); err != nil {
+ errs = append(errs, fmt.Sprintf("Error: %s", err))
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ ctrl := "ctrl"
+ if runtime.GOOS == "darwin" {
+ ctrl = "super"
+ }
+ return fmt.Errorf("press ` %s+. `,` %s+x ` to configure margo or check console for errors\n%s", ctrl, ctrl, strings.Join(errs, "\n"))
+}
+
+func runAction(c *cli.Context) error {
+ name := AgentName
+ if exe, err := exec.LookPath(name); err == nil {
+ name = exe
+ }
+ return cmdrunner.Cmd{Name: name, Args: c.Args()}.Run()
+}
+
+func goInstallAgent(tags string) error {
+ args := []string{"install", "-v", "-tags=" + tags}
+ if os.Getenv("MARGO_BUILD_FLAGS_RACE") == "1" {
+ args = append(args, "-race")
+ }
+ args = append(args, "margo.sh/cmd/"+AgentName)
+ cr := cmdrunner.Cmd{
+ Name: "go",
+ Args: args,
+ OutToErr: true,
+ Env: agentBuildEnv,
+ }
+ return cr.Run()
+}
+
+func extensionPkg() (*build.Package, error) {
+ pkg, err := agentBuildCtx.Import("margo", "", 0)
+ if err == nil && len(pkg.GoFiles) == 0 {
+ err = fmt.Errorf("%s imported but has no .go files", pkg.Dir)
+ }
+ return pkg, err
+}
+
+func fixExtPkg(pkg *build.Package) {
+ for _, fn := range pkg.GoFiles {
+ fixExtFile(filepath.Join(pkg.Dir, fn))
+ }
+}
+
+func fixExtFile(fn string) {
+ p, err := ioutil.ReadFile(fn)
+ if err != nil {
+ logger.Println("fixExtFile:", err)
+ return
+ }
+
+ from := `disposa.blue/margo`
+ to := `margo.sh`
+ q := bytes.Replace(p, []byte(from), []byte(to), -1)
+ if bytes.Equal(p, q) {
+ return
+ }
+
+ bak := fn + ".~mgfix~.bak"
+ errOk := func(err error) string {
+ if err != nil {
+ return err.Error()
+ }
+ return "ok"
+ }
+
+ logger.Printf("mgfix %s: replace `%s` -> `%s`\n", fn, from, to)
+ err = os.Rename(fn, bak)
+ logger.Printf("mgfix %s: rename -> `%s`: %s\n", fn, bak, errOk(err))
+ if err == nil {
+ err := ioutil.WriteFile(fn, q, 0644)
+ logger.Printf("mgfix %s: saving: %s\n", fn, errOk(err))
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/.gitignore b/src/margo.sh/vendor/github.com/coreos/bbolt/.gitignore
new file mode 100644
index 00000000..3bcd8cba
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/.gitignore
@@ -0,0 +1,5 @@
+*.prof
+*.test
+*.swp
+/bin/
+cover.out
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/.travis.yml b/src/margo.sh/vendor/github.com/coreos/bbolt/.travis.yml
new file mode 100644
index 00000000..257dfdfe
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go_import_path: go.etcd.io/bbolt
+
+sudo: false
+
+go:
+- 1.12
+
+before_install:
+- go get -v honnef.co/go/tools/...
+- go get -v github.com/kisielk/errcheck
+
+script:
+- make fmt
+- make test
+- make race
+# - make errcheck
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/LICENSE b/src/margo.sh/vendor/github.com/coreos/bbolt/LICENSE
new file mode 100644
index 00000000..004e77fe
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/Makefile b/src/margo.sh/vendor/github.com/coreos/bbolt/Makefile
new file mode 100644
index 00000000..2968aaa6
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/Makefile
@@ -0,0 +1,38 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: build
+
+race:
+ @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
+ @echo "array freelist test"
+ @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
+
+fmt:
+ !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
+
+# go get honnef.co/go/tools/simple
+gosimple:
+ gosimple ./...
+
+# go get honnef.co/go/tools/unused
+unused:
+ unused ./...
+
+# go get github.com/kisielk/errcheck
+errcheck:
+ @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
+
+test:
+ TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
+ # Note: gets "program not an importable package" in out of path builds
+ TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
+
+ @echo "array freelist test"
+
+ @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
+ # Note: gets "program not an importable package" in out of path builds
+ @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
+
+.PHONY: race fmt errcheck test gosimple unused
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/README.md b/src/margo.sh/vendor/github.com/coreos/bbolt/README.md
new file mode 100644
index 00000000..2dff3761
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/README.md
@@ -0,0 +1,956 @@
+bbolt
+=====
+
+[](https://goreportcard.com/report/github.com/etcd-io/bbolt)
+[](https://codecov.io/gh/etcd-io/bbolt)
+[](https://travis-ci.com/etcd-io/bbolt)
+[](https://godoc.org/github.com/etcd-io/bbolt)
+[](https://github.com/etcd-io/bbolt/releases)
+[](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
+
+bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
+store. The purpose of this fork is to provide the Go community with an active
+maintenance and development target for Bolt; the goal is improved reliability
+and stability. bbolt includes bug fixes, performance enhancements, and features
+not found in Bolt while preserving backwards compatibility with the Bolt API.
+
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
+fast, and reliable database for projects that don't require a full database
+server such as Postgres or MySQL.
+
+Since Bolt is meant to be used as such a low-level piece of functionality,
+simplicity is key. The API will be small and only focus on getting values
+and setting values. That's it.
+
+[gh_ben]: https://github.com/benbjohnson
+[bolt]: https://github.com/boltdb/bolt
+[hyc_symas]: https://twitter.com/hyc_symas
+[lmdb]: http://symas.com/mdb/
+
+## Project Status
+
+Bolt is stable, the API is fixed, and the file format is fixed. Full unit
+test coverage and randomized black box testing are used to ensure database
+consistency and thread safety. Bolt is currently used in high-load production
+environments serving databases as large as 1TB. Many companies such as
+Shopify and Heroku use Bolt-backed services every day.
+
+## Project versioning
+
+bbolt uses [semantic versioning](http://semver.org).
+API should not change between patch and minor releases.
+New minor versions may add additional features to the API.
+
+## Table of Contents
+
+ - [Getting Started](#getting-started)
+ - [Installing](#installing)
+ - [Opening a database](#opening-a-database)
+ - [Transactions](#transactions)
+ - [Read-write transactions](#read-write-transactions)
+ - [Read-only transactions](#read-only-transactions)
+ - [Batch read-write transactions](#batch-read-write-transactions)
+ - [Managing transactions manually](#managing-transactions-manually)
+ - [Using buckets](#using-buckets)
+ - [Using key/value pairs](#using-keyvalue-pairs)
+ - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+ - [Iterating over keys](#iterating-over-keys)
+ - [Prefix scans](#prefix-scans)
+ - [Range scans](#range-scans)
+ - [ForEach()](#foreach)
+ - [Nested buckets](#nested-buckets)
+ - [Database backups](#database-backups)
+ - [Statistics](#statistics)
+ - [Read-Only Mode](#read-only-mode)
+ - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+ - [Resources](#resources)
+ - [Comparison with other databases](#comparison-with-other-databases)
+ - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+ - [LevelDB, RocksDB](#leveldb-rocksdb)
+ - [LMDB](#lmdb)
+ - [Caveats & Limitations](#caveats--limitations)
+ - [Reading the Source](#reading-the-source)
+ - [Other Projects Using Bolt](#other-projects-using-bolt)
+
+## Getting Started
+
+### Installing
+
+To start using Bolt, install Go and run `go get`:
+
+```sh
+$ go get go.etcd.io/bbolt/...
+```
+
+This will retrieve the library and install the `bolt` command line utility into
+your `$GOBIN` path.
+
+
+### Importing bbolt
+
+To use bbolt as an embedded key-value store, import as:
+
+```go
+import bolt "go.etcd.io/bbolt"
+
+db, err := bolt.Open(path, 0666, nil)
+if err != nil {
+ return err
+}
+defer db.Close()
+```
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is represented as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+ "log"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+func main() {
+ // Open the my.db data file in your current directory.
+ // It will be created if it doesn't exist.
+ db, err := bolt.Open("my.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ ...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also rollback the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+You also get a consistent view of the database within this closure, however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+ // Find last key in bucket, decode as bigendian uint64, increment
+ // by one, encode back to []byte, and add new key.
+ ...
+ id = newValue
+ return nil
+})
+if err != nil {
+ return ...
+}
+fmt.Println("Allocated ID %d", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+ return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err := tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+ return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+ return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucket([]byte("MyBucket"))
+ if err != nil {
+ return fmt.Errorf("create bucket: %s", err)
+ }
+ return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ err := b.Put([]byte("answer"), []byte("42"))
+ return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ v := b.Get([]byte("answer"))
+ fmt.Printf("The answer is: %s\n", v)
+ return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key which is different than the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+ return s.db.Update(func(tx *bolt.Tx) error {
+ // Retrieve the users bucket.
+ // This should be created when the DB is first opened.
+ b := tx.Bucket([]byte("users"))
+
+ // Generate ID for the user.
+ // This returns an error only if the Tx is closed or not writeable.
+ // That can't happen in an Update() call so I ignore the error check.
+ id, _ := b.NextSequence()
+ u.ID = int(id)
+
+ // Marshal user data into bytes.
+ buf, err := json.Marshal(u)
+ if err != nil {
+ return err
+ }
+
+ // Persist bytes to users bucket.
+ return b.Put(itob(u.ID), buf)
+ })
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(v))
+ return b
+}
+
+type User struct {
+ ID int
+ ...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ c := b.Cursor()
+
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First() Move to the first key.
+Last() Move to the last key.
+Seek() Move to a specific key.
+Next() Move to the next key.
+Prev() Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor then `Next()` will return a
+`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
+access the sub-bucket.
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+ prefix := []byte("1234")
+ for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume our events bucket exists and has RFC3339 encoded time keys.
+ c := tx.Bucket([]byte("Events")).Cursor()
+
+ // Our time range spans the 90's decade.
+ min := []byte("1990-01-01T00:00:00Z")
+ max := []byte("2000-01-01T00:00:00Z")
+
+ // Iterate over the 90's.
+ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+ fmt.Printf("%s: %s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ b.ForEach(func(k, v []byte) error {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ return nil
+ })
+ return nil
+})
+```
+
+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.
+
+```go
+
+// createUser creates a new user in the given account.
+func createUser(accountID int, u *User) error {
+ // Start the transaction.
+ tx, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Retrieve the root bucket for the account.
+ // Assume this has already been created when the account was set up.
+ root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
+
+ // Setup the users bucket.
+ bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
+ if err != nil {
+ return err
+ }
+
+ // Generate an ID for the new user.
+ userID, err := bkt.NextSequence()
+ if err != nil {
+ return err
+ }
+ u.ID = userID
+
+ // Marshal and save the encoded user.
+ if buf, err := json.Marshal(u); err != nil {
+ return err
+ } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
+ return err
+ }
+
+ // Commit the transaction.
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+```
+
+
+
+
+### Database backups
+
+Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to backup over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+ err := db.View(func(tx *bolt.Tx) error {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+ w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+ _, err := tx.WriteTo(w)
+ return err
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+```
+
+Then you can backup using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to backup to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+ // Grab the initial stats.
+ prev := db.Stats()
+
+ for {
+ // Wait for 10s.
+ time.Sleep(10 * time.Second)
+
+ // Grab the current stats and diff them.
+ stats := db.Stats()
+ diff := stats.Sub(&prev)
+
+ // Encode stats to JSON and print to STDERR.
+ json.NewEncoder(os.Stderr).Encode(diff)
+
+ // Save stats for the next loop.
+ prev = stats
+ }
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS require extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+ db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return &BoltDB{db}
+}
+
+type BoltDB struct {
+ db *bolt.DB
+ ...
+}
+
+func (b *BoltDB) Path() string {
+ return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+ b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language (both platforms now sync
+their local storage to the cloud. These snippets disable that functionality for the
+database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
+ path = getNoBackupFilesDir().getAbsolutePath();
+} else{
+ path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+ NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+ NSUserDomainMask,
+ YES) objectAtIndex:0];
+ GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+ [self addSkipBackupAttributeToItemAtPath:demo.path];
+ //Some DB Logic would go here
+ [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+ NSURL* URL= [NSURL fileURLWithPath: filePathString];
+ assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+ NSError *error = nil;
+ BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+ forKey: NSURLIsExcludedFromBackupKey error: &error];
+ if(!success){
+ NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+ }
+ return success;
+}
+
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application, however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+ also fast but random writes can be slow. You can use `DB.Batch()` or add a
+ write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+ SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+ old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+ transaction has been committed or rolled back then the memory they point to
+ can be reused by a new page or can be unmapped from virtual memory and you'll
+ see an `unexpected fault address` panic when accessing it.
+
+* Bolt uses an exclusive write lock on the database file so it cannot be
+ shared by multiple processes.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+ buckets that have random inserts will cause your database to have very poor
+ page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+ once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+ page will not split until the transaction is committed. Randomly inserting
+ more than 100,000 key/value pairs into a single new bucket in a single
+ transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+ caching of the data. Typically, the OS will cache as much of the file as it
+ can in memory and will release memory as needed to other processes. This means
+ that Bolt can show very high memory usage when working with large databases.
+ However, this is expected and the OS will release memory as needed. Bolt can
+ handle databases much larger than the available physical RAM, provided its
+ memory-map fits in the process virtual address space. It may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+ will be endian specific. This means that you cannot copy a Bolt file from a
+ little endian machine to a big endian machine and have it work. For most
+ users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+ and return free pages back to the disk. Instead, Bolt maintains a free list
+ of unused pages within its data file. These free pages can be reused by later
+ transactions. This works well for many use cases as databases generally tend
+ to grow. However, it's important to note that deleting large chunks of data
+ will not allow you to reclaim that space on disk.
+
+ For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+ creating the database if it doesn't exist, obtaining an exclusive lock on the
+ file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+ value of the `writable` argument. This requires briefly obtaining the "meta"
+ lock to keep track of open transactions. Only one read-write transaction can
+ exist at a time so the "rwlock" is acquired during the life of a read-write
+ transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+ arguments, a cursor is used to traverse the B+tree to the page and position
+ where they key & value will be written. Once the position is found, the bucket
+ materializes the underlying page and the page's parent pages into memory as
+ "nodes". These nodes are where mutations occur during read-write transactions.
+ These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+ to move to the page & position of a key/value pair. During a read-only
+ transaction, the key and value data is returned as a direct reference to the
+ underlying mmap file so there's no allocation overhead. For read-write
+ transactions, this data may reference the mmap file or one of the in-memory
+ node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+ or in-memory nodes. It can seek to a specific key, move to the first or last
+ value, or it can move forward or backward. The cursor handles the movement up
+ and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+ into pages to be written to disk. Writing to disk then occurs in two phases.
+ First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+ new meta page with an incremented transaction ID is written and another
+ `fsync()` occurs. This two phase write ensures that partially written data
+ pages are ignored in the event of a crash since the meta page pointing to them
+ is never written. Partially written meta pages are invalidated because they
+ are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
+* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+ simple tx and key scans.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using the highly performant HTTPRouter.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+ backed by boltdb.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_386.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_386.go
new file mode 100644
index 00000000..aee25960
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_386.go
@@ -0,0 +1,7 @@
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_amd64.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_amd64.go
new file mode 100644
index 00000000..5dd8f3f2
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_amd64.go
@@ -0,0 +1,7 @@
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_arm.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_arm.go
new file mode 100644
index 00000000..aee25960
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_arm.go
@@ -0,0 +1,7 @@
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_arm64.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_arm64.go
new file mode 100644
index 00000000..810dfd55
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_arm64.go
@@ -0,0 +1,9 @@
+// +build arm64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_linux.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_linux.go
new file mode 100644
index 00000000..7707bcac
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_linux.go
@@ -0,0 +1,10 @@
+package bbolt
+
+import (
+ "syscall"
+)
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return syscall.Fdatasync(int(db.file.Fd()))
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_mips64x.go
new file mode 100644
index 00000000..dd8ffe12
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_mips64x.go
@@ -0,0 +1,9 @@
+// +build mips64 mips64le
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x8000000000 // 512GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_mipsx.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_mipsx.go
new file mode 100644
index 00000000..a669703a
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_mipsx.go
@@ -0,0 +1,9 @@
+// +build mips mipsle
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x40000000 // 1GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_openbsd.go
new file mode 100644
index 00000000..d7f50358
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_openbsd.go
@@ -0,0 +1,27 @@
+package bbolt
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ msAsync = 1 << iota // perform asynchronous writes
+ msSync // perform synchronous writes
+ msInvalidate // invalidate cached data
+)
+
+func msync(db *DB) error {
+ _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+func fdatasync(db *DB) error {
+ if db.data != nil {
+ return msync(db)
+ }
+ return db.file.Sync()
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc.go
new file mode 100644
index 00000000..84e545ef
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc.go
@@ -0,0 +1,9 @@
+// +build ppc
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc64.go
new file mode 100644
index 00000000..a7612090
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc64.go
@@ -0,0 +1,9 @@
+// +build ppc64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
new file mode 100644
index 00000000..c830f2fc
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
@@ -0,0 +1,9 @@
+// +build ppc64le
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_riscv64.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_riscv64.go
new file mode 100644
index 00000000..c967613b
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_riscv64.go
@@ -0,0 +1,9 @@
+// +build riscv64
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_s390x.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_s390x.go
new file mode 100644
index 00000000..ff2a5609
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_s390x.go
@@ -0,0 +1,9 @@
+// +build s390x
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix.go
new file mode 100644
index 00000000..2938fed5
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix.go
@@ -0,0 +1,93 @@
+// +build !windows,!plan9,!solaris,!aix
+
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ flag := syscall.LOCK_NB
+ if exclusive {
+ flag |= syscall.LOCK_EX
+ } else {
+ flag |= syscall.LOCK_SH
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ err := syscall.Flock(int(fd), flag)
+ if err == nil {
+ return nil
+ } else if err != syscall.EWOULDBLOCK {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ err = madvise(b, syscall.MADV_RANDOM)
+ if err != nil && err != syscall.ENOSYS {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := syscall.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
+
+// NOTE: This function is copied from stdlib because it is not available on darwin.
+func madvise(b []byte, advice int) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix_aix.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix_aix.go
new file mode 100644
index 00000000..a64c16f5
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix_aix.go
@@ -0,0 +1,90 @@
+// +build aix
+
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
new file mode 100644
index 00000000..babad657
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
@@ -0,0 +1,88 @@
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_windows.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_windows.go
new file mode 100644
index 00000000..fca178bd
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bolt_windows.go
@@ -0,0 +1,141 @@
+package bbolt
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+ procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+)
+
+const (
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+ flagLockExclusive = 2
+ flagLockFailImmediately = 1
+
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+ errLockViolation syscall.Errno = 0x21
+)
+
+func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+ if r == 0 {
+ return err
+ }
+ return nil
+}
+
+func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
+ if r == 0 {
+ return err
+ }
+ return nil
+}
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ var flag uint32 = flagLockFailImmediately
+ if exclusive {
+ flag |= flagLockExclusive
+ }
+ for {
+ // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
+ // -1..0 as the lock on the database file.
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
+
+ if err == nil {
+ return nil
+ } else if err != errLockViolation {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
+ return err
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+ if !db.readOnly {
+ // Truncate the database to the size of the mmap.
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("truncate: %s", err)
+ }
+ }
+
+ // Open a file mapping handle.
+ sizelo := uint32(sz >> 32)
+ sizehi := uint32(sz) & 0xffffffff
+ h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
+ if h == 0 {
+ return os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ // Create the memory map.
+ addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+ if addr == 0 {
+ return os.NewSyscallError("MapViewOfFile", errno)
+ }
+
+ // Close mapping handle.
+ if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
+ return os.NewSyscallError("CloseHandle", err)
+ }
+
+ // Convert to a byte array.
+ db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
+ db.datasz = sz
+
+ return nil
+}
+
+// munmap unmaps a pointer from a file.
+// Based on: https://github.com/edsrzf/mmap-go
+func munmap(db *DB) error {
+ if db.data == nil {
+ return nil
+ }
+
+ addr := (uintptr)(unsafe.Pointer(&db.data[0]))
+ if err := syscall.UnmapViewOfFile(addr); err != nil {
+ return os.NewSyscallError("UnmapViewOfFile", err)
+ }
+ return nil
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/boltsync_unix.go b/src/margo.sh/vendor/github.com/coreos/bbolt/boltsync_unix.go
new file mode 100644
index 00000000..9587afef
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/boltsync_unix.go
@@ -0,0 +1,8 @@
+// +build !windows,!plan9,!linux,!openbsd
+
+package bbolt
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/bucket.go b/src/margo.sh/vendor/github.com/coreos/bbolt/bucket.go
new file mode 100644
index 00000000..d8750b14
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/bucket.go
@@ -0,0 +1,777 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+)
+
+const (
+ // MaxKeySize is the maximum length of a key, in bytes.
+ MaxKeySize = 32768
+
+ // MaxValueSize is the maximum length of a value, in bytes.
+ MaxValueSize = (1 << 31) - 2
+)
+
+const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
+
+const (
+ minFillPercent = 0.1
+ maxFillPercent = 1.0
+)
+
+// DefaultFillPercent is the percentage that split pages are filled.
+// This value can be changed by setting Bucket.FillPercent.
+const DefaultFillPercent = 0.5
+
+// Bucket represents a collection of key/value pairs inside the database.
+type Bucket struct {
+ *bucket
+ tx *Tx // the associated transaction
+ buckets map[string]*Bucket // subbucket cache
+ page *page // inline page reference
+ rootNode *node // materialized node for the root page.
+ nodes map[pgid]*node // node cache
+
+ // Sets the threshold for filling nodes when they split. By default,
+ // the bucket will fill to 50% but it can be useful to increase this
+ // amount if you know that your write workloads are mostly append-only.
+ //
+ // This is non-persisted across transactions so it must be set in every Tx.
+ FillPercent float64
+}
+
+// bucket represents the on-file representation of a bucket.
+// This is stored as the "value" of a bucket key. If the bucket is small enough,
+// then its root page can be stored inline in the "value", after the bucket
+// header. In the case of inline buckets, the "root" will be 0.
+type bucket struct {
+ root pgid // page id of the bucket's root-level page
+ sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+ var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
+ if tx.writable {
+ b.buckets = make(map[string]*Bucket)
+ b.nodes = make(map[pgid]*node)
+ }
+ return b
+}
+
+// Tx returns the tx of the bucket.
+func (b *Bucket) Tx() *Tx {
+ return b.tx
+}
+
+// Root returns the root of the bucket.
+func (b *Bucket) Root() pgid {
+ return b.root
+}
+
+// Writable returns whether the bucket is writable.
+func (b *Bucket) Writable() bool {
+ return b.tx.writable
+}
+
+// Cursor creates a cursor associated with the bucket.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (b *Bucket) Cursor() *Cursor {
+ // Update transaction statistics.
+ b.tx.stats.CursorCount++
+
+ // Allocate and return a cursor.
+ return &Cursor{
+ bucket: b,
+ stack: make([]elemRef, 0),
+ }
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+ if b.buckets != nil {
+ if child := b.buckets[string(name)]; child != nil {
+ return child
+ }
+ }
+
+ // Move cursor to key.
+ c := b.Cursor()
+ k, v, flags := c.seek(name)
+
+ // Return nil if the key doesn't exist or it is not a bucket.
+ if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+ return nil
+ }
+
+ // Otherwise create a bucket and cache it.
+ var child = b.openBucket(v)
+ if b.buckets != nil {
+ b.buckets[string(name)] = child
+ }
+
+ return child
+}
+
+// Helper method that re-interprets a sub-bucket value
+// from a parent into a Bucket
+func (b *Bucket) openBucket(value []byte) *Bucket {
+ var child = newBucket(b.tx)
+
+ // Unaligned access requires a copy to be made.
+ const unalignedMask = unsafe.Alignof(struct {
+ bucket
+ page
+ }{}) - 1
+ unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
+ if unaligned {
+ value = cloneBytes(value)
+ }
+
+ // If this is a writable transaction then we need to copy the bucket entry.
+ // Read-only transactions can point directly at the mmap entry.
+ if b.tx.writable && !unaligned {
+ child.bucket = &bucket{}
+ *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
+ } else {
+ child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ }
+
+ // Save a reference to the inline page if the bucket is inline.
+ if child.root == 0 {
+ child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ }
+
+ return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+ if b.tx.db == nil {
+ return nil, ErrTxClosed
+ } else if !b.tx.writable {
+ return nil, ErrTxNotWritable
+ } else if len(key) == 0 {
+ return nil, ErrBucketNameRequired
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key.
+ if bytes.Equal(key, k) {
+ if (flags & bucketLeafFlag) != 0 {
+ return nil, ErrBucketExists
+ }
+ return nil, ErrIncompatibleValue
+ }
+
+ // Create empty, inline bucket.
+ var bucket = Bucket{
+ bucket: &bucket{},
+ rootNode: &node{isLeaf: true},
+ FillPercent: DefaultFillPercent,
+ }
+ var value = bucket.write()
+
+ // Insert into node.
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, bucketLeafFlag)
+
+ // Since subbuckets are not allowed on inline buckets, we need to
+ // dereference the inline page, if it exists. This will cause the bucket
+ // to be treated as a regular, non-inline bucket for the rest of the tx.
+ b.page = nil
+
+ return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+ child, err := b.CreateBucket(key)
+ if err == ErrBucketExists {
+ return b.Bucket(key), nil
+ } else if err != nil {
+ return nil, err
+ }
+ return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(key, k) {
+ return ErrBucketNotFound
+ } else if (flags & bucketLeafFlag) == 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Recursively delete all child buckets.
+ child := b.Bucket(key)
+ err := child.ForEach(func(k, v []byte) error {
+ if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
+ if err := child.DeleteBucket(k); err != nil {
+ return fmt.Errorf("delete bucket: %s", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove cached copy.
+ delete(b.buckets, string(key))
+
+ // Release all bucket pages to freelist.
+ child.nodes = nil
+ child.rootNode = nil
+ child.free()
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+ k, v, flags := b.Cursor().seek(key)
+
+ // Return nil if this is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return nil
+ }
+
+ // If our target node isn't the same key as what's passed in then return nil.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+ return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exist then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ } else if len(key) == 0 {
+ return ErrKeyRequired
+ } else if len(key) > MaxKeySize {
+ return ErrKeyTooLarge
+ } else if int64(len(value)) > MaxValueSize {
+ return ErrValueTooLarge
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Insert into node.
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, 0)
+
+ return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return nil if the key doesn't exist.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+
+ // Return an error if there is already existing bucket value.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Increment and return the sequence.
+ b.bucket.sequence = v
+ return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+ if b.tx.db == nil {
+ return 0, ErrTxClosed
+ } else if !b.Writable() {
+ return 0, ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Increment and return the sequence.
+ b.bucket.sequence++
+ return b.bucket.sequence, nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ }
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ if err := fn(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stat returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+ var s, subStats BucketStats
+ pageSize := b.tx.db.pageSize
+ s.BucketN += 1
+ if b.root == 0 {
+ s.InlineBucketN += 1
+ }
+ b.forEachPage(func(p *page, depth int) {
+ if (p.flags & leafPageFlag) != 0 {
+ s.KeyN += int(p.count)
+
+ // used totals the used bytes for the page
+ used := pageHeaderSize
+
+ if p.count != 0 {
+ // If page has any elements, add all element headers.
+ used += leafPageElementSize * uintptr(p.count-1)
+
+ // Add all element key, value sizes.
+ // The computation takes advantage of the fact that the position
+ // of the last element's key/value equals to the total of the sizes
+ // of all previous elements' keys and values.
+ // It also includes the last element's header.
+ lastElement := p.leafPageElement(p.count - 1)
+ used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
+ }
+
+ if b.root == 0 {
+ // For inlined bucket just update the inline stats
+ s.InlineBucketInuse += int(used)
+ } else {
+ // For non-inlined bucket update all the leaf stats
+ s.LeafPageN++
+ s.LeafInuse += int(used)
+ s.LeafOverflowN += int(p.overflow)
+
+ // Collect stats from sub-buckets.
+ // Do that by iterating over all element headers
+ // looking for the ones with the bucketLeafFlag.
+ for i := uint16(0); i < p.count; i++ {
+ e := p.leafPageElement(i)
+ if (e.flags & bucketLeafFlag) != 0 {
+ // For any bucket element, open the element value
+ // and recursively call Stats on the contained bucket.
+ subStats.Add(b.openBucket(e.value()).Stats())
+ }
+ }
+ }
+ } else if (p.flags & branchPageFlag) != 0 {
+ s.BranchPageN++
+ lastElement := p.branchPageElement(p.count - 1)
+
+ // used totals the used bytes for the page
+ // Add header and all element headers.
+ used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
+
+ // Add size of all keys and values.
+ // Again, use the fact that last element's position equals to
+ // the total of key, value sizes of all previous elements.
+ used += uintptr(lastElement.pos + lastElement.ksize)
+ s.BranchInuse += int(used)
+ s.BranchOverflowN += int(p.overflow)
+ }
+
+ // Keep track of maximum page depth.
+ if depth+1 > s.Depth {
+ s.Depth = (depth + 1)
+ }
+ })
+
+ // Alloc stats can be computed from page counts and pageSize.
+ s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+ s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+ // Add the max depth of sub-buckets to get total nested depth.
+ s.Depth += subStats.Depth
+ // Add the stats for all sub-buckets
+ s.Add(subStats)
+ return s
+}
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*page, int)) {
+ // If we have an inline page then just use that.
+ if b.page != nil {
+ fn(b.page, 0)
+ return
+ }
+
+ // Otherwise traverse the page hierarchy.
+ b.tx.forEachPage(b.root, 0, fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
+ // If we have an inline page or root node then just use that.
+ if b.page != nil {
+ fn(b.page, nil, 0)
+ return
+ }
+ b._forEachPageNode(b.root, 0, fn)
+}
+
+func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
+ var p, n = b.pageNode(pgid)
+
+ // Execute function.
+ fn(p, n, depth)
+
+ // Recursively loop over children.
+ if p != nil {
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ b._forEachPageNode(elem.pgid, depth+1, fn)
+ }
+ }
+ } else {
+ if !n.isLeaf {
+ for _, inode := range n.inodes {
+ b._forEachPageNode(inode.pgid, depth+1, fn)
+ }
+ }
+ }
+}
+
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+ // Spill all child buckets first.
+ for name, child := range b.buckets {
+ // If the child bucket is small enough and it has no child buckets then
+ // write it inline into the parent bucket's page. Otherwise spill it
+ // like a normal bucket and make the parent value a pointer to the page.
+ var value []byte
+ if child.inlineable() {
+ child.free()
+ value = child.write()
+ } else {
+ if err := child.spill(); err != nil {
+ return err
+ }
+
+ // Update the child bucket header in this bucket.
+ value = make([]byte, unsafe.Sizeof(bucket{}))
+ var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *child.bucket
+ }
+
+ // Skip writing the bucket if there are no materialized nodes.
+ if child.rootNode == nil {
+ continue
+ }
+
+ // Update parent node.
+ var c = b.Cursor()
+ k, _, flags := c.seek([]byte(name))
+ if !bytes.Equal([]byte(name), k) {
+ panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+ }
+ if flags&bucketLeafFlag == 0 {
+ panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+ }
+ c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ }
+
+ // Ignore if there's not a materialized root node.
+ if b.rootNode == nil {
+ return nil
+ }
+
+ // Spill nodes.
+ if err := b.rootNode.spill(); err != nil {
+ return err
+ }
+ b.rootNode = b.rootNode.root()
+
+ // Update the root node for this bucket.
+ if b.rootNode.pgid >= b.tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
+ }
+ b.root = b.rootNode.pgid
+
+ return nil
+}
+
+// inlineable returns true if a bucket is small enough to be written inline
+// and if it contains no subbuckets. Otherwise returns false.
+func (b *Bucket) inlineable() bool {
+ var n = b.rootNode
+
+ // Bucket must only contain a single leaf node.
+ if n == nil || !n.isLeaf {
+ return false
+ }
+
+ // Bucket is not inlineable if it contains subbuckets or if it goes beyond
+ // our threshold for inline bucket size.
+ var size = pageHeaderSize
+ for _, inode := range n.inodes {
+ size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
+
+ if inode.flags&bucketLeafFlag != 0 {
+ return false
+ } else if size > b.maxInlineBucketSize() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the maximum total size of a bucket to make it a candidate for inlining.
+func (b *Bucket) maxInlineBucketSize() uintptr {
+ return uintptr(b.tx.db.pageSize / 4)
+}
+
+// write allocates and writes a bucket to a byte slice.
+func (b *Bucket) write() []byte {
+ // Allocate the appropriate size.
+ var n = b.rootNode
+ var value = make([]byte, bucketHeaderSize+n.size())
+
+ // Write a bucket header.
+ var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *b.bucket
+
+ // Convert byte slice to a fake page and write the root node.
+ var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ n.write(p)
+
+ return value
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+ for _, n := range b.nodes {
+ n.rebalance()
+ }
+ for _, child := range b.buckets {
+ child.rebalance()
+ }
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgid pgid, parent *node) *node {
+ _assert(b.nodes != nil, "nodes map expected")
+
+ // Retrieve node if it's already been created.
+ if n := b.nodes[pgid]; n != nil {
+ return n
+ }
+
+ // Otherwise create a node and cache it.
+ n := &node{bucket: b, parent: parent}
+ if parent == nil {
+ b.rootNode = n
+ } else {
+ parent.children = append(parent.children, n)
+ }
+
+ // Use the inline page if this is an inline bucket.
+ var p = b.page
+ if p == nil {
+ p = b.tx.page(pgid)
+ }
+
+ // Read the page into the node and cache it.
+ n.read(p)
+ b.nodes[pgid] = n
+
+ // Update statistics.
+ b.tx.stats.NodeCount++
+
+ return n
+}
+
+// free recursively frees all pages in the bucket.
+func (b *Bucket) free() {
+ if b.root == 0 {
+ return
+ }
+
+ var tx = b.tx
+ b.forEachPageNode(func(p *page, n *node, _ int) {
+ if p != nil {
+ tx.db.freelist.free(tx.meta.txid, p)
+ } else {
+ n.free()
+ }
+ })
+ b.root = 0
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ if b.rootNode != nil {
+ b.rootNode.root().dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.root == 0 {
+ if id != 0 {
+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+ return b.page, nil
+ }
+
+ // Check the node cache for non-inline buckets.
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+
+ // Finally lookup the page from the transaction if no node is materialized.
+ return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+ // Page count statistics.
+ BranchPageN int // number of logical branch pages
+ BranchOverflowN int // number of physical branch overflow pages
+ LeafPageN int // number of logical leaf pages
+ LeafOverflowN int // number of physical leaf overflow pages
+
+ // Tree statistics.
+ KeyN int // number of keys/value pairs
+ Depth int // number of levels in B+tree
+
+ // Page size utilization.
+ BranchAlloc int // bytes allocated for physical branch pages
+ BranchInuse int // bytes actually used for branch data
+ LeafAlloc int // bytes allocated for physical leaf pages
+ LeafInuse int // bytes actually used for leaf data
+
+ // Bucket statistics
+ BucketN int // total number of buckets including the top bucket
+ InlineBucketN int // total number of inlined buckets
+ InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+ s.BranchPageN += other.BranchPageN
+ s.BranchOverflowN += other.BranchOverflowN
+ s.LeafPageN += other.LeafPageN
+ s.LeafOverflowN += other.LeafOverflowN
+ s.KeyN += other.KeyN
+ if s.Depth < other.Depth {
+ s.Depth = other.Depth
+ }
+ s.BranchAlloc += other.BranchAlloc
+ s.BranchInuse += other.BranchInuse
+ s.LeafAlloc += other.LeafAlloc
+ s.LeafInuse += other.LeafInuse
+
+ s.BucketN += other.BucketN
+ s.InlineBucketN += other.InlineBucketN
+ s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+ var clone = make([]byte, len(v))
+ copy(clone, v)
+ return clone
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/cursor.go b/src/margo.sh/vendor/github.com/coreos/bbolt/cursor.go
new file mode 100644
index 00000000..98aeb449
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/cursor.go
@@ -0,0 +1,396 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
+//
+// Changing data while traversing with a cursor may cause it to be invalidated
+// and return unexpected keys and/or values. You must reposition your cursor
+// after mutating data.
+type Cursor struct {
+ bucket *Bucket
+ stack []elemRef
+}
+
+// Bucket returns the bucket that this cursor was created from.
+func (c *Cursor) Bucket() *Bucket {
+ return c.bucket
+}
+
+// First moves the cursor to the first item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) First() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.root)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ c.first()
+
+ // If we land on an empty page then move to the next value.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ c.next()
+ }
+
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+
+}
+
+// Last moves the cursor to the last item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Last() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.root)
+ ref := elemRef{page: p, node: n}
+ ref.index = ref.count() - 1
+ c.stack = append(c.stack, ref)
+ c.last()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Next moves the cursor to the next item in the bucket and returns its key and value.
+// If the cursor is at the end of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Next() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.next()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Prev() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+ // Attempt to move back one element until we're successful.
+ // Move up the stack as we hit the beginning of each page in our stack.
+ for i := len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index > 0 {
+ elem.index--
+ break
+ }
+ c.stack = c.stack[:i]
+ }
+
+ // If we've hit the end then return nil.
+ if len(c.stack) == 0 {
+ return nil, nil
+ }
+
+ // Move down the stack to find the last element of the last leaf under this branch.
+ c.last()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil key is returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+ k, v, flags := c.seek(seek)
+
+ // If we ended up after the last element of a page then move to the next one.
+ if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
+ k, v, flags = c.next()
+ }
+
+ if k == nil {
+ return nil, nil
+ } else if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Delete removes the current key/value under the cursor from the bucket.
+// Delete fails if current key/value is a bucket or if the transaction is not writable.
+func (c *Cursor) Delete() error {
+ if c.bucket.tx.db == nil {
+ return ErrTxClosed
+ } else if !c.bucket.Writable() {
+ return ErrTxNotWritable
+ }
+
+ key, _, flags := c.keyValue()
+ // Return an error if current value is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+ c.node().del(key)
+
+ return nil
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+ // Start from root page/node and traverse to correct page.
+ c.stack = c.stack[:0]
+ c.search(seek, c.bucket.root)
+
+ // If this is a bucket then return a nil value.
+ return c.keyValue()
+}
+
+// first moves the cursor to the first leaf element under the last page in the stack.
+func (c *Cursor) first() {
+ for {
+ // Exit when we hit a leaf page.
+ var ref = &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the first element to the stack.
+ var pgid pgid
+ if ref.node != nil {
+ pgid = ref.node.inodes[ref.index].pgid
+ } else {
+ pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ }
+ p, n := c.bucket.pageNode(pgid)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ }
+}
+
+// last moves the cursor to the last leaf element under the last page in the stack.
+func (c *Cursor) last() {
+ for {
+ // Exit when we hit a leaf page.
+ ref := &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the last element in the stack.
+ var pgid pgid
+ if ref.node != nil {
+ pgid = ref.node.inodes[ref.index].pgid
+ } else {
+ pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ }
+ p, n := c.bucket.pageNode(pgid)
+
+ var nextRef = elemRef{page: p, node: n}
+ nextRef.index = nextRef.count() - 1
+ c.stack = append(c.stack, nextRef)
+ }
+}
+
+// next moves to the next leaf element and returns the key and value.
+// If the cursor is at the last leaf element then it stays there and returns nil.
+func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+ for {
+ // Attempt to move over one element until we're successful.
+ // Move up the stack as we hit the end of each page in our stack.
+ var i int
+ for i = len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index < elem.count()-1 {
+ elem.index++
+ break
+ }
+ }
+
+ // If we've hit the root page then stop and return. This will leave the
+ // cursor on the last element of the last page.
+ if i == -1 {
+ return nil, nil, 0
+ }
+
+ // Otherwise start from where we left off in the stack and find the
+ // first element of the first leaf page.
+ c.stack = c.stack[:i+1]
+ c.first()
+
+ // If this is an empty page then restart and move back up the stack.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ continue
+ }
+
+ return c.keyValue()
+ }
+}
+
+// search recursively performs a binary search against a given page/node until it finds a given key.
+func (c *Cursor) search(key []byte, pgid pgid) {
+ p, n := c.bucket.pageNode(pgid)
+ if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
+ panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
+ }
+ e := elemRef{page: p, node: n}
+ c.stack = append(c.stack, e)
+
+ // If we're on a leaf page/node then find the specific node.
+ if e.isLeaf() {
+ c.nsearch(key)
+ return
+ }
+
+ if n != nil {
+ c.searchNode(key, n)
+ return
+ }
+ c.searchPage(key, p)
+}
+
+func (c *Cursor) searchNode(key []byte, n *node) {
+ var exact bool
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(n.inodes[i].key, key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, n.inodes[index].pgid)
+}
+
+func (c *Cursor) searchPage(key []byte, p *page) {
+ // Binary search for the correct range.
+ inodes := p.branchPageElements()
+
+ var exact bool
+ index := sort.Search(int(p.count), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(inodes[i].key(), key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, inodes[index].pgid)
+}
+
+// nsearch searches the leaf node on the top of the stack for a key.
+func (c *Cursor) nsearch(key []byte) {
+ e := &c.stack[len(c.stack)-1]
+ p, n := e.page, e.node
+
+ // If we have a node then search its inodes.
+ if n != nil {
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].key, key) != -1
+ })
+ e.index = index
+ return
+ }
+
+ // If we have a page then search its leaf elements.
+ inodes := p.leafPageElements()
+ index := sort.Search(int(p.count), func(i int) bool {
+ return bytes.Compare(inodes[i].key(), key) != -1
+ })
+ e.index = index
+}
+
+// keyValue returns the key and value of the current leaf element.
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
+ ref := &c.stack[len(c.stack)-1]
+
+ // If the cursor is pointing to the end of page/node then return nil.
+ if ref.count() == 0 || ref.index >= ref.count() {
+ return nil, nil, 0
+ }
+
+ // Retrieve value from node.
+ if ref.node != nil {
+ inode := &ref.node.inodes[ref.index]
+ return inode.key, inode.value, inode.flags
+ }
+
+ // Or retrieve value from page.
+ elem := ref.page.leafPageElement(uint16(ref.index))
+ return elem.key(), elem.value(), elem.flags
+}
+
+// node returns the node that the cursor is currently positioned on.
+func (c *Cursor) node() *node {
+ _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
+
+ // If the top of the stack is a leaf node then just return it.
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+ return ref.node
+ }
+
+ // Start from root and traverse down the hierarchy.
+ var n = c.stack[0].node
+ if n == nil {
+ n = c.bucket.node(c.stack[0].page.id, nil)
+ }
+ for _, ref := range c.stack[:len(c.stack)-1] {
+ _assert(!n.isLeaf, "expected branch node")
+ n = n.childAt(ref.index)
+ }
+ _assert(n.isLeaf, "expected leaf node")
+ return n
+}
+
+// elemRef represents a reference to an element on a given page/node.
+type elemRef struct {
+ page *page
+ node *node
+ index int
+}
+
+// isLeaf returns whether the ref is pointing at a leaf page/node.
+func (r *elemRef) isLeaf() bool {
+ if r.node != nil {
+ return r.node.isLeaf
+ }
+ return (r.page.flags & leafPageFlag) != 0
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+ if r.node != nil {
+ return len(r.node.inodes)
+ }
+ return int(r.page.count)
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/db.go b/src/margo.sh/vendor/github.com/coreos/bbolt/db.go
new file mode 100644
index 00000000..80b0095c
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/db.go
@@ -0,0 +1,1174 @@
+package bbolt
+
+import (
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "log"
+ "os"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+// The largest step that can be taken when remapping the mmap.
+const maxMmapStep = 1 << 30 // 1GB
+
+// The data file format version.
+const version = 2
+
+// Represents a marker value to indicate that a file is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+const pgidNoFreelist pgid = 0xffffffffffffffff
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file. This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+ DefaultMaxBatchSize int = 1000
+ DefaultMaxBatchDelay = 10 * time.Millisecond
+ DefaultAllocSize = 16 * 1024 * 1024
+)
+
+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
+// The time elapsed between consecutive file locking attempts.
+const flockRetryTimeout = 50 * time.Millisecond
+
+// FreelistType is the type of the freelist backend
+type FreelistType string
+
+const (
+ // FreelistArrayType indicates backend freelist type is array
+ FreelistArrayType = FreelistType("array")
+ // FreelistMapType indicates backend freelist type is hashmap
+ FreelistMapType = FreelistType("hashmap")
+)
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+ // When enabled, the database will perform a Check() after every commit.
+ // A panic is issued if the database is in an inconsistent state. This
+ // flag has a large performance impact so it should only be used for
+ // debugging purposes.
+ StrictMode bool
+
+ // Setting the NoSync flag will cause the database to skip fsync()
+ // calls after each commit. This can be useful when bulk loading data
+ // into a database and you can restart the bulk load in the event of
+ // a system failure or database corruption. Do not set this flag for
+ // normal use.
+ //
+ // If the package global IgnoreNoSync constant is true, this value is
+ // ignored. See the comment on that constant for more details.
+ //
+ // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ NoSync bool
+
+ // When true, skips syncing freelist to disk. This improves the database
+ // write performance under normal operation, but requires a full database
+ // re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
+ // dramatic performance degradation if database is large and framentation in freelist is common.
+ // The alternative one is using hashmap, it is faster in almost all circumstances
+ // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
+ // The default type is array
+ FreelistType FreelistType
+
+ // When true, skips the truncate call when growing the database.
+ // Setting this to true is only safe on non-ext3/ext4 systems.
+ // Skipping truncation avoids preallocation of hard drive space and
+ // bypasses a truncate() and fsync() syscall on remapping.
+ //
+ // https://github.com/boltdb/bolt/issues/284
+ NoGrowSync bool
+
+ // If you want to read the entire database fast, you can set MmapFlag to
+ // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+ MmapFlags int
+
+ // MaxBatchSize is the maximum size of a batch. Default value is
+ // copied from DefaultMaxBatchSize in Open.
+ //
+ // If <=0, disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchSize int
+
+ // MaxBatchDelay is the maximum delay before a batch starts.
+ // Default value is copied from DefaultMaxBatchDelay in Open.
+ //
+ // If <=0, effectively disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchDelay time.Duration
+
+ // AllocSize is the amount of space allocated when the database
+ // needs to create new pages. This is done to amortize the cost
+ // of truncate() and fsync() when growing the data file.
+ AllocSize int
+
+ path string
+ openFile func(string, int, os.FileMode) (*os.File, error)
+ file *os.File
+ dataref []byte // mmap'ed readonly, write throws SEGV
+ data *[maxMapSize]byte
+ datasz int
+ filesz int // current on disk file size
+ meta0 *meta
+ meta1 *meta
+ pageSize int
+ opened bool
+ rwtx *Tx
+ txs []*Tx
+ stats Stats
+
+ freelist *freelist
+ freelistLoad sync.Once
+
+ pagePool sync.Pool
+
+ batchMu sync.Mutex
+ batch *batch
+
+ rwlock sync.Mutex // Allows only one writer at a time.
+ metalock sync.Mutex // Protects meta page access.
+ mmaplock sync.RWMutex // Protects mmap access during remapping.
+ statlock sync.RWMutex // Protects stats access.
+
+ ops struct {
+ writeAt func(b []byte, off int64) (n int, err error)
+ }
+
+ // Read only mode.
+ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
+ readOnly bool
+}
+
+// Path returns the path to currently open database file.
+func (db *DB) Path() string {
+ return db.path
+}
+
+// GoString returns the Go string representation of the database.
+func (db *DB) GoString() string {
+ return fmt.Sprintf("bolt.DB{path:%q}", db.path)
+}
+
+// String returns the string representation of the database.
+func (db *DB) String() string {
+ return fmt.Sprintf("DB<%q>", db.path)
+}
+
+// Open creates and opens a database at the given path.
+// If the file does not exist then it will be created automatically.
+// Passing in nil options will cause Bolt to open the database with the default options.
+func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+ db := &DB{
+ opened: true,
+ }
+ // Set default options if no options are provided.
+ if options == nil {
+ options = DefaultOptions
+ }
+ db.NoSync = options.NoSync
+ db.NoGrowSync = options.NoGrowSync
+ db.MmapFlags = options.MmapFlags
+ db.NoFreelistSync = options.NoFreelistSync
+ db.FreelistType = options.FreelistType
+
+ // Set default values for later DB operations.
+ db.MaxBatchSize = DefaultMaxBatchSize
+ db.MaxBatchDelay = DefaultMaxBatchDelay
+ db.AllocSize = DefaultAllocSize
+
+ flag := os.O_RDWR
+ if options.ReadOnly {
+ flag = os.O_RDONLY
+ db.readOnly = true
+ }
+
+ db.openFile = options.OpenFile
+ if db.openFile == nil {
+ db.openFile = os.OpenFile
+ }
+
+ // Open data file and separate sync handler for metadata writes.
+ var err error
+ if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+ db.path = db.file.Name()
+
+ // Lock file so that other processes using Bolt in read-write mode cannot
+ // use the database at the same time. This would cause corruption since
+ // the two processes would write meta pages and free pages separately.
+ // The database file is locked exclusively (only one process can grab the lock)
+ // if !options.ReadOnly.
+ // The database file is locked using the shared lock (more than one process may
+ // hold a lock at the same time) otherwise (options.ReadOnly is set).
+ if err := flock(db, !db.readOnly, options.Timeout); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Default values for test hooks
+ db.ops.writeAt = db.file.WriteAt
+
+ if db.pageSize = options.PageSize; db.pageSize == 0 {
+ // Set the default page size to the OS page size.
+ db.pageSize = defaultPageSize
+ }
+
+ // Initialize the database if it doesn't exist.
+ if info, err := db.file.Stat(); err != nil {
+ _ = db.close()
+ return nil, err
+ } else if info.Size() == 0 {
+ // Initialize new files with meta pages.
+ if err := db.init(); err != nil {
+ // clean up file descriptor on initialization fail
+ _ = db.close()
+ return nil, err
+ }
+ } else {
+ // Read the first meta page to determine the page size.
+ var buf [0x1000]byte
+ // If we can't read the page size, but can read a page, assume
+ // it's the same as the OS or one given -- since that's how the
+ // page size was chosen in the first place.
+ //
+ // If the first page is invalid and this OS uses a different
+ // page size than what the database was created with then we
+ // are out of luck and cannot access the database.
+ //
+ // TODO: scan for next page
+ if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
+ if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
+ db.pageSize = int(m.pageSize)
+ }
+ } else {
+ _ = db.close()
+ return nil, ErrInvalid
+ }
+ }
+
+ // Initialize page pool.
+ db.pagePool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, db.pageSize)
+ },
+ }
+
+ // Memory map the data file.
+ if err := db.mmap(options.InitialMmapSize); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ if db.readOnly {
+ return db, nil
+ }
+
+ db.loadFreelist()
+
+ // Flush freelist when transitioning from no sync to sync so
+ // NoFreelistSync unaware boltdb can open the db later.
+ if !db.NoFreelistSync && !db.hasSyncedFreelist() {
+ tx, err := db.Begin(true)
+ if tx != nil {
+ err = tx.Commit()
+ }
+ if err != nil {
+ _ = db.close()
+ return nil, err
+ }
+ }
+
+ // Mark the database as opened and return.
+ return db, nil
+}
+
+// loadFreelist reads the freelist if it is synced, or reconstructs it
+// by scanning the DB if it is not synced. It assumes there are no
+// concurrent accesses being made to the freelist.
+func (db *DB) loadFreelist() {
+ db.freelistLoad.Do(func() {
+ db.freelist = newFreelist(db.FreelistType)
+ if !db.hasSyncedFreelist() {
+ // Reconstruct free list by scanning the DB.
+ db.freelist.readIDs(db.freepages())
+ } else {
+ // Read free list from freelist page.
+ db.freelist.read(db.page(db.meta().freelist))
+ }
+ db.stats.FreePageN = db.freelist.free_count()
+ })
+}
+
+func (db *DB) hasSyncedFreelist() bool {
+ return db.meta().freelist != pgidNoFreelist
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ info, err := db.file.Stat()
+ if err != nil {
+ return fmt.Errorf("mmap stat error: %s", err)
+ } else if int(info.Size()) < db.pageSize*2 {
+ return fmt.Errorf("file size too small")
+ }
+
+ // Ensure the size is at least the minimum size.
+ var size = int(info.Size())
+ if size < minsz {
+ size = minsz
+ }
+ size, err = db.mmapSize(size)
+ if err != nil {
+ return err
+ }
+
+ // Dereference all mmap references before unmapping.
+ if db.rwtx != nil {
+ db.rwtx.root.dereference()
+ }
+
+ // Unmap existing data before continuing.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Memory-map the data file as a byte slice.
+ if err := mmap(db, size); err != nil {
+ return err
+ }
+
+ // Save references to the meta pages.
+ db.meta0 = db.page(0).meta()
+ db.meta1 = db.page(1).meta()
+
+ // Validate the meta pages. We only return an error if both meta pages fail
+ // validation, since meta0 failing validation means that it wasn't saved
+ // properly -- but we can recover using meta1. And vice-versa.
+ err0 := db.meta0.validate()
+ err1 := db.meta1.validate()
+ if err0 != nil && err1 != nil {
+ return err0
+ }
+
+ return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+ if err := munmap(db); err != nil {
+ return fmt.Errorf("unmap error: " + err.Error())
+ }
+ return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return int(1 << i), nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > maxMapSize {
+ return 0, fmt.Errorf("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+ // This should always be true since we're incrementing in MBs.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize {
+ sz = maxMapSize
+ }
+
+ return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf[:], pgid(i))
+ p.id = pgid(i)
+ p.flags = metaPageFlag
+
+ // Initialize the meta page.
+ m := p.meta()
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+ m.txid = txid(i)
+ m.checksum = m.sum64()
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf[:], pgid(2))
+ p.id = pgid(2)
+ p.flags = freelistPageFlag
+ p.count = 0
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3))
+ p.id = pgid(3)
+ p.flags = leafPageFlag
+ p.count = 0
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// It will block waiting for any open transactions to finish
+// before closing the database and returning.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Close file handles.
+ if db.file != nil {
+ // No need to unlock read-only file.
+ if !db.readOnly {
+ // Unlock the file.
+ if err := funlock(db); err != nil {
+ log.Printf("bolt.Close(): funlock error: %s", err)
+ }
+ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+ return fmt.Errorf("db file close: %s", err)
+ }
+ db.file = nil
+ }
+
+ db.path = ""
+ return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
+//
+// If a long running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potential blocking of write transaction.
+//
+// IMPORTANT: You must close read-only transactions after you are finished or
+// else the database will not reclaim old pages.
+func (db *DB) Begin(writable bool) (*Tx, error) {
+ if writable {
+ return db.beginRWTx()
+ }
+ return db.beginTx()
+}
+
+func (db *DB) beginTx() (*Tx, error) {
+ // Lock the meta pages while we initialize the transaction. We obtain
+ // the meta lock before the mmap lock because that's the order that the
+ // write transaction will obtain them.
+ db.metalock.Lock()
+
+ // Obtain a read-only lock on the mmap. When the mmap is remapped it will
+ // obtain a write lock so all transactions must finish before it can be
+ // remapped.
+ db.mmaplock.RLock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.mmaplock.RUnlock()
+ db.metalock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{}
+ t.init(db)
+
+ // Keep track of transaction until it closes.
+ db.txs = append(db.txs, t)
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Update the transaction stats.
+ db.statlock.Lock()
+ db.stats.TxN++
+ db.stats.OpenTxN = n
+ db.statlock.Unlock()
+
+ return t, nil
+}
+
+func (db *DB) beginRWTx() (*Tx, error) {
+ // If the database was opened with Options.ReadOnly, return an error.
+ if db.readOnly {
+ return nil, ErrDatabaseReadOnly
+ }
+
+ // Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
+ db.rwlock.Lock()
+
+ // Once we have the writer lock then we can lock the meta pages so that
+ // we can set up the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.rwlock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{writable: true}
+ t.init(db)
+ db.rwtx = t
+ db.freePages()
+ return t, nil
+}
+
+// freePages releases any pages associated with closed read-only transactions.
+func (db *DB) freePages() {
+ // Free all pending pages prior to earliest open transaction.
+ sort.Sort(txsById(db.txs))
+ minid := txid(0xFFFFFFFFFFFFFFFF)
+ if len(db.txs) > 0 {
+ minid = db.txs[0].meta.txid
+ }
+ if minid > 0 {
+ db.freelist.release(minid - 1)
+ }
+ // Release unused txid extents.
+ for _, t := range db.txs {
+ db.freelist.releaseRange(minid, t.meta.txid-1)
+ minid = t.meta.txid + 1
+ }
+ db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
+ // Any page both allocated and freed in an extent is safe to release.
+}
+
+type txsById []*Tx
+
+func (t txsById) Len() int { return len(t) }
+func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
+
+// removeTx removes a transaction from the database.
+func (db *DB) removeTx(tx *Tx) {
+ // Release the read lock on the mmap.
+ db.mmaplock.RUnlock()
+
+ // Use the meta lock to restrict access to the DB object.
+ db.metalock.Lock()
+
+ // Remove the transaction.
+ for i, t := range db.txs {
+ if t == tx {
+ last := len(db.txs) - 1
+ db.txs[i] = db.txs[last]
+ db.txs[last] = nil
+ db.txs = db.txs[:last]
+ break
+ }
+ }
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Merge statistics.
+ db.statlock.Lock()
+ db.stats.OpenTxN = n
+ db.stats.TxStats.add(&tx.stats)
+ db.statlock.Unlock()
+}
+
+// Update executes a function within the context of a read-write managed transaction.
+// If no error is returned from the function then the transaction is committed.
+// If an error is returned then the entire transaction is rolled back.
+// Any error that is returned from the function or returned from the commit is
+// returned from the Update() method.
+//
+// Attempting to manually commit or rollback within the function will cause a panic.
+func (db *DB) Update(fn func(*Tx) error) error {
+ t, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually commit.
+ t.managed = true
+
+ // If an error is returned from the function then rollback and return error.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Commit()
+}
+
+// View executes a function within the context of a managed read-only transaction.
+// Any error that is returned from the function is returned from the View() method.
+//
+// Attempting to manually rollback within the function will cause a panic.
+func (db *DB) View(fn func(*Tx) error) error {
+ t, err := db.Begin(false)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually rollback.
+ t.managed = true
+
+ // If an error is returned from the function then pass it through.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Rollback()
+}
+
+// Batch calls fn as part of a batch. It behaves similar to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen in
+// caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
+func (db *DB) Batch(fn func(*Tx) error) error {
+ errCh := make(chan error, 1)
+
+ db.batchMu.Lock()
+ if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
+ // There is no existing batch, or the existing batch is full; start a new one.
+ db.batch = &batch{
+ db: db,
+ }
+ db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+ }
+ db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
+ if len(db.batch.calls) >= db.MaxBatchSize {
+ // wake up batch, it's ready to run
+ go db.batch.trigger()
+ }
+ db.batchMu.Unlock()
+
+ err := <-errCh
+ if err == trySolo {
+ err = db.Update(fn)
+ }
+ return err
+}
+
+type call struct {
+ fn func(*Tx) error
+ err chan<- error
+}
+
+type batch struct {
+ db *DB
+ timer *time.Timer
+ start sync.Once
+ calls []call
+}
+
+// trigger runs the batch if it hasn't already been run.
+func (b *batch) trigger() {
+ b.start.Do(b.run)
+}
+
+// run performs the transactions in the batch and communicates results
+// back to DB.Batch.
+func (b *batch) run() {
+ b.db.batchMu.Lock()
+ b.timer.Stop()
+ // Make sure no new work is added to this batch, but don't break
+ // other batches.
+ if b.db.batch == b {
+ b.db.batch = nil
+ }
+ b.db.batchMu.Unlock()
+
+retry:
+ for len(b.calls) > 0 {
+ var failIdx = -1
+ err := b.db.Update(func(tx *Tx) error {
+ for i, c := range b.calls {
+ if err := safelyCall(c.fn, tx); err != nil {
+ failIdx = i
+ return err
+ }
+ }
+ return nil
+ })
+
+ if failIdx >= 0 {
+ // take the failing transaction out of the batch. it's
+ // safe to shorten b.calls here because db.batch no longer
+ // points to us, and we hold the mutex anyway.
+ c := b.calls[failIdx]
+ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+ // tell the submitter to re-run it solo; continue with the rest of the batch
+ c.err <- trySolo
+ continue retry
+ }
+
+ // pass success, or bolt internal errors, to all callers
+ for _, c := range b.calls {
+ c.err <- err
+ }
+ break retry
+ }
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+ reason interface{}
+}
+
+func (p panicked) Error() string {
+ if err, ok := p.reason.(error); ok {
+ return err.Error()
+ }
+ return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+ defer func() {
+ if p := recover(); p != nil {
+ err = panicked{p}
+ }
+ }()
+ return fn(tx)
+}
+
+// Sync executes fdatasync() against the database file handle.
+//
+// This is not necessary under normal operation, however, if you use NoSync
+// then it allows you to force the database file to sync against the disk.
+func (db *DB) Sync() error { return fdatasync(db) }
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+ db.statlock.RLock()
+ defer db.statlock.RUnlock()
+ return db.stats
+}
+
+// This is for internal access to the raw data bytes from the C cursor, use
+// carefully, or not at all.
+func (db *DB) Info() *Info {
+ return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id pgid) *page {
+ pos := id * pgid(db.pageSize)
+ return (*page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id pgid) *page {
+ return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *meta {
+ // We have to return the meta with the highest txid which doesn't fail
+ // validation. Otherwise, we can cause errors when in fact the database is
+ // in a consistent state. metaA is the one with the higher txid.
+ metaA := db.meta0
+ metaB := db.meta1
+ if db.meta1.txid > db.meta0.txid {
+ metaA = db.meta1
+ metaB = db.meta0
+ }
+
+ // Use higher meta page if valid. Otherwise fallback to previous, if valid.
+ if err := metaA.validate(); err == nil {
+ return metaA
+ } else if err := metaB.validate(); err == nil {
+ return metaB
+ }
+
+ // This should never be reached, because both meta1 and meta0 were validated
+ // on mmap() and we do fsync() on every write.
+ panic("bolt.DB.meta(): invalid meta pages")
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(txid txid, count int) (*page, error) {
+ // Allocate a temporary buffer for the page.
+ var buf []byte
+ if count == 1 {
+ buf = db.pagePool.Get().([]byte)
+ } else {
+ buf = make([]byte, count*db.pageSize)
+ }
+ p := (*page)(unsafe.Pointer(&buf[0]))
+ p.overflow = uint32(count - 1)
+
+ // Use pages from the freelist if they are available.
+ if p.id = db.freelist.allocate(txid, count); p.id != 0 {
+ return p, nil
+ }
+
+ // Resize mmap() if we're at the end.
+ p.id = db.rwtx.meta.pgid
+ var minsz = int((p.id+pgid(count))+1) * db.pageSize
+ if minsz >= db.datasz {
+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
+ }
+
+ // Move the page id high water mark.
+ db.rwtx.meta.pgid += pgid(count)
+
+ return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ if sz <= db.filesz {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+func (db *DB) freepages() []pgid {
+ tx, err := db.beginTx()
+ defer func() {
+ err = tx.Rollback()
+ if err != nil {
+ panic("freepages: failed to rollback tx")
+ }
+ }()
+ if err != nil {
+ panic("freepages: failed to open read only tx")
+ }
+
+ reachable := make(map[pgid]*page)
+ nofreed := make(map[pgid]bool)
+ ech := make(chan error)
+ go func() {
+ for e := range ech {
+ panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
+ }
+ }()
+ tx.checkBucket(&tx.root, reachable, nofreed, ech)
+ close(ech)
+
+ var fids []pgid
+ for i := pgid(2); i < db.meta().pgid; i++ {
+ if _, ok := reachable[i]; !ok {
+ fids = append(fids, i)
+ }
+ }
+ return fids
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Do not sync freelist to disk. This improves the database write performance
+ // under normal operation, but requires a full database re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
+ // dramatic performance degradation if database is large and fragmentation in freelist is common.
+ // The alternative one is using hashmap, it is faster in almost all circumstances
+ // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
+ // The default type is array
+ FreelistType FreelistType
+
+ // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
+ // grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block write transaction
+ // if the InitialMmapSize is large enough to hold database mmap
+ // size. (See DB.Begin for more information)
+ //
+ // If <=0, the initial map size is 0.
+ // If initialMmapSize is smaller than the previous database size,
+ // it takes no effect.
+ InitialMmapSize int
+
+ // PageSize overrides the default OS page size.
+ PageSize int
+
+ // NoSync sets the initial value of DB.NoSync. Normally this can just be
+ // set directly on the DB itself when returned from Open(), but this option
+ // is useful in APIs which expose Options but not the underlying DB.
+ NoSync bool
+
+ // OpenFile is used to open files. It defaults to os.OpenFile. This option
+ // is useful for writing hermetic tests.
+ OpenFile func(string, int, os.FileMode) (*os.File, error)
+}
+
+// DefaultOptions represent the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+ Timeout: 0,
+ NoGrowSync: false,
+ FreelistType: FreelistArrayType,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+ // Freelist stats
+ FreePageN int // total number of free pages on the freelist
+ PendingPageN int // total number of pending pages on the freelist
+ FreeAlloc int // total bytes allocated in free pages
+ FreelistInuse int // total bytes used by the freelist
+
+ // Transaction stats
+ TxN int // total number of started read transactions
+ OpenTxN int // number of currently open read transactions
+
+ TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *Stats) Sub(other *Stats) Stats {
+ if other == nil {
+ return *s
+ }
+ var diff Stats
+ diff.FreePageN = s.FreePageN
+ diff.PendingPageN = s.PendingPageN
+ diff.FreeAlloc = s.FreeAlloc
+ diff.FreelistInuse = s.FreelistInuse
+ diff.TxN = s.TxN - other.TxN
+ diff.TxStats = s.TxStats.Sub(&other.TxStats)
+ return diff
+}
+
+type Info struct {
+ Data uintptr
+ PageSize int
+}
+
+type meta struct {
+ magic uint32
+ version uint32
+ pageSize uint32
+ flags uint32
+ root bucket
+ freelist pgid
+ pgid pgid
+ txid txid
+ checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+ if m.magic != magic {
+ return ErrInvalid
+ } else if m.version != version {
+ return ErrVersionMismatch
+ } else if m.checksum != 0 && m.checksum != m.sum64() {
+ return ErrChecksum
+ }
+ return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+ *dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+ if m.root.root >= m.pgid {
+ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+ } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
+ // TODO: reject pgidNoFreeList if !NoFreelistSync
+ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+ }
+
+ // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ p.id = pgid(m.txid % 2)
+ p.flags |= metaPageFlag
+
+ // Calculate the checksum.
+ m.checksum = m.sum64()
+
+ m.copy(p.meta())
+}
+
+// generates the checksum for the meta.
+func (m *meta) sum64() uint64 {
+ var h = fnv.New64a()
+ _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+ return h.Sum64()
+}
+
+// _assert will panic with a given formatted message if the given condition is false.
+func _assert(condition bool, msg string, v ...interface{}) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/doc.go b/src/margo.sh/vendor/github.com/coreos/bbolt/doc.go
new file mode 100644
index 00000000..95f25f01
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/doc.go
@@ -0,0 +1,44 @@
+/*
+Package bbolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions which have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+
+Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
+
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database, however, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory which will cause a panic.
+
+
+*/
+package bbolt
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/errors.go b/src/margo.sh/vendor/github.com/coreos/bbolt/errors.go
new file mode 100644
index 00000000..48758ca5
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/errors.go
@@ -0,0 +1,71 @@
+package bbolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+ // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+ // is opened or after it is closed.
+ ErrDatabaseNotOpen = errors.New("database not open")
+
+ // ErrDatabaseOpen is returned when opening a database that is
+ // already open.
+ ErrDatabaseOpen = errors.New("database already open")
+
+ // ErrInvalid is returned when both meta pages on a database are invalid.
+ // This typically occurs when a file is not a bolt database.
+ ErrInvalid = errors.New("invalid database")
+
+ // ErrVersionMismatch is returned when the data file was created with a
+ // different version of Bolt.
+ ErrVersionMismatch = errors.New("version mismatch")
+
+ // ErrChecksum is returned when either meta page checksum does not match.
+ ErrChecksum = errors.New("checksum error")
+
+ // ErrTimeout is returned when a database cannot obtain an exclusive lock
+ // on the data file after the timeout passed to Open().
+ ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+ // ErrTxNotWritable is returned when performing a write operation on a
+ // read-only transaction.
+ ErrTxNotWritable = errors.New("tx not writable")
+
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ ErrTxClosed = errors.New("tx closed")
+
+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+ // read-only database.
+ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+ // ErrBucketNotFound is returned when trying to access a bucket that has
+ // not been created yet.
+ ErrBucketNotFound = errors.New("bucket not found")
+
+ // ErrBucketExists is returned when creating a bucket that already exists.
+ ErrBucketExists = errors.New("bucket already exists")
+
+ // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ ErrBucketNameRequired = errors.New("bucket name required")
+
+ // ErrKeyRequired is returned when inserting a zero-length key.
+ ErrKeyRequired = errors.New("key required")
+
+ // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ ErrKeyTooLarge = errors.New("key too large")
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ ErrValueTooLarge = errors.New("value too large")
+
+ // ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/freelist.go b/src/margo.sh/vendor/github.com/coreos/bbolt/freelist.go
new file mode 100644
index 00000000..d441b692
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/freelist.go
@@ -0,0 +1,413 @@
+package bbolt
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+// txPending holds a list of pgids and corresponding allocation txns
+// that are pending to be freed.
+type txPending struct {
+ ids []pgid
+ alloctx []txid // txids allocating the ids
+ lastReleaseBegin txid // beginning txid of last matching releaseRange
+}
+
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[pgid]struct{}
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+ freelistType FreelistType // freelist type
+ ids []pgid // all free and available free page ids.
+ allocs map[pgid]txid // mapping of txid that allocated a pgid.
+ pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
+ cache map[pgid]bool // fast lookup of all free and pending page ids.
+ freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
+ forwardMap map[pgid]uint64 // key is start pgid, value is its span size
+ backwardMap map[pgid]uint64 // key is end pgid, value is its span size
+ allocate func(txid txid, n int) pgid // the freelist allocate func
+ free_count func() int // the function which gives you free page number
+ mergeSpans func(ids pgids) // the mergeSpan func
+ getFreePageIDs func() []pgid // get free pgids func
+ readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist(freelistType FreelistType) *freelist {
+ f := &freelist{
+ freelistType: freelistType,
+ allocs: make(map[pgid]txid),
+ pending: make(map[txid]*txPending),
+ cache: make(map[pgid]bool),
+ freemaps: make(map[uint64]pidSet),
+ forwardMap: make(map[pgid]uint64),
+ backwardMap: make(map[pgid]uint64),
+ }
+
+ if freelistType == FreelistMapType {
+ f.allocate = f.hashmapAllocate
+ f.free_count = f.hashmapFreeCount
+ f.mergeSpans = f.hashmapMergeSpans
+ f.getFreePageIDs = f.hashmapGetFreePageIDs
+ f.readIDs = f.hashmapReadIDs
+ } else {
+ f.allocate = f.arrayAllocate
+ f.free_count = f.arrayFreeCount
+ f.mergeSpans = f.arrayMergeSpans
+ f.getFreePageIDs = f.arrayGetFreePageIDs
+ f.readIDs = f.arrayReadIDs
+ }
+
+ return f
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+ n := f.count()
+ if n >= 0xFFFF {
+ // The first element will be used to store the count. See freelist.write.
+ n++
+ }
+ return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+ return f.free_count() + f.pending_count()
+}
+
+// arrayFreeCount returns count of free pages(array version)
+func (f *freelist) arrayFreeCount() int {
+ return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+ var count int
+ for _, txp := range f.pending {
+ count += len(txp.ids)
+ }
+ return count
+}
+
+// copyallunsafe copies a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer
+ m := make(pgids, 0, f.pending_count())
+ for _, txp := range f.pending {
+ m = append(m, txp.ids...)
+ }
+ sort.Sort(m)
+ fpgids := f.getFreePageIDs()
+ sz := len(fpgids) + len(m)
+ dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(dstptr),
+ Len: sz,
+ Cap: sz,
+ }))
+ mergepgids(dst, fpgids, m)
+}
+
+func (f *freelist) copyall(dst []pgid) {
+ m := make(pgids, 0, f.pending_count())
+ for _, txp := range f.pending {
+ m = append(m, txp.ids...)
+ }
+ sort.Sort(m)
+ mergepgids(dst, f.getFreePageIDs(), m)
+}
+
+// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) arrayAllocate(txid txid, n int) pgid {
+ if len(f.ids) == 0 {
+ return 0
+ }
+
+ var initial, previd pgid
+ for i, id := range f.ids {
+ if id <= 1 {
+ panic(fmt.Sprintf("invalid page allocation: %d", id))
+ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+ initial = id
+ }
+
+ // If we found a contiguous block then remove it and return it.
+ if (id-initial)+1 == pgid(n) {
+ // If we're allocating off the beginning then take the fast path
+ // and just adjust the existing slice. This will use extra memory
+ // temporarily but the append() in free() will realloc the slice
+ // as is necessary.
+ if (i + 1) == n {
+ f.ids = f.ids[i+1:]
+ } else {
+ copy(f.ids[i-n+1:], f.ids[i+1:])
+ f.ids = f.ids[:len(f.ids)-n]
+ }
+
+ // Remove from the free cache.
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, initial+i)
+ }
+ f.allocs[initial] = txid
+ return initial
+ }
+
+ previd = id
+ }
+ return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+ if p.id <= 1 {
+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+ }
+
+ // Free page and all its overflow pages.
+ txp := f.pending[txid]
+ if txp == nil {
+ txp = &txPending{}
+ f.pending[txid] = txp
+ }
+ allocTxid, ok := f.allocs[p.id]
+ if ok {
+ delete(f.allocs, p.id)
+ } else if (p.flags & freelistPageFlag) != 0 {
+ // Freelist is always allocated by prior tx.
+ allocTxid = txid - 1
+ }
+
+ for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+ // Verify that page is not already free.
+ if f.cache[id] {
+ panic(fmt.Sprintf("page %d already freed", id))
+ }
+ // Add to the freelist and cache.
+ txp.ids = append(txp.ids, id)
+ txp.alloctx = append(txp.alloctx, allocTxid)
+ f.cache[id] = true
+ }
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+ m := make(pgids, 0)
+ for tid, txp := range f.pending {
+ if tid <= txid {
+ // Move transaction's pending pages to the available freelist.
+ // Don't remove from the cache since the page is still free.
+ m = append(m, txp.ids...)
+ delete(f.pending, tid)
+ }
+ }
+ f.mergeSpans(m)
+}
+
+// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
+func (f *freelist) releaseRange(begin, end txid) {
+ if begin > end {
+ return
+ }
+ var m pgids
+ for tid, txp := range f.pending {
+ if tid < begin || tid > end {
+ continue
+ }
+ // Don't recompute freed pages if ranges haven't updated.
+ if txp.lastReleaseBegin == begin {
+ continue
+ }
+ for i := 0; i < len(txp.ids); i++ {
+ if atx := txp.alloctx[i]; atx < begin || atx > end {
+ continue
+ }
+ m = append(m, txp.ids[i])
+ txp.ids[i] = txp.ids[len(txp.ids)-1]
+ txp.ids = txp.ids[:len(txp.ids)-1]
+ txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
+ txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
+ i--
+ }
+ txp.lastReleaseBegin = begin
+ if len(txp.ids) == 0 {
+ delete(f.pending, tid)
+ }
+ }
+ f.mergeSpans(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+ // Remove page ids from cache.
+ txp := f.pending[txid]
+ if txp == nil {
+ return
+ }
+ var m pgids
+ for i, pgid := range txp.ids {
+ delete(f.cache, pgid)
+ tx := txp.alloctx[i]
+ if tx == 0 {
+ continue
+ }
+ if tx != txid {
+ // Pending free aborted; restore page back to alloc list.
+ f.allocs[pgid] = tx
+ } else {
+ // Freed page was allocated by this txn; OK to throw away.
+ m = append(m, pgid)
+ }
+ }
+ // Remove pages from pending list and mark as free if allocated by txid.
+ delete(f.pending, txid)
+ f.mergeSpans(m)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+ return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+ if (p.flags & freelistPageFlag) == 0 {
+ panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
+ }
+ // If the page.count is at the max uint16 value (64k) then it's considered
+ // an overflow and the size of the freelist is stored as the first element.
+ var idx, count uintptr = 0, uintptr(p.count)
+ if count == 0xFFFF {
+ idx = 1
+ count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))))
+ }
+
+ // Copy the list of page ids from the freelist.
+ if count == 0 {
+ f.ids = nil
+ } else {
+ ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)),
+ Len: int(count),
+ Cap: int(count),
+ }))
+
+ // copy the ids, so we don't modify on the freelist page directly
+ idsCopy := make([]pgid, count)
+ copy(idsCopy, ids)
+ // Make sure they're sorted.
+ sort.Sort(pgids(idsCopy))
+
+ f.readIDs(idsCopy)
+ }
+}
+
+// arrayReadIDs initializes the freelist from a given list of ids.
+func (f *freelist) arrayReadIDs(ids []pgid) {
+ f.ids = ids
+ f.reindex()
+}
+
+func (f *freelist) arrayGetFreePageIDs() []pgid {
+ return f.ids
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+ // Combine the old free pgids and pgids waiting on an open transaction.
+
+ // Update the header flag.
+ p.flags |= freelistPageFlag
+
+ // The page.count can only hold up to 64k elements so if we overflow that
+ // number then we handle it by putting the size in the first element.
+ lenids := f.count()
+ if lenids == 0 {
+ p.count = uint16(lenids)
+ } else if lenids < 0xFFFF {
+ p.count = uint16(lenids)
+ f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
+ } else {
+ p.count = 0xFFFF
+ *(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids)
+ f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0))))
+ }
+
+ return nil
+}
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+ f.read(p)
+
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range f.getFreePageIDs() {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
+// noSyncReload reads the freelist from pgids and filters out pending items.
+func (f *freelist) noSyncReload(pgids []pgid) {
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range pgids {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (f *freelist) reindex() {
+ ids := f.getFreePageIDs()
+ f.cache = make(map[pgid]bool, len(ids))
+ for _, id := range ids {
+ f.cache[id] = true
+ }
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ f.cache[pendingID] = true
+ }
+ }
+}
+
+// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array
+func (f *freelist) arrayMergeSpans(ids pgids) {
+ sort.Sort(ids)
+ f.ids = pgids(f.ids).merge(ids)
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/freelist_hmap.go b/src/margo.sh/vendor/github.com/coreos/bbolt/freelist_hmap.go
new file mode 100644
index 00000000..02ef2be0
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/freelist_hmap.go
@@ -0,0 +1,178 @@
+package bbolt
+
+import "sort"
+
+// hashmapFreeCount returns the count of free pages (hashmap version)
+func (f *freelist) hashmapFreeCount() int {
+ // use the forwardmap to get the total count
+ count := 0
+ for _, size := range f.forwardMap {
+ count += int(size)
+ }
+ return count
+}
+
+// hashmapAllocate serves the same purpose as arrayAllocate, but uses a hashmap as the backend
+func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
+ if n == 0 {
+ return 0
+ }
+
+	// if we have an exact size match just return the short path
+ if bm, ok := f.freemaps[uint64(n)]; ok {
+ for pid := range bm {
+ // remove the span
+ f.delSpan(pid, uint64(n))
+
+ f.allocs[pid] = txid
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+i)
+ }
+ return pid
+ }
+ }
+
+ // lookup the map to find larger span
+ for size, bm := range f.freemaps {
+ if size < uint64(n) {
+ continue
+ }
+
+ for pid := range bm {
+ // remove the initial
+ f.delSpan(pid, uint64(size))
+
+ f.allocs[pid] = txid
+
+ remain := size - uint64(n)
+
+ // add remain span
+ f.addSpan(pid+pgid(n), remain)
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ return 0
+}
+
+// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version)
+func (f *freelist) hashmapReadIDs(pgids []pgid) {
+ f.init(pgids)
+
+ // Rebuild the page cache.
+ f.reindex()
+}
+
+// hashmapGetFreePageIDs returns the sorted free page ids
+func (f *freelist) hashmapGetFreePageIDs() []pgid {
+ count := f.free_count()
+ if count == 0 {
+ return nil
+ }
+
+ m := make([]pgid, 0, count)
+ for start, size := range f.forwardMap {
+ for i := 0; i < int(size); i++ {
+ m = append(m, start+pgid(i))
+ }
+ }
+ sort.Sort(pgids(m))
+
+ return m
+}
+
+// hashmapMergeSpans tries to merge a list of pages (represented by pgids) with existing spans
+func (f *freelist) hashmapMergeSpans(ids pgids) {
+ for _, id := range ids {
+ // try to see if we can merge and update
+ f.mergeWithExistingSpan(id)
+ }
+}
+
+// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward
+func (f *freelist) mergeWithExistingSpan(pid pgid) {
+ prev := pid - 1
+ next := pid + 1
+
+ preSize, mergeWithPrev := f.backwardMap[prev]
+ nextSize, mergeWithNext := f.forwardMap[next]
+ newStart := pid
+ newSize := uint64(1)
+
+ if mergeWithPrev {
+ //merge with previous span
+ start := prev + 1 - pgid(preSize)
+ f.delSpan(start, preSize)
+
+ newStart -= pgid(preSize)
+ newSize += preSize
+ }
+
+ if mergeWithNext {
+ // merge with next span
+ f.delSpan(next, nextSize)
+ newSize += nextSize
+ }
+
+ f.addSpan(newStart, newSize)
+}
+
+func (f *freelist) addSpan(start pgid, size uint64) {
+ f.backwardMap[start-1+pgid(size)] = size
+ f.forwardMap[start] = size
+ if _, ok := f.freemaps[size]; !ok {
+ f.freemaps[size] = make(map[pgid]struct{})
+ }
+
+ f.freemaps[size][start] = struct{}{}
+}
+
+func (f *freelist) delSpan(start pgid, size uint64) {
+ delete(f.forwardMap, start)
+ delete(f.backwardMap, start+pgid(size-1))
+ delete(f.freemaps[size], start)
+ if len(f.freemaps[size]) == 0 {
+ delete(f.freemaps, size)
+ }
+}
+
+// init initializes the freelist from pgids when using the hashmap version
+// pgids must be sorted
+func (f *freelist) init(pgids []pgid) {
+ if len(pgids) == 0 {
+ return
+ }
+
+ size := uint64(1)
+ start := pgids[0]
+
+ if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+ panic("pgids not sorted")
+ }
+
+ f.freemaps = make(map[uint64]pidSet)
+ f.forwardMap = make(map[pgid]uint64)
+ f.backwardMap = make(map[pgid]uint64)
+
+ for i := 1; i < len(pgids); i++ {
+ // continuous page
+ if pgids[i] == pgids[i-1]+1 {
+ size++
+ } else {
+ f.addSpan(start, size)
+
+ size = 1
+ start = pgids[i]
+ }
+ }
+
+ // init the tail
+ if size != 0 && start != 0 {
+ f.addSpan(start, size)
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/go.mod b/src/margo.sh/vendor/github.com/coreos/bbolt/go.mod
new file mode 100644
index 00000000..c2366dae
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/go.mod
@@ -0,0 +1,5 @@
+module go.etcd.io/bbolt
+
+go 1.12
+
+require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/go.sum b/src/margo.sh/vendor/github.com/coreos/bbolt/go.sum
new file mode 100644
index 00000000..4ad15a48
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/node.go b/src/margo.sh/vendor/github.com/coreos/bbolt/node.go
new file mode 100644
index 00000000..1690eef3
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/node.go
@@ -0,0 +1,607 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+// node represents an in-memory, deserialized page.
+type node struct {
+ bucket *Bucket
+ isLeaf bool
+ unbalanced bool
+ spilled bool
+ key []byte
+ pgid pgid
+ parent *node
+ children nodes
+ inodes inodes
+}
+
+// root returns the top-level node this node is attached to.
+func (n *node) root() *node {
+ if n.parent == nil {
+ return n
+ }
+ return n.parent.root()
+}
+
+// minKeys returns the minimum number of inodes this node should have.
+func (n *node) minKeys() int {
+ if n.isLeaf {
+ return 1
+ }
+ return 2
+}
+
+// size returns the size of the node after serialization.
+func (n *node) size() int {
+ sz, elsz := pageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
+ }
+ return int(sz)
+}
+
+// sizeLessThan returns true if the node is less than a given size.
+// This is an optimization to avoid calculating a large node when we only need
+// to know if it fits inside a certain page size.
+func (n *node) sizeLessThan(v uintptr) bool {
+ sz, elsz := pageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
+ if sz >= v {
+ return false
+ }
+ }
+ return true
+}
+
+// pageElementSize returns the size of each page element based on the type of node.
+func (n *node) pageElementSize() uintptr {
+ if n.isLeaf {
+ return leafPageElementSize
+ }
+ return branchPageElementSize
+}
+
+// childAt returns the child node at a given index.
+func (n *node) childAt(index int) *node {
+ if n.isLeaf {
+ panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
+ }
+ return n.bucket.node(n.inodes[index].pgid, n)
+}
+
+// childIndex returns the index of a given child node.
+func (n *node) childIndex(child *node) int {
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
+ return index
+}
+
+// numChildren returns the number of children.
+func (n *node) numChildren() int {
+ return len(n.inodes)
+}
+
+// nextSibling returns the next node with the same parent.
+func (n *node) nextSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index >= n.parent.numChildren()-1 {
+ return nil
+ }
+ return n.parent.childAt(index + 1)
+}
+
+// prevSibling returns the previous node with the same parent.
+func (n *node) prevSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index == 0 {
+ return nil
+ }
+ return n.parent.childAt(index - 1)
+}
+
+// put inserts a key/value.
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
+ if pgid >= n.bucket.tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+ } else if len(oldKey) <= 0 {
+ panic("put: zero-length old key")
+ } else if len(newKey) <= 0 {
+ panic("put: zero-length new key")
+ }
+
+ // Find insertion index.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+
+ // Add capacity and shift nodes if we don't have an exact match and need to insert.
+ exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
+ if !exact {
+ n.inodes = append(n.inodes, inode{})
+ copy(n.inodes[index+1:], n.inodes[index:])
+ }
+
+ inode := &n.inodes[index]
+ inode.flags = flags
+ inode.key = newKey
+ inode.value = value
+ inode.pgid = pgid
+ _assert(len(inode.key) > 0, "put: zero-length inode key")
+}
+
+// del removes a key from the node.
+func (n *node) del(key []byte) {
+ // Find index of key.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
+
+ // Exit if the key isn't found.
+ if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
+ return
+ }
+
+ // Delete inode from the node.
+ n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
+
+ // Mark the node as needing rebalancing.
+ n.unbalanced = true
+}
+
+// read initializes the node from a page.
+func (n *node) read(p *page) {
+ n.pgid = p.id
+ n.isLeaf = ((p.flags & leafPageFlag) != 0)
+ n.inodes = make(inodes, int(p.count))
+
+ for i := 0; i < int(p.count); i++ {
+ inode := &n.inodes[i]
+ if n.isLeaf {
+ elem := p.leafPageElement(uint16(i))
+ inode.flags = elem.flags
+ inode.key = elem.key()
+ inode.value = elem.value()
+ } else {
+ elem := p.branchPageElement(uint16(i))
+ inode.pgid = elem.pgid
+ inode.key = elem.key()
+ }
+ _assert(len(inode.key) > 0, "read: zero-length inode key")
+ }
+
+ // Save first key so we can find the node in the parent when we spill.
+ if len(n.inodes) > 0 {
+ n.key = n.inodes[0].key
+ _assert(len(n.key) > 0, "read: zero-length node key")
+ } else {
+ n.key = nil
+ }
+}
+
+// write writes the items onto one or more pages.
+func (n *node) write(p *page) {
+ // Initialize page.
+ if n.isLeaf {
+ p.flags |= leafPageFlag
+ } else {
+ p.flags |= branchPageFlag
+ }
+
+ if len(n.inodes) >= 0xFFFF {
+ panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+ }
+ p.count = uint16(len(n.inodes))
+
+ // Stop here if there are no items to write.
+ if p.count == 0 {
+ return
+ }
+
+ // Loop over each item and write it to the page.
+ bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
+ for i, item := range n.inodes {
+ _assert(len(item.key) > 0, "write: zero-length inode key")
+
+ // Write the page element.
+ if n.isLeaf {
+ elem := p.leafPageElement(uint16(i))
+ elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
+ elem.flags = item.flags
+ elem.ksize = uint32(len(item.key))
+ elem.vsize = uint32(len(item.value))
+ } else {
+ elem := p.branchPageElement(uint16(i))
+ elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
+ elem.ksize = uint32(len(item.key))
+ elem.pgid = item.pgid
+ _assert(elem.pgid != p.id, "write: circular dependency occurred")
+ }
+
+ // Create a slice to write into of needed size and advance
+ // byte pointer for next iteration.
+ klen, vlen := len(item.key), len(item.value)
+ sz := klen + vlen
+ b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: bp,
+ Len: sz,
+ Cap: sz,
+ }))
+ bp += uintptr(sz)
+
+ // Write data for the element to the end of the page.
+ l := copy(b, item.key)
+ copy(b[l:], item.value)
+ }
+
+ // DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize uintptr) []*node {
+ var nodes []*node
+
+ node := n
+ for {
+ // Split node into two.
+ a, b := node.splitTwo(pageSize)
+ nodes = append(nodes, a)
+
+ // If we can't split then exit the loop.
+ if b == nil {
+ break
+ }
+
+ // Set node to b so it gets split on the next iteration.
+ node = b
+ }
+
+ return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
+ // Ignore the split if the page doesn't have at least enough nodes for
+ // two pages or if the nodes can fit in a single page.
+ if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ return n, nil
+ }
+
+ // Determine the threshold before starting a new node.
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.Split++
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This is only called from split().
+func (n *node) splitIndex(threshold int) (index, sz uintptr) {
+ sz = pageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ index = uintptr(i)
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the children size on every loop iteration.
+ sort.Sort(n.children)
+ for i := 0; i < len(n.children); i++ {
+ if err := n.children[i].spill(); err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the child list because it's only used for spill tracking.
+ n.children = nil
+
+ // Split nodes into appropriate sizes. The first node will always be n.
+ var nodes = n.split(uintptr(tx.db.pageSize))
+ for _, node := range nodes {
+ // Add node's page to the freelist if it's not new.
+ if node.pgid > 0 {
+ tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
+ node.pgid = 0
+ }
+
+ // Allocate contiguous space for the node.
+ p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
+ if err != nil {
+ return err
+ }
+
+ // Write the node.
+ if p.id >= tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+ }
+ node.pgid = p.id
+ node.write(p)
+ node.spilled = true
+
+ // Insert into parent inodes.
+ if node.parent != nil {
+ var key = node.key
+ if key == nil {
+ key = node.inodes[0].key
+ }
+
+ node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
+ node.key = node.inodes[0].key
+ _assert(len(node.key) > 0, "spill: zero-length node key")
+ }
+
+ // Update the statistics.
+ tx.stats.Spill++
+ }
+
+ // If the root node split and created a new root then we need to spill that
+ // as well. We'll clear out the children to make sure it doesn't try to respill.
+ if n.parent != nil && n.parent.pgid == 0 {
+ n.children = nil
+ return n.parent.spill()
+ }
+
+ return nil
+}
+
+// rebalance attempts to combine the node with sibling nodes if the node fill
+// size is below a threshold or if there are not enough keys.
+func (n *node) rebalance() {
+ if !n.unbalanced {
+ return
+ }
+ n.unbalanced = false
+
+ // Update statistics.
+ n.bucket.tx.stats.Rebalance++
+
+ // Ignore if node is above threshold (25%) and has enough keys.
+ var threshold = n.bucket.tx.db.pageSize / 4
+ if n.size() > threshold && len(n.inodes) > n.minKeys() {
+ return
+ }
+
+ // Root node has special handling.
+ if n.parent == nil {
+ // If root node is a branch and only has one node then collapse it.
+ if !n.isLeaf && len(n.inodes) == 1 {
+ // Move root's child up.
+ child := n.bucket.node(n.inodes[0].pgid, n)
+ n.isLeaf = child.isLeaf
+ n.inodes = child.inodes[:]
+ n.children = child.children
+
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent = n
+ }
+ }
+
+ // Remove old child.
+ child.parent = nil
+ delete(n.bucket.nodes, child.pgid)
+ child.free()
+ }
+
+ return
+ }
+
+ // If node has no keys then just remove it.
+ if n.numChildren() == 0 {
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ n.parent.rebalance()
+ return
+ }
+
+ _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
+
+ // Destination node is right sibling if idx == 0, otherwise left sibling.
+ var target *node
+ var useNextSibling = (n.parent.childIndex(n) == 0)
+ if useNextSibling {
+ target = n.nextSibling()
+ } else {
+ target = n.prevSibling()
+ }
+
+ // If both this node and the target node are too small then merge them.
+ if useNextSibling {
+ // Reparent all child nodes being moved.
+ for _, inode := range target.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent.removeChild(child)
+ child.parent = n
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes from target and remove target.
+ n.inodes = append(n.inodes, target.inodes...)
+ n.parent.del(target.key)
+ n.parent.removeChild(target)
+ delete(n.bucket.nodes, target.pgid)
+ target.free()
+ } else {
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent.removeChild(child)
+ child.parent = target
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes to target and remove node.
+ target.inodes = append(target.inodes, n.inodes...)
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ }
+
+ // Either this node or the target node was deleted from the parent so rebalance it.
+ n.parent.rebalance()
+}
+
+// removeChild removes a node from the list of in-memory children.
+// This does not affect the inodes.
+func (n *node) removeChild(target *node) {
+ for i, child := range n.children {
+ if child == target {
+ n.children = append(n.children[:i], n.children[i+1:]...)
+ return
+ }
+ }
+}
+
+// dereference causes the node to copy all its inode key/value references to heap memory.
+// This is required when the mmap is reallocated so inodes are not pointing to stale data.
+func (n *node) dereference() {
+ if n.key != nil {
+ key := make([]byte, len(n.key))
+ copy(key, n.key)
+ n.key = key
+ _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
+ }
+
+ for i := range n.inodes {
+ inode := &n.inodes[i]
+
+ key := make([]byte, len(inode.key))
+ copy(key, inode.key)
+ inode.key = key
+ _assert(len(inode.key) > 0, "dereference: zero-length inode key")
+
+ value := make([]byte, len(inode.value))
+ copy(value, inode.value)
+ inode.value = value
+ }
+
+ // Recursively dereference children.
+ for _, child := range n.children {
+ child.dereference()
+ }
+
+ // Update statistics.
+ n.bucket.tx.stats.NodeDeref++
+}
+
+// free adds the node's underlying page to the freelist.
+func (n *node) free() {
+ if n.pgid != 0 {
+ n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
+ n.pgid = 0
+ }
+}
+
+// dump writes the contents of the node to STDERR for debugging purposes.
+/*
+func (n *node) dump() {
+ // Write node header.
+ var typ = "branch"
+ if n.isLeaf {
+ typ = "leaf"
+ }
+ warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
+
+ // Write out abbreviated version of each item.
+ for _, item := range n.inodes {
+ if n.isLeaf {
+ if item.flags&bucketLeafFlag != 0 {
+ bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
+ warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
+ } else {
+ warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
+ }
+ } else {
+ warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
+ }
+ }
+ warn("")
+}
+*/
+
+type nodes []*node
+
+func (s nodes) Len() int { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool {
+ return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
+}
+
+// inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type inode struct {
+ flags uint32
+ pgid pgid
+ key []byte
+ value []byte
+}
+
+type inodes []inode
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/page.go b/src/margo.sh/vendor/github.com/coreos/bbolt/page.go
new file mode 100644
index 00000000..b5c16997
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/page.go
@@ -0,0 +1,219 @@
+package bbolt
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+const pageHeaderSize = unsafe.Sizeof(page{})
+
+const minKeysPerPage = 2
+
+const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
+
+const (
+ branchPageFlag = 0x01
+ leafPageFlag = 0x02
+ metaPageFlag = 0x04
+ freelistPageFlag = 0x10
+)
+
+const (
+ bucketLeafFlag = 0x01
+)
+
+type pgid uint64
+
+type page struct {
+ id pgid
+ flags uint16
+ count uint16
+ overflow uint32
+}
+
+// typ returns a human readable page type string used for debugging.
+func (p *page) typ() string {
+ if (p.flags & branchPageFlag) != 0 {
+ return "branch"
+ } else if (p.flags & leafPageFlag) != 0 {
+ return "leaf"
+ } else if (p.flags & metaPageFlag) != 0 {
+ return "meta"
+ } else if (p.flags & freelistPageFlag) != 0 {
+ return "freelist"
+ }
+ return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+// meta returns a pointer to the metadata section of the page.
+func (p *page) meta() *meta {
+ return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
+}
+
+// leafPageElement retrieves the leaf node by index
+func (p *page) leafPageElement(index uint16) *leafPageElement {
+ off := uintptr(index) * unsafe.Sizeof(leafPageElement{})
+ return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
+}
+
+// leafPageElements retrieves a list of leaf nodes.
+func (p *page) leafPageElements() []leafPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
+ Len: int(p.count),
+ Cap: int(p.count),
+ }))
+}
+
+// branchPageElement retrieves the branch node by index
+func (p *page) branchPageElement(index uint16) *branchPageElement {
+ off := uintptr(index) * unsafe.Sizeof(branchPageElement{})
+ return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
+}
+
+// branchPageElements retrieves a list of branch nodes.
+func (p *page) branchPageElements() []branchPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
+ Len: int(p.count),
+ Cap: int(p.count),
+ }))
+}
+
+// dump writes n bytes of the page to STDERR as hex output.
+func (p *page) hexdump(n int) {
+ buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p)),
+ Len: n,
+ Cap: n,
+ }))
+ fmt.Fprintf(os.Stderr, "%x\n", buf)
+}
+
+type pages []*page
+
+func (s pages) Len() int { return len(s) }
+func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
+
+// branchPageElement represents a node on a branch page.
+type branchPageElement struct {
+ pos uint32
+ ksize uint32
+ pgid pgid
+}
+
+// key returns a byte slice of the node key.
+func (n *branchPageElement) key() []byte {
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
+ Len: int(n.ksize),
+ Cap: int(n.ksize),
+ }))
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+ flags uint32
+ pos uint32
+ ksize uint32
+ vsize uint32
+}
+
+// key returns a byte slice of the node key.
+func (n *leafPageElement) key() []byte {
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
+ Len: int(n.ksize),
+ Cap: int(n.ksize),
+ }))
+}
+
+// value returns a byte slice of the node value.
+func (n *leafPageElement) value() []byte {
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize),
+ Len: int(n.vsize),
+ Cap: int(n.vsize),
+ }))
+}
+
+// PageInfo represents human readable information about a page.
+type PageInfo struct {
+ ID int
+ Type string
+ Count int
+ OverflowCount int
+}
+
+type pgids []pgid
+
+func (s pgids) Len() int { return len(s) }
+func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
+
+// merge returns the sorted union of a and b.
+func (a pgids) merge(b pgids) pgids {
+ // Return the opposite slice if one is nil.
+ if len(a) == 0 {
+ return b
+ }
+ if len(b) == 0 {
+ return a
+ }
+ merged := make(pgids, len(a)+len(b))
+ mergepgids(merged, a, b)
+ return merged
+}
+
+// mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func mergepgids(dst, a, b pgids) {
+ if len(dst) < len(a)+len(b) {
+ panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+ }
+ // Copy in the opposite slice if one is nil.
+ if len(a) == 0 {
+ copy(dst, b)
+ return
+ }
+ if len(b) == 0 {
+ copy(dst, a)
+ return
+ }
+
+ // Merged will hold all elements from both lists.
+ merged := dst[:0]
+
+ // Assign lead to the slice with a lower starting value, follow to the higher value.
+ lead, follow := a, b
+ if b[0] < a[0] {
+ lead, follow = b, a
+ }
+
+ // Continue while there are elements in the lead.
+ for len(lead) > 0 {
+ // Merge largest prefix of lead that is ahead of follow[0].
+ n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+ merged = append(merged, lead[:n]...)
+ if n >= len(lead) {
+ break
+ }
+
+ // Swap lead and follow.
+ lead, follow = follow, lead[n:]
+ }
+
+ // Append what's left in follow.
+ _ = append(merged, follow...)
+}
diff --git a/src/margo.sh/vendor/github.com/coreos/bbolt/tx.go b/src/margo.sh/vendor/github.com/coreos/bbolt/tx.go
new file mode 100644
index 00000000..13937cdb
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/coreos/bbolt/tx.go
@@ -0,0 +1,735 @@
+package bbolt
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+ "unsafe"
+)
+
+// txid represents the internal transaction identifier.
+type txid uint64
+
+// Tx represents a read-only or read/write transaction on the database.
+// Read-only transactions can be used for retrieving values for keys and creating cursors.
+// Read/write transactions can create and remove buckets and create and remove keys.
+//
+// IMPORTANT: You must commit or rollback transactions when you are done with
+// them. Pages can not be reclaimed by the writer until no more transactions
+// are using them. A long running read transaction can cause the database to
+// quickly grow.
+type Tx struct {
+ writable bool
+ managed bool
+ db *DB
+ meta *meta
+ root Bucket
+ pages map[pgid]*page
+ stats TxStats
+ commitHandlers []func()
+
+ // WriteFlag specifies the flag for write-related methods like WriteTo().
+ // Tx opens the database file with the specified flag to copy the data.
+ //
+ // By default, the flag is unset, which works well for mostly in-memory
+ // workloads. For databases that are much larger than available RAM,
+	// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
+ WriteFlag int
+}
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+ tx.db = db
+ tx.pages = nil
+
+ // Copy the meta page since it can be changed by the writer.
+ tx.meta = &meta{}
+ db.meta().copy(tx.meta)
+
+ // Copy over the root bucket.
+ tx.root = newBucket(tx)
+ tx.root.bucket = &bucket{}
+ *tx.root.bucket = tx.meta.root
+
+ // Increment the transaction id and add a page cache for writable transactions.
+ if tx.writable {
+ tx.pages = make(map[pgid]*page)
+ tx.meta.txid += txid(1)
+ }
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+ return int(tx.meta.txid)
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+ return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+func (tx *Tx) Size() int64 {
+ return int64(tx.meta.pgid) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+ return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+ return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+ return tx.stats
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
+}
+
+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucket(name)
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucketIfNotExists(name)
+}
+
+// DeleteBucket deletes a bucket.
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+ return tx.root.DeleteBucket(name)
+}
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+ return tx.root.ForEach(func(k, v []byte) error {
+ return fn(k, tx.root.Bucket(k))
+ })
+}
+
+// OnCommit adds a handler function to be executed after the transaction successfully commits.
+func (tx *Tx) OnCommit(fn func()) {
+ tx.commitHandlers = append(tx.commitHandlers, fn)
+}
+
+// Commit writes all changes to disk and updates the meta page.
+// Returns an error if a disk write error occurs, or if Commit is
+// called on a read-only transaction.
+func (tx *Tx) Commit() error {
+ _assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ } else if !tx.writable {
+ return ErrTxNotWritable
+ }
+
+ // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
+
+ // Rebalance nodes which have had deletions.
+ var startTime = time.Now()
+ tx.root.rebalance()
+ if tx.stats.Rebalance > 0 {
+ tx.stats.RebalanceTime += time.Since(startTime)
+ }
+
+ // spill data onto dirty pages.
+ startTime = time.Now()
+ if err := tx.root.spill(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.SpillTime += time.Since(startTime)
+
+ // Free the old root bucket.
+ tx.meta.root.root = tx.root.root
+
+ // Free the old freelist because commit writes out a fresh freelist.
+ if tx.meta.freelist != pgidNoFreelist {
+ tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
+ }
+
+ if !tx.db.NoFreelistSync {
+ err := tx.commitFreelist()
+ if err != nil {
+ return err
+ }
+ } else {
+ tx.meta.freelist = pgidNoFreelist
+ }
+
+ // Write dirty pages to disk.
+ startTime = time.Now()
+ if err := tx.write(); err != nil {
+ tx.rollback()
+ return err
+ }
+
+ // If strict mode is enabled then perform a consistency check.
+ // Only the first consistency error is reported in the panic.
+ if tx.db.StrictMode {
+ ch := tx.Check()
+ var errs []string
+ for {
+ err, ok := <-ch
+ if !ok {
+ break
+ }
+ errs = append(errs, err.Error())
+ }
+ if len(errs) > 0 {
+ panic("check fail: " + strings.Join(errs, "\n"))
+ }
+ }
+
+ // Write meta to disk.
+ if err := tx.writeMeta(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.WriteTime += time.Since(startTime)
+
+ // Finalize the transaction.
+ tx.close()
+
+ // Execute commit handlers now that the locks have been removed.
+ for _, fn := range tx.commitHandlers {
+ fn()
+ }
+
+ return nil
+}
+
+func (tx *Tx) commitFreelist() error {
+ // Allocate new pages for the new free list. This will overestimate
+ // the size of the freelist but not underestimate the size (which would be bad).
+ opgid := tx.meta.pgid
+ p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ tx.rollback()
+ return err
+ }
+ if err := tx.db.freelist.write(p); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.meta.freelist = p.id
+ // If the high water mark has moved up then attempt to grow the database.
+ if tx.meta.pgid > opgid {
+ if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+ tx.rollback()
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Rollback closes the transaction and ignores all previous updates. Read-only
+// transactions must be rolled back and not committed.
+func (tx *Tx) Rollback() error {
+ _assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ }
+ tx.nonPhysicalRollback()
+ return nil
+}
+
+// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk.
+func (tx *Tx) nonPhysicalRollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ }
+ tx.close()
+}
+
+// rollback needs to reload the free pages from disk in case some system error happens like fsync error.
+func (tx *Tx) rollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ if !tx.db.hasSyncedFreelist() {
+ // Reconstruct free page list by scanning the DB to get the whole free page list.
+			// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
+ tx.db.freelist.noSyncReload(tx.db.freepages())
+ } else {
+ // Read free page list from freelist page.
+ tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+ }
+ }
+ tx.close()
+}
+
+func (tx *Tx) close() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ // Grab freelist stats.
+ var freelistFreeN = tx.db.freelist.free_count()
+ var freelistPendingN = tx.db.freelist.pending_count()
+ var freelistAlloc = tx.db.freelist.size()
+
+ // Remove transaction ref & writer lock.
+ tx.db.rwtx = nil
+ tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+ tx.db.statlock.Lock()
+ tx.db.stats.FreePageN = freelistFreeN
+ tx.db.stats.PendingPageN = freelistPendingN
+ tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
+ tx.db.stats.FreelistInuse = freelistAlloc
+ tx.db.stats.TxStats.add(&tx.stats)
+ tx.db.statlock.Unlock()
+ } else {
+ tx.db.removeTx(tx)
+ }
+
+ // Clear all references.
+ tx.db = nil
+ tx.meta = nil
+ tx.root = Bucket{tx: tx}
+ tx.pages = nil
+}
+
+// Copy writes the entire database to a writer.
+// This function exists for backwards compatibility.
+//
+// Deprecated: Use WriteTo() instead.
+func (tx *Tx) Copy(w io.Writer) error {
+ _, err := tx.WriteTo(w)
+ return err
+}
+
+// WriteTo writes the entire database to a writer.
+// If err == nil then exactly tx.Size() bytes will be written into the writer.
+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+ // Attempt to open reader with WriteFlag
+ f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+
+ // Generate a meta page. We use the same page data for both meta pages.
+ buf := make([]byte, tx.db.pageSize)
+ page := (*page)(unsafe.Pointer(&buf[0]))
+ page.flags = metaPageFlag
+ *page.meta() = *tx.meta
+
+ // Write meta 0.
+ page.id = 0
+ page.meta().checksum = page.meta().sum64()
+ nn, err := w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 0 copy: %s", err)
+ }
+
+ // Write meta 1 with a lower transaction id.
+ page.id = 1
+ page.meta().txid -= 1
+ page.meta().checksum = page.meta().sum64()
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 1 copy: %s", err)
+ }
+
+ // Move past the meta pages in the file.
+ if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
+ return n, fmt.Errorf("seek: %s", err)
+ }
+
+ // Copy data pages.
+ wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
+ n += wn
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
+// CopyFile copies the entire database to file at the given path.
+// A reader transaction is maintained during the copy so it is safe to continue
+// using the database while a copy is in progress.
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
+ f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+ if err != nil {
+ return err
+ }
+
+ err = tx.Copy(f)
+ if err != nil {
+ _ = f.Close()
+ return err
+ }
+ return f.Close()
+}
+
+// Check performs several consistency checks on the database for this transaction.
+// An error is returned if any inconsistency is found.
+//
+// It can be safely run concurrently on a writable transaction. However, this
+// incurs a high cost for large databases and databases with a lot of subbuckets
+// because of caching. This overhead can be removed if running on a read-only
+// transaction, however, it is not safe to execute other writer transactions at
+// the same time.
+func (tx *Tx) Check() <-chan error {
+ ch := make(chan error)
+ go tx.check(ch)
+ return ch
+}
+
+func (tx *Tx) check(ch chan error) {
+ // Force loading free list if opened in ReadOnly mode.
+ tx.db.loadFreelist()
+
+ // Check if any pages are double freed.
+ freed := make(map[pgid]bool)
+ all := make([]pgid, tx.db.freelist.count())
+ tx.db.freelist.copyall(all)
+ for _, id := range all {
+ if freed[id] {
+ ch <- fmt.Errorf("page %d: already freed", id)
+ }
+ freed[id] = true
+ }
+
+ // Track every reachable page.
+ reachable := make(map[pgid]*page)
+ reachable[0] = tx.page(0) // meta0
+ reachable[1] = tx.page(1) // meta1
+ if tx.meta.freelist != pgidNoFreelist {
+ for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+ reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+ }
+ }
+
+ // Recursively check buckets.
+ tx.checkBucket(&tx.root, reachable, freed, ch)
+
+ // Ensure all pages below high water mark are either reachable or freed.
+ for i := pgid(0); i < tx.meta.pgid; i++ {
+ _, isReachable := reachable[i]
+ if !isReachable && !freed[i] {
+ ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
+ }
+ }
+
+ // Close the channel to signal completion.
+ close(ch)
+}
+
+func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
+ // Ignore inline buckets.
+ if b.root == 0 {
+ return
+ }
+
+ // Check every page used by this bucket.
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
+ if p.id > tx.meta.pgid {
+ ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
+ }
+
+ // Ensure each page is only referenced once.
+ for i := pgid(0); i <= pgid(p.overflow); i++ {
+ var id = p.id + i
+ if _, ok := reachable[id]; ok {
+ ch <- fmt.Errorf("page %d: multiple references", int(id))
+ }
+ reachable[id] = p
+ }
+
+ // We should only encounter un-freed leaf and branch pages.
+ if freed[p.id] {
+ ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
+ } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
+ ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
+ }
+ })
+
+ // Check each bucket within this bucket.
+ _ = b.ForEach(func(k, v []byte) error {
+ if child := b.Bucket(k); child != nil {
+ tx.checkBucket(child, reachable, freed, ch)
+ }
+ return nil
+ })
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (tx *Tx) allocate(count int) (*page, error) {
+ p, err := tx.db.allocate(tx.meta.txid, count)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save to our page cache.
+ tx.pages[p.id] = p
+
+ // Update statistics.
+ tx.stats.PageCount += count
+ tx.stats.PageAlloc += count * tx.db.pageSize
+
+ return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+ // Sort pages by id.
+ pages := make(pages, 0, len(tx.pages))
+ for _, p := range tx.pages {
+ pages = append(pages, p)
+ }
+ // Clear out page cache early.
+ tx.pages = make(map[pgid]*page)
+ sort.Sort(pages)
+
+ // Write pages to disk in order.
+ for _, p := range pages {
+ size := (int(p.overflow) + 1) * tx.db.pageSize
+ offset := int64(p.id) * int64(tx.db.pageSize)
+
+ // Write out page in "max allocation" sized chunks.
+ ptr := uintptr(unsafe.Pointer(p))
+ for {
+ // Limit our write to our max allocation size.
+ sz := size
+ if sz > maxAllocSize-1 {
+ sz = maxAllocSize - 1
+ }
+
+ // Write chunk to disk.
+ buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: ptr,
+ Len: sz,
+ Cap: sz,
+ }))
+ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+ return err
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ // Exit inner for loop if we've written all the chunks.
+ size -= sz
+ if size == 0 {
+ break
+ }
+
+ // Otherwise move offset forward and move pointer to next chunk.
+ offset += int64(sz)
+ ptr += uintptr(sz)
+ }
+ }
+
+ // Ignore file sync if flag is set on DB.
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Put small pages back to page pool.
+ for _, p := range pages {
+ // Ignore page sizes over 1 page.
+ // These are allocated using make() instead of the page pool.
+ if int(p.overflow) != 0 {
+ continue
+ }
+
+ buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(p)),
+ Len: tx.db.pageSize,
+ Cap: tx.db.pageSize,
+ }))
+
+ // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+ for i := range buf {
+ buf[i] = 0
+ }
+ tx.db.pagePool.Put(buf)
+ }
+
+ return nil
+}
+
+// writeMeta writes the meta to the disk.
+func (tx *Tx) writeMeta() error {
+ // Create a temporary buffer for the meta page.
+ buf := make([]byte, tx.db.pageSize)
+ p := tx.db.pageInBuffer(buf, 0)
+ tx.meta.write(p)
+
+ // Write the meta page to file.
+ if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+ return err
+ }
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+ // Check the dirty pages first.
+ if tx.pages != nil {
+ if p, ok := tx.pages[id]; ok {
+ return p
+ }
+ }
+
+ // Otherwise return directly from the mmap.
+ return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+ p := tx.page(pgid)
+
+ // Execute function.
+ fn(p, depth)
+
+ // Recursively loop over children.
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ tx.forEachPage(elem.pgid, depth+1, fn)
+ }
+ }
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+ if tx.db == nil {
+ return nil, ErrTxClosed
+ } else if pgid(id) >= tx.meta.pgid {
+ return nil, nil
+ }
+
+ // Build the page info.
+ p := tx.db.page(pgid(id))
+ info := &PageInfo{
+ ID: id,
+ Count: int(p.count),
+ OverflowCount: int(p.overflow),
+ }
+
+ // Determine the type (or if it's free).
+ if tx.db.freelist.freed(pgid(id)) {
+ info.Type = "free"
+ } else {
+ info.Type = p.typ()
+ }
+
+ return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+ // Page statistics.
+ PageCount int // number of page allocations
+ PageAlloc int // total bytes allocated
+
+ // Cursor statistics.
+ CursorCount int // number of cursors created
+
+ // Node statistics
+ NodeCount int // number of node allocations
+ NodeDeref int // number of node dereferences
+
+ // Rebalance statistics.
+ Rebalance int // number of node rebalances
+ RebalanceTime time.Duration // total time spent rebalancing
+
+ // Split/Spill statistics.
+ Split int // number of nodes split
+ Spill int // number of nodes spilled
+ SpillTime time.Duration // total time spent spilling
+
+ // Write statistics.
+ Write int // number of writes performed
+ WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+ s.PageCount += other.PageCount
+ s.PageAlloc += other.PageAlloc
+ s.CursorCount += other.CursorCount
+ s.NodeCount += other.NodeCount
+ s.NodeDeref += other.NodeDeref
+ s.Rebalance += other.Rebalance
+ s.RebalanceTime += other.RebalanceTime
+ s.Split += other.Split
+ s.Spill += other.Spill
+ s.SpillTime += other.SpillTime
+ s.Write += other.Write
+ s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats {
+ var diff TxStats
+ diff.PageCount = s.PageCount - other.PageCount
+ diff.PageAlloc = s.PageAlloc - other.PageAlloc
+ diff.CursorCount = s.CursorCount - other.CursorCount
+ diff.NodeCount = s.NodeCount - other.NodeCount
+ diff.NodeDeref = s.NodeDeref - other.NodeDeref
+ diff.Rebalance = s.Rebalance - other.Rebalance
+ diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
+ diff.Split = s.Split - other.Split
+ diff.Spill = s.Spill - other.Spill
+ diff.SpillTime = s.SpillTime - other.SpillTime
+ diff.Write = s.Write - other.Write
+ diff.WriteTime = s.WriteTime - other.WriteTime
+ return diff
+}
diff --git a/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
new file mode 100644
index 00000000..1cade6ce
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
new file mode 100644
index 00000000..af62279a
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
@@ -0,0 +1,20 @@
+package md2man
+
+import (
+ "github.com/russross/blackfriday"
+)
+
+// Render converts a markdown document into a roff formatted document.
+func Render(doc []byte) []byte {
+ renderer := RoffRenderer(0)
+ extensions := 0
+ extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+ extensions |= blackfriday.EXTENSION_TABLES
+ extensions |= blackfriday.EXTENSION_FENCED_CODE
+ extensions |= blackfriday.EXTENSION_AUTOLINK
+ extensions |= blackfriday.EXTENSION_SPACE_HEADERS
+ extensions |= blackfriday.EXTENSION_FOOTNOTES
+ extensions |= blackfriday.EXTENSION_TITLEBLOCK
+
+ return blackfriday.Markdown(doc, renderer, extensions)
+}
diff --git a/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
new file mode 100644
index 00000000..8c29ec68
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
@@ -0,0 +1,285 @@
+package md2man
+
+import (
+ "bytes"
+ "fmt"
+ "html"
+ "strings"
+
+ "github.com/russross/blackfriday"
+)
+
+type roffRenderer struct {
+ ListCounters []int
+}
+
+// RoffRenderer creates a new blackfriday Renderer for generating roff documents
+// from markdown
+func RoffRenderer(flags int) blackfriday.Renderer {
+ return &roffRenderer{}
+}
+
+func (r *roffRenderer) GetFlags() int {
+ return 0
+}
+
+func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
+ out.WriteString(".TH ")
+
+ splitText := bytes.Split(text, []byte("\n"))
+ for i, line := range splitText {
+ line = bytes.TrimPrefix(line, []byte("% "))
+ if i == 0 {
+ line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
+ line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
+ }
+ line = append([]byte("\""), line...)
+ line = append(line, []byte("\" ")...)
+ out.Write(line)
+ }
+ out.WriteString("\n")
+
+ // disable hyphenation
+ out.WriteString(".nh\n")
+ // disable justification (adjust text to left margin only)
+ out.WriteString(".ad l\n")
+}
+
+func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ out.WriteString("\n.PP\n.RS\n\n.nf\n")
+ escapeSpecialChars(out, text)
+ out.WriteString("\n.fi\n.RE\n")
+}
+
+func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
+ out.WriteString("\n.PP\n.RS\n")
+ out.Write(text)
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint
+ out.Write(text)
+}
+
+func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+
+ switch {
+ case marker == 0:
+ // This is the doc header
+ out.WriteString(".TH ")
+ case level == 1:
+ out.WriteString("\n\n.SH ")
+ case level == 2:
+ out.WriteString("\n.SH ")
+ default:
+ out.WriteString("\n.SS ")
+ }
+
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) HRule(out *bytes.Buffer) {
+ out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
+}
+
+func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ r.ListCounters = append(r.ListCounters, 1)
+ out.WriteString("\n.RS\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ r.ListCounters = r.ListCounters[:len(r.ListCounters)-1]
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1]))
+ r.ListCounters[len(r.ListCounters)-1]++
+ } else {
+ out.WriteString(".IP \\(bu 2\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ out.WriteString("\n.PP\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if marker != 0 {
+ out.WriteString("\n")
+ }
+}
+
+func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ out.WriteString("\n.TS\nallbox;\n")
+
+ maxDelims := 0
+ lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n")
+ for _, w := range lines {
+ curDelims := strings.Count(w, "\t")
+ if curDelims > maxDelims {
+ maxDelims = curDelims
+ }
+ }
+ out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n"))
+ out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n"))
+ out.Write(header)
+ if len(header) > 0 {
+ out.Write([]byte("\n"))
+ }
+
+ out.Write(body)
+ out.WriteString("\n.TE\n")
+}
+
+func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
+ if out.Len() > 0 {
+ out.WriteString("\n")
+ }
+ out.Write(text)
+}
+
+func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ if len(text) == 0 {
+ text = []byte{' '}
+ }
+ out.Write([]byte("\\fB\\fC" + string(text) + "\\fR"))
+}
+
+func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ if len(text) > 30 {
+ text = append([]byte("T{\n"), text...)
+ text = append(text, []byte("\nT}")...)
+ }
+ if len(text) == 0 {
+ text = []byte{' '}
+ }
+ out.Write(text)
+}
+
+func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ out.WriteString("\n\\[la]")
+ out.Write(link)
+ out.WriteString("\\[ra]")
+}
+
+func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB\\fC")
+ escapeSpecialChars(out, text)
+ out.WriteString("\\fR")
+}
+
+func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fI")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+}
+
+func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
+ out.WriteString("\n.br\n")
+}
+
+func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ out.Write(content)
+ r.AutoLink(out, link, 0)
+}
+
+func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint
+ out.Write(tag)
+}
+
+func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\s+2")
+ out.Write(text)
+ out.WriteString("\\s-2")
+}
+
+func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
+}
+
+func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
+ out.WriteString(html.UnescapeString(string(entity)))
+}
+
+func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
+ escapeSpecialChars(out, text)
+}
+
+func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
+}
+
+func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
+}
+
+func needsBackslash(c byte) bool {
+ for _, r := range []byte("-_&\\~") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // escape initial apostrophe or period
+ if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
+ out.WriteString("\\&")
+ }
+
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && !needsBackslash(text[i]) {
+ i++
+ }
+ if i > org {
+ out.Write(text[org:i])
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+ out.WriteByte('\\')
+ out.WriteByte(text[i])
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/.travis.yml b/src/margo.sh/vendor/github.com/dustin/go-humanize/.travis.yml
new file mode 100644
index 00000000..ba95cdd1
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go:
+ - 1.3.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/LICENSE b/src/margo.sh/vendor/github.com/dustin/go-humanize/LICENSE
new file mode 100644
index 00000000..8d9a94a9
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2005-2008 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/README.markdown b/src/margo.sh/vendor/github.com/dustin/go-humanize/README.markdown
new file mode 100644
index 00000000..91b4ae56
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/README.markdown
@@ -0,0 +1,124 @@
+# Humane Units [](https://travis-ci.org/dustin/go-humanize) [](https://godoc.org/github.com/dustin/go-humanize)
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like, `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+ 0 -> 0th
+ 1 -> 1st
+ 2 -> 2nd
+ 3 -> 3rd
+ 4 -> 4th
+ [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+ 0 -> 0
+ 100 -> 100
+ 1000 -> 1,000
+ 1000000000 -> 1,000,000,000
+ -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24) // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0) // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated word lists with conjunctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/big.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 00000000..f49dc337
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
+package humanize
+
+import (
+ "math/big"
+)
+
+// order of magnitude (to a max order)
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ if mag == maxmag && maxmag >= 0 {
+ break
+ }
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// total order of magnitude
+// (same as above, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/bigbytes.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 00000000..1a2bf617
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,173 @@
+package humanize
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+ "unicode"
+)
+
+var (
+ bigIECExp = big.NewInt(1024)
+
+ // BigByte is one byte in bit.Ints
+ BigByte = big.NewInt(1)
+ // BigKiByte is 1,024 bytes in bit.Ints
+ BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+ // BigMiByte is 1,024 k bytes in bit.Ints
+ BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+ // BigGiByte is 1,024 m bytes in bit.Ints
+ BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+ // BigTiByte is 1,024 g bytes in bit.Ints
+ BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+ // BigPiByte is 1,024 t bytes in bit.Ints
+ BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+ // BigEiByte is 1,024 p bytes in bit.Ints
+ BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+ // BigZiByte is 1,024 e bytes in bit.Ints
+ BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+ // BigYiByte is 1,024 z bytes in bit.Ints
+ BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+)
+
+var (
+ bigSIExp = big.NewInt(1000)
+
+ // BigSIByte is one SI byte in big.Ints
+ BigSIByte = big.NewInt(1)
+ // BigKByte is 1,000 SI bytes in big.Ints
+ BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+ // BigMByte is 1,000 SI k bytes in big.Ints
+ BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+ // BigGByte is 1,000 SI m bytes in big.Ints
+ BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+ // BigTByte is 1,000 SI g bytes in big.Ints
+ BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+ // BigPByte is 1,000 SI t bytes in big.Ints
+ BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+ // BigEByte is 1,000 SI p bytes in big.Ints
+ BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+ // BigZByte is 1,000 SI e bytes in big.Ints
+ BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+ // BigYByte is 1,000 SI z bytes in big.Ints
+ BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+ "b": BigByte,
+ "kib": BigKiByte,
+ "kb": BigKByte,
+ "mib": BigMiByte,
+ "mb": BigMByte,
+ "gib": BigGiByte,
+ "gb": BigGByte,
+ "tib": BigTiByte,
+ "tb": BigTByte,
+ "pib": BigPiByte,
+ "pb": BigPByte,
+ "eib": BigEiByte,
+ "eb": BigEByte,
+ "zib": BigZiByte,
+ "zb": BigZByte,
+ "yib": BigYiByte,
+ "yb": BigYByte,
+ // Without suffix
+ "": BigByte,
+ "ki": BigKiByte,
+ "k": BigKByte,
+ "mi": BigMiByte,
+ "m": BigMByte,
+ "gi": BigGiByte,
+ "g": BigGByte,
+ "ti": BigTiByte,
+ "t": BigTByte,
+ "pi": BigPiByte,
+ "p": BigPByte,
+ "ei": BigEiByte,
+ "e": BigEByte,
+ "z": BigZByte,
+ "zi": BigZiByte,
+ "y": BigYByte,
+ "yi": BigYiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+ if s.Cmp(ten) < 0 {
+ return fmt.Sprintf("%d B", s)
+ }
+ c := (&big.Int{}).Set(s)
+ val, mag := oomm(c, base, len(sizes)-1)
+ suffix := sizes[mag]
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+ return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ val := &big.Rat{}
+ _, err := fmt.Sscanf(num, "%f", val)
+ if err != nil {
+ return nil, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bigBytesSizeTable[extra]; ok {
+ mv := (&big.Rat{}).SetInt(m)
+ val.Mul(val, mv)
+ rv := &big.Int{}
+ rv.Div(val.Num(), val.Denom())
+ return rv, nil
+ }
+
+ return nil, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/bytes.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 00000000..0b498f48
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// IEC Sizes.
+// kibis of bits
+const (
+ Byte = 1 << (iota * 10)
+ KiByte
+ MiByte
+ GiByte
+ TiByte
+ PiByte
+ EiByte
+)
+
+// SI Sizes.
+const (
+ IByte = 1
+ KByte = IByte * 1000
+ MByte = KByte * 1000
+ GByte = MByte * 1000
+ TByte = GByte * 1000
+ PByte = TByte * 1000
+ EByte = PByte * 1000
+)
+
+var bytesSizeTable = map[string]uint64{
+ "b": Byte,
+ "kib": KiByte,
+ "kb": KByte,
+ "mib": MiByte,
+ "mb": MByte,
+ "gib": GiByte,
+ "gb": GByte,
+ "tib": TiByte,
+ "tb": TByte,
+ "pib": PiByte,
+ "pb": PByte,
+ "eib": EiByte,
+ "eb": EByte,
+ // Without suffix
+ "": Byte,
+ "ki": KiByte,
+ "k": KByte,
+ "mi": MiByte,
+ "m": MByte,
+ "gi": GiByte,
+ "g": GByte,
+ "ti": TiByte,
+ "t": TByte,
+ "pi": PiByte,
+ "p": PByte,
+ "ei": EiByte,
+ "e": EByte,
+}
+
+func logn(n, b float64) float64 {
+ return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+ if s < 10 {
+ return fmt.Sprintf("%d B", s)
+ }
+ e := math.Floor(logn(float64(s), base))
+ suffix := sizes[int(e)]
+ val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+}
+
+// Bytes produces a human readable representation of an SI size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+ return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+ return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ f, err := strconv.ParseFloat(num, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bytesSizeTable[extra]; ok {
+ f *= float64(m)
+ if f >= math.MaxUint64 {
+ return 0, fmt.Errorf("too large: %v", s)
+ }
+ return uint64(f), nil
+ }
+
+ return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/comma.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 00000000..520ae3e5
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,116 @@
+package humanize
+
+import (
+ "bytes"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+ sign := ""
+
+ // Min int64 can't be negated to a usable value, so it has to be special cased.
+ if v == math.MinInt64 {
+ return "-9,223,372,036,854,775,808"
+ }
+
+ if v < 0 {
+ sign = "-"
+ v = 0 - v
+ }
+
+ parts := []string{"", "", "", "", "", "", ""}
+ j := len(parts) - 1
+
+ for v > 999 {
+ parts[j] = strconv.FormatInt(v%1000, 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ v = v / 1000
+ j--
+ }
+ parts[j] = strconv.Itoa(int(v))
+ return sign + strings.Join(parts[j:], ",")
+}
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+ buf := &bytes.Buffer{}
+ if v < 0 {
+ buf.Write([]byte{'-'})
+ v = 0 - v
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
+
+// CommafWithDigits works like the Commaf but limits the resulting
+// string to the given number of decimal places.
+//
+// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
+func CommafWithDigits(f float64, decimals int) string {
+ return stripTrailingDigits(Commaf(f), decimals)
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+func BigComma(b *big.Int) string {
+ sign := ""
+ if b.Sign() < 0 {
+ sign = "-"
+ b.Abs(b)
+ }
+
+ athousand := big.NewInt(1000)
+ c := (&big.Int{}).Set(b)
+ _, m := oom(c, athousand)
+ parts := make([]string, m+1)
+ j := len(parts) - 1
+
+ mod := &big.Int{}
+ for b.Cmp(athousand) >= 0 {
+ b.DivMod(b, athousand, mod)
+ parts[j] = strconv.FormatInt(mod.Int64(), 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ j--
+ }
+ parts[j] = strconv.Itoa(int(b.Int64()))
+ return sign + strings.Join(parts[j:], ",")
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/commaf.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 00000000..620690de
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,40 @@
+// +build go1.6
+
+package humanize
+
+import (
+ "bytes"
+ "math/big"
+ "strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+func BigCommaf(v *big.Float) string {
+ buf := &bytes.Buffer{}
+ if v.Sign() < 0 {
+ buf.Write([]byte{'-'})
+ v.Abs(v)
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(v.Text('f', -1), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/ftoa.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 00000000..1c62b640
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,46 @@
+package humanize
+
+import (
+ "strconv"
+ "strings"
+)
+
+func stripTrailingZeros(s string) string {
+ offset := len(s) - 1
+ for offset > 0 {
+ if s[offset] == '.' {
+ offset--
+ break
+ }
+ if s[offset] != '0' {
+ break
+ }
+ offset--
+ }
+ return s[:offset+1]
+}
+
+func stripTrailingDigits(s string, digits int) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ if digits <= 0 {
+ return s[:i]
+ }
+ i++
+ if i+digits >= len(s) {
+ return s
+ }
+ return s[:i+digits]
+ }
+ return s
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+func Ftoa(num float64) string {
+ return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
+
+// FtoaWithDigits converts a float to a string but limits the resulting string
+// to the given number of decimal places, and no trailing zeros.
+func FtoaWithDigits(num float64, digits int) string {
+ return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/humanize.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 00000000..a2c2da31
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", numbers
+representing sizes like 82854982 into useful strings like, "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/number.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 00000000..dec61865
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+ "math"
+ "strconv"
+)
+
+var (
+ renderFloatPrecisionMultipliers = [...]float64{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ }
+
+ renderFloatPrecisionRounders = [...]float64{
+ 0.5,
+ 0.05,
+ 0.005,
+ 0.0005,
+ 0.00005,
+ 0.000005,
+ 0.0000005,
+ 0.00000005,
+ 0.000000005,
+ 0.0000000005,
+ }
+)
+
+// FormatFloat produces a formatted number as string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := RenderFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.67"
+// "#,###." => "12,345"
+// "#,###" => "12345,678"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,###### => 12.345,678900
+// "" (aka default format) => 12,345.67
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer number, FormatInteger(),
+// which is convenient for calls within template.
+func FormatFloat(format string, n float64) string {
+ // Special cases:
+ // NaN = "NaN"
+ // +Inf = "+Infinity"
+ // -Inf = "-Infinity"
+ if math.IsNaN(n) {
+ return "NaN"
+ }
+ if n > math.MaxFloat64 {
+ return "Infinity"
+ }
+ if n < -math.MaxFloat64 {
+ return "-Infinity"
+ }
+
+ // default format
+ precision := 2
+ decimalStr := "."
+ thousandStr := ","
+ positiveStr := ""
+ negativeStr := "-"
+
+ if len(format) > 0 {
+ format := []rune(format)
+
+ // If there is an explicit format directive,
+ // then default values are these:
+ precision = 9
+ thousandStr = ""
+
+ // collect indices of meaningful formatting directives
+ formatIndx := []int{}
+ for i, char := range format {
+ if char != '#' && char != '0' {
+ formatIndx = append(formatIndx, i)
+ }
+ }
+
+ if len(formatIndx) > 0 {
+ // Directive at index 0:
+ // Must be a '+'
+ // Raise an error if not the case
+ // index: 0123456789
+ // +0.000,000
+ // +000,000.0
+ // +0000.00
+ // +0000
+ if formatIndx[0] == 0 {
+ if format[formatIndx[0]] != '+' {
+ panic("RenderFloat(): invalid positive sign directive")
+ }
+ positiveStr = "+"
+ formatIndx = formatIndx[1:]
+ }
+
+ // Two directives:
+ // First is thousands separator
+ // Raise an error if not followed by 3-digit
+ // 0123456789
+ // 0.000,000
+ // 000,000.00
+ if len(formatIndx) == 2 {
+ if (formatIndx[1] - formatIndx[0]) != 4 {
+ panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+ }
+ thousandStr = string(format[formatIndx[0]])
+ formatIndx = formatIndx[1:]
+ }
+
+ // One directive:
+ // Directive is decimal separator
+ // The number of digit-specifier following the separator indicates wanted precision
+ // 0123456789
+ // 0.00
+ // 000,0000
+ if len(formatIndx) == 1 {
+ decimalStr = string(format[formatIndx[0]])
+ precision = len(format) - formatIndx[0] - 1
+ }
+ }
+ }
+
+ // generate sign part
+ var signStr string
+ if n >= 0.000000001 {
+ signStr = positiveStr
+ } else if n <= -0.000000001 {
+ signStr = negativeStr
+ n = -n
+ } else {
+ signStr = ""
+ n = 0.0
+ }
+
+ // split number into integer and fractional parts
+ intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
+
+ // generate integer part string
+ intStr := strconv.FormatInt(int64(intf), 10)
+
+ // add thousand separator if required
+ if len(thousandStr) > 0 {
+ for i := len(intStr); i > 3; {
+ i -= 3
+ intStr = intStr[:i] + thousandStr + intStr[i:]
+ }
+ }
+
+ // no fractional part, we can leave now
+ if precision == 0 {
+ return signStr + intStr
+ }
+
+ // generate fractional part
+ fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
+ // may need padding
+ if len(fracStr) < precision {
+ fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
+ }
+
+ return signStr + intStr + decimalStr + fracStr
+}
+
+// FormatInteger produces a formatted number as string.
+// See FormatFloat.
+func FormatInteger(format string, n int) string {
+ return FormatFloat(format, float64(n))
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/ordinals.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 00000000..43d88a86
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
+package humanize
+
+import "strconv"
+
+// Ordinal gives you the input number in a rank/ordinal format.
+//
+// Ordinal(3) -> 3rd
+func Ordinal(x int) string {
+ suffix := "th"
+ switch x % 10 {
+ case 1:
+ if x%100 != 11 {
+ suffix = "st"
+ }
+ case 2:
+ if x%100 != 12 {
+ suffix = "nd"
+ }
+ case 3:
+ if x%100 != 13 {
+ suffix = "rd"
+ }
+ }
+ return strconv.Itoa(x) + suffix
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/si.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 00000000..ae659e0e
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,123 @@
+package humanize
+
+import (
+ "errors"
+ "math"
+ "regexp"
+ "strconv"
+)
+
+var siPrefixTable = map[float64]string{
+ -24: "y", // yocto
+ -21: "z", // zepto
+ -18: "a", // atto
+ -15: "f", // femto
+ -12: "p", // pico
+ -9: "n", // nano
+ -6: "µ", // micro
+ -3: "m", // milli
+ 0: "",
+ 3: "k", // kilo
+ 6: "M", // mega
+ 9: "G", // giga
+ 12: "T", // tera
+ 15: "P", // peta
+ 18: "E", // exa
+ 21: "Z", // zetta
+ 24: "Y", // yotta
+}
+
+var revSIPrefixTable = revfmap(siPrefixTable)
+
+// revfmap reverses the map and precomputes the power multiplier
+func revfmap(in map[float64]string) map[string]float64 {
+ rv := map[string]float64{}
+ for k, v := range in {
+ rv[v] = math.Pow(10, k)
+ }
+ return rv
+}
+
+var riParseRegex *regexp.Regexp
+
+func init() {
+ ri := `^([\-0-9.]+)\s?([`
+ for _, v := range siPrefixTable {
+ ri += v
+ }
+ ri += `]?)(.*)`
+
+ riParseRegex = regexp.MustCompile(ri)
+}
+
+// ComputeSI finds the most appropriate SI prefix for the given number
+// and returns the prefix along with the value adjusted to be within
+// that prefix.
+//
+// See also: SI, ParseSI.
+//
+// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
+func ComputeSI(input float64) (float64, string) {
+ if input == 0 {
+ return 0, ""
+ }
+ mag := math.Abs(input)
+ exponent := math.Floor(logn(mag, 10))
+ exponent = math.Floor(exponent/3) * 3
+
+ value := mag / math.Pow(10, exponent)
+
+ // Handle special case where value is exactly 1000.0
+ // Should return 1 M instead of 1000 k
+ if value == 1000.0 {
+ exponent += 3
+ value = mag / math.Pow(10, exponent)
+ }
+
+ value = math.Copysign(value, input)
+
+ prefix := siPrefixTable[exponent]
+ return value, prefix
+}
+
+// SI returns a string with default formatting.
+//
+// SI uses Ftoa to format float value, removing trailing zeros.
+//
+// See also: ComputeSI, ParseSI.
+//
+// e.g. SI(1000000, "B") -> 1 MB
+// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
+func SI(input float64, unit string) string {
+ value, prefix := ComputeSI(input)
+ return Ftoa(value) + " " + prefix + unit
+}
+
+// SIWithDigits works like SI but limits the resulting string to the
+// given number of decimal places.
+//
+// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
+// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
+func SIWithDigits(input float64, decimals int, unit string) string {
+ value, prefix := ComputeSI(input)
+ return FtoaWithDigits(value, decimals) + " " + prefix + unit
+}
+
+var errInvalid = errors.New("invalid input")
+
+// ParseSI parses an SI string back into the number and unit.
+//
+// See also: SI, ComputeSI.
+//
+// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
+func ParseSI(input string) (float64, string, error) {
+ found := riParseRegex.FindStringSubmatch(input)
+ if len(found) != 4 {
+ return 0, "", errInvalid
+ }
+ mag := revSIPrefixTable[found[2]]
+ unit := found[3]
+
+ base, err := strconv.ParseFloat(found[1], 64)
+ return base * mag, unit, err
+}
diff --git a/src/margo.sh/vendor/github.com/dustin/go-humanize/times.go b/src/margo.sh/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 00000000..dd3fbf5e
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "time"
+)
+
+// Seconds-based time units
+const (
+ Day = 24 * time.Hour
+ Week = 7 * Day
+ Month = 30 * Day
+ Year = 12 * Month
+ LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+ return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string. A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
+// DivBy should be time.Minute so whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+ D time.Duration
+ Format string
+ DivBy time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+ {time.Second, "now", time.Second},
+ {2 * time.Second, "1 second %s", 1},
+ {time.Minute, "%d seconds %s", time.Second},
+ {2 * time.Minute, "1 minute %s", 1},
+ {time.Hour, "%d minutes %s", time.Minute},
+ {2 * time.Hour, "1 hour %s", 1},
+ {Day, "%d hours %s", time.Hour},
+ {2 * Day, "1 day %s", 1},
+ {Week, "%d days %s", Day},
+ {2 * Week, "1 week %s", 1},
+ {Month, "%d weeks %s", Week},
+ {2 * Month, "1 month %s", 1},
+ {Year, "%d months %s", Month},
+ {18 * Month, "1 year %s", 1},
+ {2 * Year, "2 years %s", 1},
+ {LongTime, "%d years %s", Year},
+ {math.MaxInt64, "a long while %s", 1},
+}
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels. In addition to the generic time
+// delta string (e.g. 5 minutes), the labels are used applied so that
+// the label corresponding to the smaller time is applied.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+ return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times two labels and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are used applied so that the label corresponding to the
+// smaller time is applied.
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+ lbl := albl
+ diff := b.Sub(a)
+
+ if a.After(b) {
+ lbl = blbl
+ diff = a.Sub(b)
+ }
+
+ n := sort.Search(len(magnitudes), func(i int) bool {
+ return magnitudes[i].D > diff
+ })
+
+ if n >= len(magnitudes) {
+ n = len(magnitudes) - 1
+ }
+ mag := magnitudes[n]
+ args := []interface{}{}
+ escaped := false
+ for _, ch := range mag.Format {
+ if escaped {
+ switch ch {
+ case 's':
+ args = append(args, lbl)
+ case 'd':
+ args = append(args, diff/mag.DivBy)
+ }
+ escaped = false
+ } else {
+ escaped = ch == '%'
+ }
+ }
+ return fmt.Sprintf(mag.Format, args...)
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/.gitignore b/src/margo.sh/vendor/github.com/karrick/godirwalk/.gitignore
new file mode 100644
index 00000000..ea2d9ece
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/.gitignore
@@ -0,0 +1,19 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+examples/remove-empty-directories/remove-empty-directories
+examples/sizes/sizes
+examples/walk-fast/walk-fast
+examples/walk-stdlib/walk-stdlib
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/LICENSE b/src/margo.sh/vendor/github.com/karrick/godirwalk/LICENSE
new file mode 100644
index 00000000..01ce194c
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/LICENSE
@@ -0,0 +1,25 @@
+BSD 2-Clause License
+
+Copyright (c) 2017, Karrick McDermott
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md b/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md
new file mode 100644
index 00000000..72c51a5e
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md
@@ -0,0 +1,223 @@
+# godirwalk
+
+`godirwalk` is a library for traversing a directory tree on a file
+system.
+
+[](https://godoc.org/github.com/karrick/godirwalk) [](https://dev.azure.com/microsoft0235/microsoft/_build/latest?definitionId=1&branchName=master)
+
+In short, why do I use this library?
+
+1. It's faster than `filepath.Walk`.
+1. It's more correct on Windows than `filepath.Walk`.
+1. It's more easy to use than `filepath.Walk`.
+1. It's more flexible than `filepath.Walk`.
+
+## Usage Example
+
+Additional examples are provided in the `examples/` subdirectory.
+
+This library will normalize the provided top level directory name
+based on the os-specific path separator by calling `filepath.Clean` on
+its first argument. However it always provides the pathname created by
+using the correct os-specific path separator when invoking the
+provided callback function.
+
+```Go
+ dirname := "some/directory/root"
+ err := godirwalk.Walk(dirname, &godirwalk.Options{
+ Callback: func(osPathname string, de *godirwalk.Dirent) error {
+ fmt.Printf("%s %s\n", de.ModeType(), osPathname)
+ return nil
+ },
+ Unsorted: true, // (optional) set true for faster yet non-deterministic enumeration (see godoc)
+ })
+```
+
+This library not only provides functions for traversing a file system
+directory tree, but also for obtaining a list of immediate descendants
+of a particular directory, typically much more quickly than using
+`os.ReadDir` or `os.ReadDirnames`.
+
+## Description
+
+Here's why I use `godirwalk` in preference to `filepath.Walk`,
+`os.ReadDir`, and `os.ReadDirnames`.
+
+### It's faster than `filepath.Walk`
+
+When compared against `filepath.Walk` in benchmarks, it has been
+observed to run between five and ten times the speed on darwin, at
+speeds comparable to the that of the unix `find` utility; about twice
+the speed on linux; and about four times the speed on Windows.
+
+How does it obtain this performance boost? It does less work to give
+you nearly the same output. This library calls the same `syscall`
+functions to do the work, but it makes fewer calls, does not throw
+away information that it might need, and creates less memory churn
+along the way by reusing the same scratch buffer for reading from a
+directory rather than reallocating a new buffer every time it reads
+file system entry data from the operating system.
+
+While traversing a file system directory tree, `filepath.Walk` obtains
+the list of immediate descendants of a directory, and throws away the
+file system node type information provided by the operating system
+that comes with the node's name. Then, immediately prior to invoking
+the callback function, `filepath.Walk` invokes `os.Stat` for each
+node, and passes the returned `os.FileInfo` information to the
+callback.
+
+While the `os.FileInfo` information provided by `os.Stat` is extremely
+helpful--and even includes the `os.FileMode` data--providing it
+requires an additional system call for each node.
+
+Because most callbacks only care about what the node type is, this
+library does not throw the type information away, but rather provides
+that information to the callback function in the form of a
+`os.FileMode` value. Note that the provided `os.FileMode` value that
+this library provides only has the node type information, and does not
+have the permission bits, sticky bits, or other information from the
+file's mode. If the callback does care about a particular node's
+entire `os.FileInfo` data structure, the callback can easiy invoke
+`os.Stat` when needed, and only when needed.
+
+#### Benchmarks
+
+##### macOS
+
+```Bash
+$ go test -bench=. -benchmem
+goos: darwin
+goarch: amd64
+pkg: github.com/karrick/godirwalk
+BenchmarkReadDirnamesStandardLibrary-12 50000 26250 ns/op 10360 B/op 16 allocs/op
+BenchmarkReadDirnamesThisLibrary-12 50000 24372 ns/op 5064 B/op 20 allocs/op
+BenchmarkFilepathWalk-12 1 1099524875 ns/op 228415912 B/op 416952 allocs/op
+BenchmarkGodirwalk-12 2 526754589 ns/op 103110464 B/op 451442 allocs/op
+BenchmarkGodirwalkUnsorted-12 3 509219296 ns/op 100751400 B/op 378800 allocs/op
+BenchmarkFlameGraphFilepathWalk-12 1 7478618820 ns/op 2284138176 B/op 4169453 allocs/op
+BenchmarkFlameGraphGodirwalk-12 1 4977264058 ns/op 1031105328 B/op 4514423 allocs/op
+PASS
+ok github.com/karrick/godirwalk 21.219s
+```
+
+##### Linux
+
+```Bash
+$ go test -bench=. -benchmem
+goos: linux
+goarch: amd64
+pkg: github.com/karrick/godirwalk
+BenchmarkReadDirnamesStandardLibrary-12 100000 15458 ns/op 10360 B/op 16 allocs/op
+BenchmarkReadDirnamesThisLibrary-12 100000 14646 ns/op 5064 B/op 20 allocs/op
+BenchmarkFilepathWalk-12 2 631034745 ns/op 228210216 B/op 416939 allocs/op
+BenchmarkGodirwalk-12 3 358714883 ns/op 102988664 B/op 451437 allocs/op
+BenchmarkGodirwalkUnsorted-12 3 355363915 ns/op 100629234 B/op 378796 allocs/op
+BenchmarkFlameGraphFilepathWalk-12 1 6086913991 ns/op 2282104720 B/op 4169417 allocs/op
+BenchmarkFlameGraphGodirwalk-12 1 3456398824 ns/op 1029886400 B/op 4514373 allocs/op
+PASS
+ok github.com/karrick/godirwalk 19.179s
+```
+
+### It's more correct on Windows than `filepath.Walk`
+
+I did not previously care about this either, but humor me. We all love
+how we can write once and run everywhere. It is essential for the
+language's adoption, growth, and success, that the software we create
+can run unmodified on all architectures and operating systems
+supported by Go.
+
+When the traversed file system has a logical loop caused by symbolic
+links to directories, on unix `filepath.Walk` ignores symbolic links
+and traverses the entire directory tree without error. On Windows
+however, `filepath.Walk` will continue following directory symbolic
+links, even though it is not supposed to, eventually causing
+`filepath.Walk` to terminate early and return an error when the
+pathname gets too long from concatenating endless loops of symbolic
+links onto the pathname. This error comes from Windows, passes through
+`filepath.Walk`, and to the upstream client running `filepath.Walk`.
+
+The takeaway is that behavior is different based on which platform
+`filepath.Walk` is running. While this is clearly not intentional,
+until it is fixed in the standard library, it presents a compatibility
+problem.
+
+This library correctly identifies symbolic links that point to
+directories and will only follow them when `FollowSymbolicLinks` is
+set to true. Behavior on Windows and other operating systems is
+identical.
+
+### It's more easy to use than `filepath.Walk`
+
+Since this library does not invoke `os.Stat` on every file system node
+it encounters, there is no possible error event for the callback
+function to filter on. The third argument in the `filepath.WalkFunc`
+function signature to pass the error from `os.Stat` to the callback
+function is no longer necessary, and thus eliminated from signature of
+the callback function from this library.
+
+Also, `filepath.Walk` invokes the callback function with a solidus
+delimited pathname regardless of the os-specific path separator. This
+library invokes the callback function with the os-specific pathname
+separator, obviating a call to `filepath.Clean` in the callback
+function for each node prior to actually using the provided pathname.
+
+In other words, even on Windows, `filepath.Walk` will invoke the
+callback with `some/path/to/foo.txt`, requiring well written clients
+to perform pathname normalization for every file prior to working with
+the specified file. In truth, many clients developed on unix and not
+tested on Windows neglect this subtlety, and will result in software
+bugs when running on Windows. This library would invoke the callback
+function with `some\path\to\foo.txt` for the same file when running on
+Windows, eliminating the need to normalize the pathname by the client,
+and lessen the likelyhood that a client will work on unix but not on
+Windows.
+
+### It's more flexible than `filepath.Walk`
+
+#### Configurable Handling of Symbolic Links
+
+The default behavior of this library is to ignore symbolic links to
+directories when walking a directory tree, just like `filepath.Walk`
+does. However, it does invoke the callback function with each node it
+finds, including symbolic links. If a particular use case exists to
+follow symbolic links when traversing a directory tree, this library
+can be invoked in manner to do so, by setting the
+`FollowSymbolicLinks` parameter to true.
+
+#### Configurable Sorting of Directory Children
+
+The default behavior of this library is to always sort the immediate
+descendants of a directory prior to visiting each node, just like
+`filepath.Walk` does. This is usually the desired behavior. However,
+this does come at slight performance and memory penalties required to
+sort the names when a directory node has many entries. Additionally if
+caller specifies `Unsorted` enumeration, reading directories is lazily
+performed as the caller consumes entries. If a particular use case
+exists that does not require sorting the directory's immediate
+descendants prior to visiting its nodes, this library will skip the
+sorting step when the `Unsorted` parameter is set to true.
+
+Here's an interesting read of the potential hazzards of traversing a
+file system hierarchy in a non-deterministic order. If you know the
+problem you are solving is not affected by the order files are
+visited, then I encourage you to use `Unsorted`. Otherwise skip
+setting this option.
+
+[Researchers find bug in Python script may have affected hundreds of studies](https://arstechnica.com/information-technology/2019/10/chemists-discover-cross-platform-python-scripts-not-so-cross-platform/)
+
+#### Configurable Post Children Callback
+
+This library provides upstream code with the ability to specify a
+callback to be invoked for each directory after its children are
+processed. This has been used to recursively delete empty directories
+after traversing the file system in a more efficient manner. See the
+`examples/clean-empties` directory for an example of this usage.
+
+#### Configurable Error Callback
+
+This library provides upstream code with the ability to specify a
+callback to be invoked for errors that the operating system returns,
+allowing the upstream code to determine the next course of action to
+take, whether to halt walking the hierarchy, as it would do were no
+error callback provided, or skip the node that caused the error. See
+the `examples/walk-fast` directory for an example of this usage.
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml b/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml
new file mode 100644
index 00000000..d56f2484
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml
@@ -0,0 +1,53 @@
+# Go
+# Build your Go project.
+# Add steps that test, save build artifacts, deploy, and more:
+# https://docs.microsoft.com/azure/devops/pipelines/languages/go
+
+trigger:
+- master
+
+variables:
+ GOVERSION: 1.13
+
+jobs:
+ - job: Linux
+ pool:
+ vmImage: 'ubuntu-latest'
+ steps:
+ - task: GoTool@0
+ displayName: 'Use Go $(GOVERSION)'
+ inputs:
+ version: $(GOVERSION)
+ - task: Go@0
+ inputs:
+ command: test
+ arguments: -race -v ./...
+ displayName: 'Execute Tests'
+
+ - job: Mac
+ pool:
+ vmImage: 'macos-latest'
+ steps:
+ - task: GoTool@0
+ displayName: 'Use Go $(GOVERSION)'
+ inputs:
+ version: $(GOVERSION)
+ - task: Go@0
+ inputs:
+ command: test
+ arguments: -race -v ./...
+ displayName: 'Execute Tests'
+
+ - job: Windows
+ pool:
+ vmImage: 'windows-latest'
+ steps:
+ - task: GoTool@0
+ displayName: 'Use Go $(GOVERSION)'
+ inputs:
+ version: $(GOVERSION)
+ - task: Go@0
+ inputs:
+ command: test
+ arguments: -race -v ./...
+ displayName: 'Execute Tests'
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/bench.sh b/src/margo.sh/vendor/github.com/karrick/godirwalk/bench.sh
new file mode 100755
index 00000000..b2ba374c
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/bench.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# for version in v1.9.1 v1.10.0 v1.10.3 v1.10.12 v1.11.2 v1.11.3 v1.12.0 v1.13.1 v1.14.0 v1.14.1 ; do
+for version in v1.10.12 v1.14.1 v1.15.2 ; do
+ echo "### $version" > $version.txt
+ git checkout -- go.mod && git checkout $version && go test -run=NONE -bench=Benchmark2 >> $version.txt || exit 1
+done
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go
new file mode 100644
index 00000000..6e1cb0bf
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go
@@ -0,0 +1,14 @@
+// +build godirwalk_debug
+
+package godirwalk
+
+import (
+ "fmt"
+ "os"
+)
+
+// debug formats and prints arguments to stderr for development builds
+func debug(f string, a ...interface{}) {
+ // fmt.Fprintf(os.Stderr, f, a...)
+ os.Stderr.Write([]byte("godirwalk: " + fmt.Sprintf(f, a...)))
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go
new file mode 100644
index 00000000..98617873
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go
@@ -0,0 +1,6 @@
+// +build !godirwalk_debug
+
+package godirwalk
+
+// debug is a no-op for release builds
+func debug(_ string, _ ...interface{}) {}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go
new file mode 100644
index 00000000..38e141ba
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go
@@ -0,0 +1,104 @@
+package godirwalk
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Dirent stores the name and file system mode type of discovered file system
+// entries.
+type Dirent struct {
+ name string // base name of the file system entry.
+ path string // path name of the file system entry.
+ modeType os.FileMode // modeType is the type of file system entry.
+}
+
+// NewDirent returns a newly initialized Dirent structure, or an error. This
+// function does not follow symbolic links.
+//
+// This function is rarely used, as Dirent structures are provided by other
+// functions in this library that read and walk directories, but is provided,
+// however, for the occasion when a program needs to create a Dirent.
+func NewDirent(osPathname string) (*Dirent, error) {
+ modeType, err := modeType(osPathname)
+ if err != nil {
+ return nil, err
+ }
+ return &Dirent{
+ name: filepath.Base(osPathname),
+ path: filepath.Dir(osPathname),
+ modeType: modeType,
+ }, nil
+}
+
+// IsDir returns true if and only if the Dirent represents a file system
+// directory. Note that on some operating systems, more than one file mode bit
+// may be set for a node. For instance, on Windows, a symbolic link that points
+// to a directory will have both the directory and the symbolic link bits set.
+func (de Dirent) IsDir() bool { return de.modeType&os.ModeDir != 0 }
+
+// IsDirOrSymlinkToDir returns true if and only if the Dirent represents a file
+// system directory, or a symbolic link to a directory. Note that if the Dirent
+// is not a directory but is a symbolic link, this method will resolve by
+// sending a request to the operating system to follow the symbolic link.
+func (de Dirent) IsDirOrSymlinkToDir() (bool, error) {
+ if de.IsDir() {
+ return true, nil
+ }
+ if !de.IsSymlink() {
+ return false, nil
+ }
+ // Does this symlink point to a directory?
+ info, err := os.Stat(filepath.Join(de.path, de.name))
+ if err != nil {
+ return false, err
+ }
+ return info.IsDir(), nil
+}
+
+// IsRegular returns true if and only if the Dirent represents a regular file.
+// That is, it ensures that no mode type bits are set.
+func (de Dirent) IsRegular() bool { return de.modeType&os.ModeType == 0 }
+
+// IsSymlink returns true if and only if the Dirent represents a file system
+// symbolic link. Note that on some operating systems, more than one file mode
+// bit may be set for a node. For instance, on Windows, a symbolic link that
+// points to a directory will have both the directory and the symbolic link bits
+// set.
+func (de Dirent) IsSymlink() bool { return de.modeType&os.ModeSymlink != 0 }
+
+// IsDevice returns true if and only if the Dirent represents a device file.
+func (de Dirent) IsDevice() bool { return de.modeType&os.ModeDevice != 0 }
+
+// ModeType returns the mode bits that specify the file system node type. We
+// could make our own enum-like data type for encoding the file type, but Go's
+// runtime already gives us architecture independent file modes, as discussed in
+// `os/types.go`:
+//
+// Go's runtime FileMode type has same definition on all systems, so that
+// information about files can be moved from one system to another portably.
+func (de Dirent) ModeType() os.FileMode { return de.modeType }
+
+// Name returns the base name of the file system entry.
+func (de Dirent) Name() string { return de.name }
+
+// reset releases memory held by entry err and name, and resets mode type to 0.
+func (de *Dirent) reset() {
+ de.name = ""
+ de.path = ""
+ de.modeType = 0
+}
+
+// Dirents represents a slice of Dirent pointers, which are sortable by base
+// name. This type satisfies the `sort.Interface` interface.
+type Dirents []*Dirent
+
+// Len returns the count of Dirent structures in the slice.
+func (l Dirents) Len() int { return len(l) }
+
+// Less returns true if and only if the base name of the element specified by
+// the first index is lexicographically less than that of the second index.
+func (l Dirents) Less(i, j int) bool { return l[i].name < l[j].name }
+
+// Swap exchanges the two Dirent entries specified by the two provided indexes.
+func (l Dirents) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/doc.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/doc.go
new file mode 100644
index 00000000..440aa82a
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/doc.go
@@ -0,0 +1,42 @@
+/*
+Package godirwalk provides functions to read and traverse directory trees.
+
+In short, why do I use this library?
+
+* It's faster than `filepath.Walk`.
+
+* It's more correct on Windows than `filepath.Walk`.
+
+* It's more easy to use than `filepath.Walk`.
+
+* It's more flexible than `filepath.Walk`.
+
+USAGE
+
+This library will normalize the provided top level directory name based on the
+os-specific path separator by calling `filepath.Clean` on its first
+argument. However it always provides the pathname created by using the correct
+os-specific path separator when invoking the provided callback function.
+
+ dirname := "some/directory/root"
+ err := godirwalk.Walk(dirname, &godirwalk.Options{
+ Callback: func(osPathname string, de *godirwalk.Dirent) error {
+ fmt.Printf("%s %s\n", de.ModeType(), osPathname)
+ return nil
+ },
+ })
+
+This library not only provides functions for traversing a file system directory
+tree, but also for obtaining a list of immediate descendants of a particular
+directory, typically much more quickly than using `os.ReadDir` or
+`os.ReadDirnames`.
+
+ scratchBuffer := make([]byte, godirwalk.MinimumScratchBufferSize)
+
+ names, err := godirwalk.ReadDirnames("some/directory", scratchBuffer)
+ // ...
+
+ entries, err := godirwalk.ReadDirents("another/directory", scratchBuffer)
+ // ...
+*/
+package godirwalk
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod b/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod
new file mode 100644
index 00000000..faf23fd9
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod
@@ -0,0 +1,3 @@
+module github.com/karrick/godirwalk
+
+go 1.13
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/go.sum b/src/margo.sh/vendor/github.com/karrick/godirwalk/go.sum
new file mode 100644
index 00000000..e69de29b
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/inoWithFileno.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/inoWithFileno.go
new file mode 100644
index 00000000..1dc04a71
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/inoWithFileno.go
@@ -0,0 +1,9 @@
+// +build dragonfly freebsd openbsd netbsd
+
+package godirwalk
+
+import "syscall"
+
+func inoFromDirent(de *syscall.Dirent) uint64 {
+ return uint64(de.Fileno)
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/inoWithIno.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/inoWithIno.go
new file mode 100644
index 00000000..842a6662
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/inoWithIno.go
@@ -0,0 +1,9 @@
+// +build aix darwin linux nacl solaris
+
+package godirwalk
+
+import "syscall"
+
+func inoFromDirent(de *syscall.Dirent) uint64 {
+ return uint64(de.Ino)
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go
new file mode 100644
index 00000000..6427a685
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go
@@ -0,0 +1,22 @@
+package godirwalk
+
+import (
+ "os"
+)
+
+// modeType returns the mode type of the file system entry identified by
+// osPathname by calling os.LStat function, to intentionally not follow symbolic
+// links.
+//
+// Even though os.LStat provides all file mode bits, we want to ensure same
+// values returned to caller regardless of whether we obtained file mode bits
+// from syscall or stat call. Therefore mask out the additional file mode bits
+// that are provided by stat but not by the syscall, so users can rely on their
+// values.
+func modeType(osPathname string) (os.FileMode, error) {
+ fi, err := os.Lstat(osPathname)
+ if err == nil {
+ return fi.Mode() & os.ModeType, nil
+ }
+ return 0, err
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go
new file mode 100644
index 00000000..7890e776
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go
@@ -0,0 +1,37 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package godirwalk
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// modeTypeFromDirent converts a syscall defined constant, which is in purview
+// of OS, to a constant defined by Go, assumed by this project to be stable.
+//
+// When the syscall constant is not recognized, this function falls back to a
+// Stat on the file system.
+func modeTypeFromDirent(de *syscall.Dirent, osDirname, osBasename string) (os.FileMode, error) {
+ switch de.Type {
+ case syscall.DT_REG:
+ return 0, nil
+ case syscall.DT_DIR:
+ return os.ModeDir, nil
+ case syscall.DT_LNK:
+ return os.ModeSymlink, nil
+ case syscall.DT_CHR:
+ return os.ModeDevice | os.ModeCharDevice, nil
+ case syscall.DT_BLK:
+ return os.ModeDevice, nil
+ case syscall.DT_FIFO:
+ return os.ModeNamedPipe, nil
+ case syscall.DT_SOCK:
+ return os.ModeSocket, nil
+ default:
+ // If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), then
+ // resolve actual mode by reading file information.
+ return modeType(filepath.Join(osDirname, osBasename))
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go
new file mode 100644
index 00000000..5299392e
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go
@@ -0,0 +1,18 @@
+// +build aix js nacl solaris
+
+package godirwalk
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// modeTypeFromDirent converts a syscall defined constant, which is in purview
+// of OS, to a constant defined by Go, assumed by this project to be stable.
+//
+// Because some operating system syscall.Dirent structures do not include a Type
+// field, fall back on Stat of the file system.
+func modeTypeFromDirent(_ *syscall.Dirent, osDirname, osBasename string) (os.FileMode, error) {
+ return modeType(filepath.Join(osDirname, osBasename))
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/nameWithNamlen.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/nameWithNamlen.go
new file mode 100644
index 00000000..2c0231ee
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/nameWithNamlen.go
@@ -0,0 +1,29 @@
+// +build aix darwin dragonfly freebsd netbsd openbsd
+
+package godirwalk
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+func nameFromDirent(de *syscall.Dirent) []byte {
+ // Because this GOOS' syscall.Dirent provides a Namlen field that says how
+ // long the name is, this function does not need to search for the NULL
+ // byte.
+ ml := int(de.Namlen)
+
+ // Convert syscall.Dirent.Name, which is array of int8, to []byte, by
+ // overwriting Cap, Len, and Data slice header fields to values from
+ // syscall.Dirent fields. Setting the Cap, Len, and Data field values for
+ // the slice header modifies what the slice header points to, and in this
+ // case, the name buffer.
+ var name []byte
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&name))
+ sh.Cap = ml
+ sh.Len = ml
+ sh.Data = uintptr(unsafe.Pointer(&de.Name[0]))
+
+ return name
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/nameWithoutNamlen.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/nameWithoutNamlen.go
new file mode 100644
index 00000000..f776fbc7
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/nameWithoutNamlen.go
@@ -0,0 +1,42 @@
+// +build nacl linux js solaris
+
+package godirwalk
+
+import (
+ "bytes"
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+// nameOffset is a compile time constant
+const nameOffset = int(unsafe.Offsetof(syscall.Dirent{}.Name))
+
+func nameFromDirent(de *syscall.Dirent) (name []byte) {
+ // Because this GOOS' syscall.Dirent does not provide a field that specifies
+ // the name length, this function must first calculate the max possible name
+ // length, and then search for the NULL byte.
+ ml := int(de.Reclen) - nameOffset
+
+ // Convert syscall.Dirent.Name, which is array of int8, to []byte, by
+ // overwriting Cap, Len, and Data slice header fields to the max possible
+ // name length computed above, and finding the terminating NULL byte.
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&name))
+ sh.Cap = ml
+ sh.Len = ml
+ sh.Data = uintptr(unsafe.Pointer(&de.Name[0]))
+
+ if index := bytes.IndexByte(name, 0); index >= 0 {
+ // Found NULL byte; set slice's cap and len accordingly.
+ sh.Cap = index
+ sh.Len = index
+ return
+ }
+
+ // NOTE: This branch is not expected, but included for defensive
+ // programming, and provides a hard stop on the name based on the structure
+ // field array size.
+ sh.Cap = len(de.Name)
+ sh.Len = sh.Cap
+ return
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go
new file mode 100644
index 00000000..c8fa8155
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go
@@ -0,0 +1,53 @@
+package godirwalk
+
+// ReadDirents returns a sortable slice of pointers to Dirent structures, each
+// representing the file system name and mode type for one of the immediate
+// descendant of the specified directory. If the specified directory is a
+// symbolic link, it will be resolved.
+//
+// If an optional scratch buffer is provided that is at least one page of
+// memory, it will be used when reading directory entries from the file
+// system. If you plan on calling this function in a loop, you will have
+// significantly better performance if you allocate a scratch buffer and use it
+// each time you call this function.
+//
+// children, err := godirwalk.ReadDirents(osDirname, nil)
+// if err != nil {
+// return nil, errors.Wrap(err, "cannot get list of directory children")
+// }
+// sort.Sort(children)
+// for _, child := range children {
+// fmt.Printf("%s %s\n", child.ModeType, child.Name)
+// }
+func ReadDirents(osDirname string, scratchBuffer []byte) (Dirents, error) {
+ return readDirents(osDirname, scratchBuffer)
+}
+
+// ReadDirnames returns a slice of strings, representing the immediate
+// descendants of the specified directory. If the specified directory is a
+// symbolic link, it will be resolved.
+//
+// If an optional scratch buffer is provided that is at least one page of
+// memory, it will be used when reading directory entries from the file
+// system. If you plan on calling this function in a loop, you will have
+// significantly better performance if you allocate a scratch buffer and use it
+// each time you call this function.
+//
+// Note that this function, depending on operating system, may or may not invoke
+// the ReadDirents function, in order to prepare the list of immediate
+// descendants. Therefore, if your program needs both the names and the file
+// system mode types of descendants, it will always be faster to invoke
+// ReadDirents directly, rather than calling this function, then looping over
+// the results and calling os.Stat or os.LStat for each entry.
+//
+// children, err := godirwalk.ReadDirnames(osDirname, nil)
+// if err != nil {
+// return nil, errors.Wrap(err, "cannot get list of directory children")
+// }
+// sort.Strings(children)
+// for _, child := range children {
+// fmt.Printf("%s\n", child)
+// }
+func ReadDirnames(osDirname string, scratchBuffer []byte) ([]string, error) {
+ return readDirnames(osDirname, scratchBuffer)
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go
new file mode 100644
index 00000000..a993038b
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go
@@ -0,0 +1,131 @@
+// +build !windows
+
+package godirwalk
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// MinimumScratchBufferSize specifies the minimum size of the scratch buffer
+// that ReadDirents, ReadDirnames, Scanner, and Walk will use when reading file
+// entries from the operating system. During program startup it is initialized
+// to the result from calling `os.Getpagesize()` for non Windows environments,
+// and 0 for Windows.
+var MinimumScratchBufferSize = os.Getpagesize()
+
+func newScratchBuffer() []byte { return make([]byte, MinimumScratchBufferSize) }
+
+func readDirents(osDirname string, scratchBuffer []byte) ([]*Dirent, error) {
+ var entries []*Dirent
+ var workBuffer []byte
+
+ dh, err := os.Open(osDirname)
+ if err != nil {
+ return nil, err
+ }
+ fd := int(dh.Fd())
+
+ if len(scratchBuffer) < MinimumScratchBufferSize {
+ scratchBuffer = newScratchBuffer()
+ }
+
+ var sde syscall.Dirent
+ for {
+ if len(workBuffer) == 0 {
+ n, err := syscall.ReadDirent(fd, scratchBuffer)
+ // n, err := unix.ReadDirent(fd, scratchBuffer)
+ if err != nil {
+ if err == syscall.EINTR /* || err == unix.EINTR */ {
+ continue
+ }
+ _ = dh.Close()
+ return nil, err
+ }
+ if n <= 0 { // end of directory: normal exit
+ if err = dh.Close(); err != nil {
+ return nil, err
+ }
+ return entries, nil
+ }
+ workBuffer = scratchBuffer[:n] // trim work buffer to number of bytes read
+ }
+
+ copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(&sde))[:], workBuffer)
+ workBuffer = workBuffer[reclen(&sde):] // advance buffer for next iteration through loop
+
+ if inoFromDirent(&sde) == 0 {
+ continue // inode set to 0 indicates an entry that was marked as deleted
+ }
+
+ nameSlice := nameFromDirent(&sde)
+ nameLength := len(nameSlice)
+
+ if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
+ continue
+ }
+
+ childName := string(nameSlice)
+ mt, err := modeTypeFromDirent(&sde, osDirname, childName)
+ if err != nil {
+ _ = dh.Close()
+ return nil, err
+ }
+ entries = append(entries, &Dirent{name: childName, path: osDirname, modeType: mt})
+ }
+}
+
+func readDirnames(osDirname string, scratchBuffer []byte) ([]string, error) {
+ var entries []string
+ var workBuffer []byte
+ var sde *syscall.Dirent
+
+ dh, err := os.Open(osDirname)
+ if err != nil {
+ return nil, err
+ }
+ fd := int(dh.Fd())
+
+ if len(scratchBuffer) < MinimumScratchBufferSize {
+ scratchBuffer = newScratchBuffer()
+ }
+
+ for {
+ if len(workBuffer) == 0 {
+ n, err := syscall.ReadDirent(fd, scratchBuffer)
+ // n, err := unix.ReadDirent(fd, scratchBuffer)
+ if err != nil {
+ if err == syscall.EINTR /* || err == unix.EINTR */ {
+ continue
+ }
+ _ = dh.Close()
+ return nil, err
+ }
+ if n <= 0 { // end of directory: normal exit
+ if err = dh.Close(); err != nil {
+ return nil, err
+ }
+ return entries, nil
+ }
+ workBuffer = scratchBuffer[:n] // trim work buffer to number of bytes read
+ }
+
+ sde = (*syscall.Dirent)(unsafe.Pointer(&workBuffer[0])) // point entry to first syscall.Dirent in buffer
+ // Handle first entry in the work buffer.
+ workBuffer = workBuffer[reclen(sde):] // advance buffer for next iteration through loop
+
+ if inoFromDirent(sde) == 0 {
+ continue // inode set to 0 indicates an entry that was marked as deleted
+ }
+
+ nameSlice := nameFromDirent(sde)
+ nameLength := len(nameSlice)
+
+ if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
+ continue
+ }
+
+ entries = append(entries, string(nameSlice))
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go
new file mode 100644
index 00000000..7dd76cbc
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go
@@ -0,0 +1,66 @@
+// +build windows
+
+package godirwalk
+
+import "os"
+
+// MinimumScratchBufferSize specifies the minimum size of the scratch buffer
+// that ReadDirents, ReadDirnames, Scanner, and Walk will use when reading file
+// entries from the operating system. During program startup it is initialized
+// to the result from calling `os.Getpagesize()` for non Windows environments,
+// and 0 for Windows.
+var MinimumScratchBufferSize = 0
+
+func newScratchBuffer() []byte { return nil }
+
+func readDirents(osDirname string, _ []byte) ([]*Dirent, error) {
+ dh, err := os.Open(osDirname)
+ if err != nil {
+ return nil, err
+ }
+
+ fileinfos, err := dh.Readdir(-1)
+ if err != nil {
+ _ = dh.Close()
+ return nil, err
+ }
+
+ entries := make([]*Dirent, len(fileinfos))
+
+ for i, fi := range fileinfos {
+ entries[i] = &Dirent{
+ name: fi.Name(),
+ path: osDirname,
+ modeType: fi.Mode() & os.ModeType,
+ }
+ }
+
+ if err = dh.Close(); err != nil {
+ return nil, err
+ }
+ return entries, nil
+}
+
+func readDirnames(osDirname string, _ []byte) ([]string, error) {
+ dh, err := os.Open(osDirname)
+ if err != nil {
+ return nil, err
+ }
+
+ fileinfos, err := dh.Readdir(-1)
+ if err != nil {
+ _ = dh.Close()
+ return nil, err
+ }
+
+ entries := make([]string, len(fileinfos))
+
+ for i, fi := range fileinfos {
+ entries[i] = fi.Name()
+ }
+
+ if err = dh.Close(); err != nil {
+ return nil, err
+ }
+ return entries, nil
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/reclenFromNamlen.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/reclenFromNamlen.go
new file mode 100644
index 00000000..baeefcee
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/reclenFromNamlen.go
@@ -0,0 +1,9 @@
+// +build dragonfly
+
+package godirwalk
+
+import "syscall"
+
+func reclen(de *syscall.Dirent) uint64 {
+ return (16 + uint64(de.Namlen) + 1 + 7) &^ 7
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/reclenFromReclen.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/reclenFromReclen.go
new file mode 100644
index 00000000..99be34d8
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/reclenFromReclen.go
@@ -0,0 +1,9 @@
+// +build nacl linux js solaris aix darwin freebsd netbsd openbsd
+
+package godirwalk
+
+import "syscall"
+
+func reclen(de *syscall.Dirent) uint64 {
+ return uint64(de.Reclen)
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go
new file mode 100644
index 00000000..33250b61
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go
@@ -0,0 +1,166 @@
+// +build !windows
+
+package godirwalk
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// Scanner is an iterator to enumerate the contents of a directory.
+type Scanner struct {
+ scratchBuffer []byte // read directory bytes from file system into this buffer
+ workBuffer []byte // points into scratchBuffer, from which we chunk out directory entries
+ osDirname string
+ childName string
+ err error // err is the error associated with scanning directory
+ statErr error // statErr is any error return while attempting to stat an entry
+ dh *os.File // used to close directory after done reading
+ de *Dirent // most recently decoded directory entry
+ sde syscall.Dirent
+ fd int // file descriptor used to read entries from directory
+}
+
+// NewScanner returns a new directory Scanner that lazily enumerates the
+// contents of a single directory.
+//
+// scanner, err := godirwalk.NewScanner(dirname)
+// if err != nil {
+// fatal("cannot scan directory: %s", err)
+// }
+//
+// for scanner.Scan() {
+// dirent, err := scanner.Dirent()
+// if err != nil {
+// warning("cannot get dirent: %s", err)
+// continue
+// }
+// name := dirent.Name()
+// if name == "break" {
+// break
+// }
+// if name == "continue" {
+// continue
+// }
+// fmt.Printf("%v %v\n", dirent.ModeType(), dirent.Name())
+// }
+// if err := scanner.Err(); err != nil {
+// fatal("cannot scan directory: %s", err)
+// }
+func NewScanner(osDirname string) (*Scanner, error) {
+ return NewScannerWithScratchBuffer(osDirname, nil)
+}
+
+// NewScannerWithScratchBuffer returns a new directory Scanner that lazily
+// enumerates the contents of a single directory. On platforms other than
+// Windows it uses the provided scratch buffer to read from the file system. On
+// Windows the scratch buffer is ignored.
+func NewScannerWithScratchBuffer(osDirname string, scratchBuffer []byte) (*Scanner, error) {
+ dh, err := os.Open(osDirname)
+ if err != nil {
+ return nil, err
+ }
+ if len(scratchBuffer) < MinimumScratchBufferSize {
+ scratchBuffer = newScratchBuffer()
+ }
+ scanner := &Scanner{
+ scratchBuffer: scratchBuffer,
+ osDirname: osDirname,
+ dh: dh,
+ fd: int(dh.Fd()),
+ }
+ return scanner, nil
+}
+
+// Dirent returns the current directory entry while scanning a directory.
+func (s *Scanner) Dirent() (*Dirent, error) {
+ if s.de == nil {
+ s.de = &Dirent{name: s.childName, path: s.osDirname}
+ s.de.modeType, s.statErr = modeTypeFromDirent(&s.sde, s.osDirname, s.childName)
+ }
+ return s.de, s.statErr
+}
+
+// done is called when directory scanner unable to continue, with either the
+// triggering error, or nil when there are simply no more entries to read from
+// the directory.
+func (s *Scanner) done(err error) {
+ if s.dh == nil {
+ return
+ }
+
+ if cerr := s.dh.Close(); err == nil {
+ s.err = cerr
+ }
+
+ s.osDirname, s.childName = "", ""
+ s.scratchBuffer, s.workBuffer = nil, nil
+ s.dh, s.de, s.statErr = nil, nil, nil
+ s.sde = syscall.Dirent{}
+ s.fd = 0
+}
+
+// Err returns any error associated with scanning a directory. It is normal to
+// call Err after Scan returns false, even though they both ensure Scanner
+// resources are released. Do not call until done scanning a directory.
+func (s *Scanner) Err() error {
+ s.done(nil)
+ return s.err
+}
+
+// Name returns the base name of the current directory entry while scanning a
+// directory.
+func (s *Scanner) Name() string { return s.childName }
+
+// Scan potentially reads and then decodes the next directory entry from the
+// file system.
+//
+// When it returns false, this releases resources used by the Scanner then
+// returns any error associated with closing the file system directory resource.
+func (s *Scanner) Scan() bool {
+ if s.dh == nil {
+ return false
+ }
+
+ s.de = nil
+
+ for {
+ // When the work buffer has nothing remaining to decode, we need to load
+ // more data from disk.
+ if len(s.workBuffer) == 0 {
+ n, err := syscall.ReadDirent(s.fd, s.scratchBuffer)
+ // n, err := unix.ReadDirent(s.fd, s.scratchBuffer)
+ if err != nil {
+ if err == syscall.EINTR /* || err == unix.EINTR */ {
+ continue
+ }
+ s.done(err)
+ return false
+ }
+ if n <= 0 { // end of directory: normal exit
+ s.done(nil)
+ return false
+ }
+ s.workBuffer = s.scratchBuffer[:n] // trim work buffer to number of bytes read
+ }
+
+ // point entry to first syscall.Dirent in buffer
+ copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(&s.sde))[:], s.workBuffer)
+ s.workBuffer = s.workBuffer[reclen(&s.sde):] // advance buffer for next iteration through loop
+
+ if inoFromDirent(&s.sde) == 0 {
+ continue // inode set to 0 indicates an entry that was marked as deleted
+ }
+
+ nameSlice := nameFromDirent(&s.sde)
+ nameLength := len(nameSlice)
+
+ if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
+ continue
+ }
+
+ s.childName = string(nameSlice)
+ return true
+ }
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go
new file mode 100644
index 00000000..a2110618
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go
@@ -0,0 +1,133 @@
+// +build windows
+
+package godirwalk
+
+import (
+ "fmt"
+ "os"
+)
+
+// Scanner is an iterator to enumerate the contents of a directory.
+type Scanner struct {
+ osDirname string
+ childName string
+ dh *os.File // dh is handle to open directory
+ de *Dirent
+ err error // err is the error associated with scanning directory
+ childMode os.FileMode
+}
+
+// NewScanner returns a new directory Scanner that lazily enumerates the
+// contents of a single directory.
+//
+// scanner, err := godirwalk.NewScanner(dirname)
+// if err != nil {
+// fatal("cannot scan directory: %s", err)
+// }
+//
+// for scanner.Scan() {
+// dirent, err := scanner.Dirent()
+// if err != nil {
+// warning("cannot get dirent: %s", err)
+// continue
+// }
+// name := dirent.Name()
+// if name == "break" {
+// break
+// }
+// if name == "continue" {
+// continue
+// }
+// fmt.Printf("%v %v\n", dirent.ModeType(), dirent.Name())
+// }
+// if err := scanner.Err(); err != nil {
+// fatal("cannot scan directory: %s", err)
+// }
+func NewScanner(osDirname string) (*Scanner, error) {
+ dh, err := os.Open(osDirname)
+ if err != nil {
+ return nil, err
+ }
+ scanner := &Scanner{
+ osDirname: osDirname,
+ dh: dh,
+ }
+ return scanner, nil
+}
+
+// NewScannerWithScratchBuffer returns a new directory Scanner that lazily
+// enumerates the contents of a single directory. On platforms other than
+// Windows it uses the provided scratch buffer to read from the file system. On
+// Windows the scratch buffer parameter is ignored.
+func NewScannerWithScratchBuffer(osDirname string, scratchBuffer []byte) (*Scanner, error) {
+ return NewScanner(osDirname)
+}
+
+// Dirent returns the current directory entry while scanning a directory.
+func (s *Scanner) Dirent() (*Dirent, error) {
+ if s.de == nil {
+ s.de = &Dirent{
+ name: s.childName,
+ path: s.osDirname,
+ modeType: s.childMode,
+ }
+ }
+ return s.de, nil
+}
+
+// done is called when directory scanner unable to continue, with either the
+// triggering error, or nil when there are simply no more entries to read from
+// the directory.
+func (s *Scanner) done(err error) {
+ if s.dh == nil {
+ return
+ }
+
+ if cerr := s.dh.Close(); err == nil {
+ s.err = cerr
+ }
+
+ s.childName, s.osDirname = "", ""
+ s.de, s.dh = nil, nil
+}
+
+// Err returns any error associated with scanning a directory. It is normal to
+// call Err after Scan returns false, even though they both ensure Scanner
+// resources are released. Do not call until done scanning a directory.
+func (s *Scanner) Err() error {
+ s.done(nil)
+ return s.err
+}
+
+// Name returns the base name of the current directory entry while scanning a
+// directory.
+func (s *Scanner) Name() string { return s.childName }
+
+// Scan potentially reads and then decodes the next directory entry from the
+// file system.
+//
+// When it returns false, this releases resources used by the Scanner then
+// returns any error associated with closing the file system directory resource.
+func (s *Scanner) Scan() bool {
+ if s.dh == nil {
+ return false
+ }
+
+ s.de = nil
+
+ fileinfos, err := s.dh.Readdir(1)
+ if err != nil {
+ s.done(err)
+ return false
+ }
+
+ if l := len(fileinfos); l != 1 {
+ s.done(fmt.Errorf("expected a single entry rather than %d", l))
+ return false
+ }
+
+ fi := fileinfos[0]
+ s.childMode = fi.Mode() & os.ModeType
+ s.childName = fi.Name()
+ return true
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go
new file mode 100644
index 00000000..c08179e1
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go
@@ -0,0 +1,44 @@
+package godirwalk
+
+import "sort"
+
+type scanner interface {
+ Dirent() (*Dirent, error)
+ Err() error
+ Name() string
+ Scan() bool
+}
+
+// sortedScanner enumerates through a directory's contents after reading the
+// entire directory and sorting the entries by name. Used by walk to simplify
+// its implementation.
+type sortedScanner struct {
+ dd []*Dirent
+ de *Dirent
+}
+
+func newSortedScanner(osPathname string, scratchBuffer []byte) (*sortedScanner, error) {
+ deChildren, err := ReadDirents(osPathname, scratchBuffer)
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(deChildren)
+ return &sortedScanner{dd: deChildren}, nil
+}
+
+func (d *sortedScanner) Err() error {
+ d.dd, d.de = nil, nil
+ return nil
+}
+
+func (d *sortedScanner) Dirent() (*Dirent, error) { return d.de, nil }
+
+func (d *sortedScanner) Name() string { return d.de.name }
+
+func (d *sortedScanner) Scan() bool {
+ if len(d.dd) > 0 {
+ d.de, d.dd = d.dd[0], d.dd[1:]
+ return true
+ }
+ return false
+}
diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go
new file mode 100644
index 00000000..b15a1903
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go
@@ -0,0 +1,320 @@
+package godirwalk
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// Options provide parameters for how the Walk function operates.
+type Options struct {
+ // ErrorCallback specifies a function to be invoked in the case of an error
+ // that could potentially be ignored while walking a file system
+ // hierarchy. When set to nil or left as its zero-value, any error condition
+ // causes Walk to immediately return the error describing what took
+ // place. When non-nil, this user supplied function is invoked with the OS
+ // pathname of the file system object that caused the error along with the
+ // error that took place. The return value of the supplied ErrorCallback
+ // function determines whether the error will cause Walk to halt immediately
+ // as it would were no ErrorCallback value provided, or skip this file
+ // system node yet continue on with the remaining nodes in the file system
+ // hierarchy.
+ //
+ // ErrorCallback is invoked both for errors that are returned by the
+ // runtime, and for errors returned by other user supplied callback
+ // functions.
+ ErrorCallback func(string, error) ErrorAction
+
+ // FollowSymbolicLinks specifies whether Walk will follow symbolic links
+ // that refer to directories. When set to false or left as its zero-value,
+ // Walk will still invoke the callback function with symbolic link nodes,
+ // but if the symbolic link refers to a directory, it will not recurse on
+ // that directory. When set to true, Walk will recurse on symbolic links
+ // that refer to a directory.
+ FollowSymbolicLinks bool
+
+ // Unsorted controls whether or not Walk will sort the immediate descendants
+ // of a directory by their relative names prior to visiting each of those
+ // entries.
+ //
+ // When set to false or left at its zero-value, Walk will get the list of
+ // immediate descendants of a particular directory, sort that list by
+ // lexical order of their names, and then visit each node in the list in
+ // sorted order. This will cause Walk to always traverse the same directory
+ // tree in the same order, however may be inefficient for directories with
+ // many immediate descendants.
+ //
+ // When set to true, Walk skips sorting the list of immediate descendants
+ // for a directory, and simply visits each node in the order the operating
+ // system enumerated them. This will be more fast, but with the side effect
+ // that the traversal order may be different from one invocation to the
+ // next.
+ Unsorted bool
+
+ // Callback is a required function that Walk will invoke for every file
+ // system node it encounters.
+ Callback WalkFunc
+
+ // PostChildrenCallback is an option function that Walk will invoke for
+ // every file system directory it encounters after its children have been
+ // processed.
+ PostChildrenCallback WalkFunc
+
+ // ScratchBuffer is an optional byte slice to use as a scratch buffer for
+ // Walk to use when reading directory entries, to reduce amount of garbage
+ // generation. Not all architectures take advantage of the scratch
+ // buffer. If omitted or the provided buffer has fewer bytes than
+ // MinimumScratchBufferSize, then a buffer with MinimumScratchBufferSize
+ // bytes will be created and used once per Walk invocation.
+ ScratchBuffer []byte
+
+ // AllowNonDirectory causes Walk to bypass the check that ensures it is
+ // being called on a directory node, or when FollowSymbolicLinks is true, a
+ // symbolic link that points to a directory. Leave this value false to have
+ // Walk return an error when called on a non-directory. Set this true to
+ // have Walk run even when called on a non-directory node.
+ AllowNonDirectory bool
+}
+
+// ErrorAction defines a set of actions the Walk function could take based on
+// the occurrence of an error while walking the file system. See the
+// documentation for the ErrorCallback field of the Options structure for more
+// information.
+type ErrorAction int
+
+const (
+ // Halt is the ErrorAction return value when the upstream code wants to halt
+ // the walk process when a runtime error takes place. It matches the default
+ // action the Walk function would take were no ErrorCallback provided.
+ Halt ErrorAction = iota
+
+ // SkipNode is the ErrorAction return value when the upstream code wants to
+ // ignore the runtime error for the current file system node, skip
+ // processing of the node that caused the error, and continue walking the
+ // file system hierarchy with the remaining nodes.
+ SkipNode
+)
+
+// WalkFunc is the type of the function called for each file system node visited
+// by Walk. The pathname argument will contain the argument to Walk as a prefix;
+// that is, if Walk is called with "dir", which is a directory containing the
+// file "a", the provided WalkFunc will be invoked with the argument "dir/a",
+// using the correct os.PathSeparator for the Go Operating System architecture,
+// GOOS. The directory entry argument is a pointer to a Dirent for the node,
+// providing access to both the basename and the mode type of the file system
+// node.
+//
+// If an error is returned by the Callback or PostChildrenCallback functions,
+// and no ErrorCallback function is provided, processing stops. If an
+// ErrorCallback function is provided, then it is invoked with the OS pathname
+// of the node that caused the error along along with the error. The return
+// value of the ErrorCallback function determines whether to halt processing, or
+// skip this node and continue processing remaining file system nodes.
+//
+// The exception is when the function returns the special value
+// filepath.SkipDir. If the function returns filepath.SkipDir when invoked on a
+// directory, Walk skips the directory's contents entirely. If the function
+// returns filepath.SkipDir when invoked on a non-directory file system node,
+// Walk skips the remaining files in the containing directory. Note that any
+// supplied ErrorCallback function is not invoked with filepath.SkipDir when the
+// Callback or PostChildrenCallback functions return that special value.
+type WalkFunc func(osPathname string, directoryEntry *Dirent) error
+
+// Walk walks the file tree rooted at the specified directory, calling the
+// specified callback function for each file system node in the tree, including
+// root, symbolic links, and other node types.
+//
+// This function is often much faster than filepath.Walk because it does not
+// invoke os.Stat for every node it encounters, but rather obtains the file
+// system node type when it reads the parent directory.
+//
+// If a runtime error occurs, either from the operating system or from the
+// upstream Callback or PostChildrenCallback functions, processing typically
+// halts. However, when an ErrorCallback function is provided in the provided
+// Options structure, that function is invoked with the error along with the OS
+// pathname of the file system node that caused the error. The ErrorCallback
+// function's return value determines the action that Walk will then take.
+//
+// func main() {
+// dirname := "."
+// if len(os.Args) > 1 {
+// dirname = os.Args[1]
+// }
+// err := godirwalk.Walk(dirname, &godirwalk.Options{
+// Callback: func(osPathname string, de *godirwalk.Dirent) error {
+// fmt.Printf("%s %s\n", de.ModeType(), osPathname)
+// return nil
+// },
+// ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction {
+// // Your program may want to log the error somehow.
+// fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
+//
+// // For the purposes of this example, a simple SkipNode will suffice,
+// // although in reality perhaps additional logic might be called for.
+// return godirwalk.SkipNode
+// },
+// })
+// if err != nil {
+// fmt.Fprintf(os.Stderr, "%s\n", err)
+// os.Exit(1)
+// }
+// }
+func Walk(pathname string, options *Options) error {
+ if options == nil || options.Callback == nil {
+ return errors.New("cannot walk without non-nil options and Callback function")
+ }
+
+ pathname = filepath.Clean(pathname)
+
+ var fi os.FileInfo
+ var err error
+
+ if options.FollowSymbolicLinks {
+ fi, err = os.Stat(pathname)
+ } else {
+ fi, err = os.Lstat(pathname)
+ }
+ if err != nil {
+ return err
+ }
+
+ mode := fi.Mode()
+ if !options.AllowNonDirectory && mode&os.ModeDir == 0 {
+ return fmt.Errorf("cannot Walk non-directory: %s", pathname)
+ }
+
+ dirent := &Dirent{
+ name: filepath.Base(pathname),
+ path: filepath.Dir(pathname),
+ modeType: mode & os.ModeType,
+ }
+
+ if len(options.ScratchBuffer) < MinimumScratchBufferSize {
+ options.ScratchBuffer = newScratchBuffer()
+ }
+
+ // If ErrorCallback is nil, set to a default value that halts the walk
+ // process on all operating system errors. This is done to allow error
+ // handling to be more succinct in the walk code.
+ if options.ErrorCallback == nil {
+ options.ErrorCallback = defaultErrorCallback
+ }
+
+ if err = walk(pathname, dirent, options); err != filepath.SkipDir {
+ return err
+ }
+ return nil // silence SkipDir for top level
+}
+
+// defaultErrorCallback always returns Halt because if the upstream code did not
+// provide an ErrorCallback function, walking the file system hierarchy ought to
+// halt upon any operating system error.
+func defaultErrorCallback(_ string, _ error) ErrorAction { return Halt }
+
+// walk recursively traverses the file system node specified by pathname and the
+// Dirent.
+func walk(osPathname string, dirent *Dirent, options *Options) error {
+ err := options.Callback(osPathname, dirent)
+ if err != nil {
+ if err == filepath.SkipDir {
+ return err
+ }
+ if action := options.ErrorCallback(osPathname, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+
+ if dirent.IsSymlink() {
+ if !options.FollowSymbolicLinks {
+ return nil
+ }
+ // Does this symlink point to a directory?
+ info, err := os.Stat(osPathname)
+ if err != nil {
+ if action := options.ErrorCallback(osPathname, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+ if !info.IsDir() {
+ return nil
+ }
+ } else if !dirent.IsDir() {
+ return nil
+ }
+
+ // If get here, then specified pathname refers to a directory or a
+ // symbolic link to a directory.
+
+ var ds scanner
+
+ if options.Unsorted {
+ // When upstream does not request a sorted iteration, it's more memory
+ // efficient to read a single child at a time from the file system.
+ ds, err = NewScanner(osPathname)
+ } else {
+ // When upstream wants a sorted iteration, we must read the entire
+ // directory and sort through the child names, and then iterate on each
+ // child.
+ ds, err = newSortedScanner(osPathname, options.ScratchBuffer)
+ }
+ if err != nil {
+ if action := options.ErrorCallback(osPathname, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+
+ for ds.Scan() {
+ deChild, err := ds.Dirent()
+ osChildname := filepath.Join(osPathname, deChild.name)
+ if err != nil {
+ if action := options.ErrorCallback(osChildname, err); action == SkipNode {
+ return nil
+ }
+ return err
+ }
+ err = walk(osChildname, deChild, options)
+ debug("osChildname: %q; error: %v\n", osChildname, err)
+ if err == nil {
+ continue
+ }
+ if err != filepath.SkipDir {
+ return err
+ }
+ // When received SkipDir on a directory or a symbolic link to a
+ // directory, stop processing that directory but continue processing
+ // siblings. When received on a non-directory, stop processing
+ // remaining siblings.
+ isDir, err := deChild.IsDirOrSymlinkToDir()
+ if err != nil {
+ if action := options.ErrorCallback(osChildname, err); action == SkipNode {
+ continue // ignore and continue with next sibling
+ }
+ return err // caller does not approve of this error
+ }
+ if !isDir {
+ break // stop processing remaining siblings, but allow post children callback
+ }
+ // continue processing remaining siblings
+ }
+ if err = ds.Err(); err != nil {
+ return err
+ }
+
+ if options.PostChildrenCallback == nil {
+ return nil
+ }
+
+ err = options.PostChildrenCallback(osPathname, dirent)
+ if err == nil || err == filepath.SkipDir {
+ return err
+ }
+
+ if action := options.ErrorCallback(osPathname, err); action == SkipNode {
+ return nil
+ }
+ return err
+}
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/.gitignore b/src/margo.sh/vendor/github.com/klauspost/asmfmt/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/.travis.yml b/src/margo.sh/vendor/github.com/klauspost/asmfmt/.travis.yml
new file mode 100644
index 00000000..4854d12c
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/.travis.yml
@@ -0,0 +1,30 @@
+language: go
+
+sudo: false
+
+os:
+ - linux
+ - osx
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - master
+
+install:
+ - go get ./...
+ - go get -u golang.org/x/lint/golint
+ - go get -u golang.org/x/tools/cmd/goimports
+
+script:
+ - go vet ./...
+ - diff <(goimports -d .) <(printf "")
+ - diff <(golint ./...) <(printf "")
+ - go test -v -cpu=2 ./...
+ - go test -v -cpu=1,2,4 -short -race ./...
+
+matrix:
+ allow_failures:
+ - go: 'master'
+ fast_finish: true
diff --git a/src/disposa.blue/margo/LICENSE b/src/margo.sh/vendor/github.com/klauspost/asmfmt/LICENSE
similarity index 96%
rename from src/disposa.blue/margo/LICENSE
rename to src/margo.sh/vendor/github.com/klauspost/asmfmt/LICENSE
index 68a72b33..5cec7ee9 100644
--- a/src/disposa.blue/margo/LICENSE
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2015 The MarGo Authors
+Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/README.md b/src/margo.sh/vendor/github.com/klauspost/asmfmt/README.md
new file mode 100644
index 00000000..8e7eab8d
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/README.md
@@ -0,0 +1,112 @@
+# asmfmt
+Go Assembler Formatter
+
+This will format your assembler code in a similar way that `gofmt` formats your Go code.
+
+Read Introduction: [asmfmt: Go Assembler Formatter](https://blog.klauspost.com/asmfmt-assembler-formatter/)
+
+[](https://travis-ci.org/klauspost/asmfmt)
+[](https://ci.appveyor.com/project/klauspost/asmfmt/branch/master)
+[![GoDoc][1]][2]
+
+[1]: https://godoc.org/github.com/klauspost/asmfmt?status.svg
+[2]: https://godoc.org/github.com/klauspost/asmfmt
+
+See [Example 1](https://files.klauspost.com/diff.html), [Example 2](https://files.klauspost.com/diff2.html), [Example 3](https://files.klauspost.com/diff3.html), or compare files in the [testdata folder](https://github.com/klauspost/asmfmt/tree/master/testdata).
+
+Status: STABLE. The format will only change if bugs are found. Please report any feedback in the issue section.
+
+# install
+
+To install the standalone formatter,
+`go get -u github.com/klauspost/asmfmt/cmd/asmfmt`
+
+There are also replacements for `gofmt`, `goimports` and `goreturns`, which will process `.s` files alongside your go files when formatting a package.
+
+You can choose which to install:
+```
+go get -u github.com/klauspost/asmfmt/cmd/gofmt/...
+go get -u github.com/klauspost/asmfmt/cmd/goimports/...
+go get -u github.com/klauspost/asmfmt/cmd/goreturns/...
+```
+
+Note that these require **Go 1.5** due to changes in import paths.
+
+To test if the modified version is used, use `goimports -help`, and the output should look like this:
+
+```
+usage: goimports [flags] [path ...]
+ [flags]
+(this version includes asmfmt)
+```
+
+Using `gofmt -w mypackage` will Gofmt your Go files and format all assembler files as well.
+
+# updates
+
+* Aug 8, 2016: Don't indent comments before non-indented instruction.
+* Jun 10, 2016: Fixed crash with end-of-line comments that contained an end-of-block `/*` part.
+* Apr 14, 2016: Fix end of multiline comments in macro definitions.
+* Apr 14, 2016: Updated tools to Go 1.5+
+* Dec 21, 2015: Space before semi-colons in macro definitions is now trimmed.
+* Dec 21, 2015: Fix line comments in macro definitions (only valid with Go 1.5).
+* Dec 17, 2015: Comments are better aligned to the following section.
+* Dec 17, 2015: Clean semi-colons in multiple instruction per line.
+
+# emacs
+
+To automatically format assembler, in `.emacs` add:
+
+```
+(defun asm-mode-setup ()
+ (set (make-local-variable 'gofmt-command) "asmfmt")
+ (add-hook 'before-save-hook 'gofmt nil t)
+)
+
+(add-hook 'asm-mode-hook 'asm-mode-setup)
+```
+
+# usage
+
+`asmfmt [flags] [path ...]`
+
+The flags are similar to `gofmt`, except it will only process `.s` files:
+```
+ -d
+ Do not print reformatted sources to standard output.
+ If a file's formatting is different than asmfmt's, print diffs
+ to standard output.
+ -e
+ Print all (including spurious) errors.
+ -l
+ Do not print reformatted sources to standard output.
+ If a file's formatting is different from asmfmt's, print its name
+ to standard output.
+ -w
+ Do not print reformatted sources to standard output.
+ If a file's formatting is different from asmfmt's, overwrite it
+ with asmfmt's version.
+```
+You should only run `asmfmt` on files that are assembler files. Assembler files cannot be positively identified, so it will mangle non-assembler files.
+
+# formatting
+
+* Automatic indentation.
+* It uses tabs for indentation and blanks for alignment.
+* It will remove trailing whitespace.
+* It will align the first parameter.
+* It will align all comments in a block.
+* It will eliminate multiple blank lines.
+* Removes `;` at end of line.
+* Forced newline before comments, except when preceded by label or another comment.
+* Forced newline before labels, except when preceded by comment.
+* Labels are on a separate lines, except for comments.
+* Retains block breaks (newline between blocks).
+* It will convert single line block comments to line comments.
+* Line comments have a space after `//`, except if comment starts with `+`.
+* There is always a space between parameters.
+* Macros in the same file are tracked, and not included in parameter indentation.
+* `TEXT`, `DATA` and `GLOBL`, `FUNCDATA`, `PCDATA` and labels are level 0 indentation.
+* Aligns `\` in multiline macros.
+* Whitespace before separating `;` is removed. Space is inserted after, if followed by another instruction.
+
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/appveyor.yml b/src/margo.sh/vendor/github.com/klauspost/asmfmt/appveyor.yml
new file mode 100644
index 00000000..e788e855
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/appveyor.yml
@@ -0,0 +1,18 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\klauspost\asmfmt
+
+environment:
+ GOPATH: c:\gopath
+
+install:
+ - echo %PATH%
+ - echo %GOPATH%
+ - go version
+ - go env
+ - go get -d ./...
+
+build_script:
+ - go test -v ./...
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/asmfmt.go b/src/margo.sh/vendor/github.com/klauspost/asmfmt/asmfmt.go
new file mode 100644
index 00000000..568514d6
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/asmfmt.go
@@ -0,0 +1,625 @@
+package asmfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "unicode"
+)
+
+// Format the input and return the formatted data.
+// If any error is encountered, no data will be returned.
+func Format(in io.Reader) ([]byte, error) {
+ var src *bufio.Reader
+ var ok bool
+ src, ok = in.(*bufio.Reader)
+ if !ok {
+ src = bufio.NewReader(in)
+ }
+ dst := &bytes.Buffer{}
+ state := fstate{out: dst, defines: make(map[string]struct{})}
+ for {
+ data, _, err := src.ReadLine()
+ if err == io.EOF {
+ state.flush()
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ err = state.addLine(data)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return dst.Bytes(), nil
+}
+
+type fstate struct {
+ out *bytes.Buffer
+ insideBlock bool // Block comment
+ indentation int // Indentation level
+ lastEmpty bool
+ lastComment bool
+ lastStar bool // Block comment, last line started with a star.
+ lastLabel bool
+ anyContents bool
+ lastContinued bool // Last line continued
+ queued []statement
+ comments []string
+ defines map[string]struct{}
+}
+
+type statement struct {
+ instruction string
+ params []string // Parameters
+ comment string // Without slashes
+ function bool // Probably define call
+ continued bool // Multiline statement, continues on next line
+ contComment bool // Multiline statement, comment only
+}
+
+// Add a new input line.
+// Since you are looking at this code:
+// This code has grown over a considerable amount of time,
+// and deserves a rewrite with proper parsing instead of this hodgepodge.
+// Its output is stable, and could be used as reference for a rewrite.
+func (f *fstate) addLine(b []byte) error {
+ if bytes.Contains(b, []byte{0}) {
+ return fmt.Errorf("zero (0) byte in input. file is unlikely an assembler file")
+ }
+ s := string(b)
+ // Inside block comment
+ if f.insideBlock {
+ defer func() {
+ f.lastComment = true
+ }()
+ if strings.Contains(s, "*/") {
+ ends := strings.Index(s, "*/")
+ end := s[:ends]
+ if strings.HasPrefix(strings.TrimSpace(s), "*") && f.lastStar {
+ end = strings.TrimSpace(end) + " "
+ }
+ end = end + "*/"
+ f.insideBlock = false
+ s = strings.TrimSpace(s[ends+2:])
+ if strings.HasSuffix(s, "\\") {
+ end = end + " \\"
+ if len(s) == 1 {
+ s = ""
+ }
+ }
+ f.out.WriteString(end + "\n")
+ if len(s) == 0 {
+ return nil
+ }
+ } else {
+ // Insert a space on lines that begin with '*'
+ if strings.HasPrefix(strings.TrimSpace(s), "*") {
+ s = strings.TrimSpace(s)
+ f.out.WriteByte(' ')
+ f.lastStar = true
+ } else {
+ f.lastStar = false
+ }
+ fmt.Fprintln(f.out, s)
+ return nil
+ }
+ }
+ s = strings.TrimSpace(s)
+
+ // Comment is the only line content.
+ if strings.HasPrefix(s, "//") {
+ // Non-comment content is now added.
+ defer func() {
+ f.anyContents = true
+ f.lastEmpty = false
+ f.lastStar = false
+ }()
+
+ s = strings.TrimPrefix(s, "//")
+ if len(f.queued) > 0 {
+ f.flush()
+ }
+ // Newline before comments
+ if len(f.comments) == 0 {
+ f.newLine()
+ }
+
+ // Preserve whitespace if the first character after the comment
+ // is a whitespace
+ ts := strings.TrimSpace(s)
+ var q string
+ if (ts != s && len(ts) > 0) || (len(s) > 0 && strings.ContainsAny(string(s[0]), `+/`)) {
+ q = fmt.Sprint("//" + s)
+ } else if len(ts) > 0 {
+ // Insert a space before the comment
+ q = fmt.Sprint("// " + s)
+ } else {
+ q = fmt.Sprint("//")
+ }
+ f.comments = append(f.comments, q)
+ f.lastComment = true
+ return nil
+ }
+
+ // Handle end-of blockcomments.
+ if strings.Contains(s, "/*") && !strings.HasSuffix(s, `\`) {
+ starts := strings.Index(s, "/*")
+ ends := strings.Index(s, "*/")
+ lineComment := strings.Index(s, "//")
+ if lineComment >= 0 {
+ if lineComment < starts {
+ goto exitcomm
+ }
+ if lineComment < ends && !f.insideBlock {
+ goto exitcomm
+ }
+ }
+ pre := s[:starts]
+ pre = strings.TrimSpace(pre)
+
+ if len(pre) > 0 {
+ if strings.HasSuffix(s, `\`) {
+ goto exitcomm
+ }
+ // Add items before the comment section as a line.
+ if ends > starts && ends >= len(s)-2 {
+ comm := strings.TrimSpace(s[starts+2 : ends])
+ return f.addLine([]byte(pre + " //" + comm))
+ }
+ err := f.addLine([]byte(pre))
+ if err != nil {
+ return err
+ }
+ }
+
+ f.flush()
+
+ // Convert single line /* comment */ to // Comment
+ if ends > starts && ends >= len(s)-2 {
+ return f.addLine([]byte("// " + strings.TrimSpace(s[starts+2:ends])))
+ }
+
+ // Comments inside multiline defines.
+ if strings.HasSuffix(s, `\`) {
+ f.indent()
+ s = strings.TrimSpace(strings.TrimSuffix(s, `\`)) + ` \`
+ }
+
+ // Otherwise output
+ fmt.Fprint(f.out, "/*")
+ s = strings.TrimSpace(s[starts+2:])
+ f.insideBlock = ends < 0
+ f.lastComment = true
+ f.lastStar = true
+ if len(s) == 0 {
+ f.out.WriteByte('\n')
+ return nil
+ }
+ f.out.WriteByte(' ')
+ f.out.WriteString(s + "\n")
+ return nil
+ }
+exitcomm:
+
+ if len(s) == 0 {
+ f.flush()
+
+ // No more than two empty lines in a row
+ // cannot start with NL
+ if f.lastEmpty || !f.anyContents {
+ return nil
+ }
+ if f.lastContinued {
+ f.indentation = 0
+ f.lastContinued = false
+ }
+ f.lastEmpty = true
+ return f.out.WriteByte('\n')
+ }
+
+ // Non-comment content is now added.
+ defer func() {
+ f.anyContents = true
+ f.lastEmpty = false
+ f.lastStar = false
+ f.lastComment = false
+ }()
+
+ st := newStatement(s, f.defines)
+ if st == nil {
+ return nil
+ }
+ if def := st.define(); def != "" {
+ f.defines[def] = struct{}{}
+ }
+ if st.instruction == "package" {
+ if _, ok := f.defines["package"]; !ok {
+ return fmt.Errorf("package instruction found. Go files are not supported")
+ }
+ }
+
+ // Move anything that isn't a comment to the next line
+ if st.isLabel() && len(st.params) > 0 && !st.continued {
+ idx := strings.Index(s, ":")
+ st = newStatement(s[:idx+1], f.defines)
+ defer f.addLine([]byte(s[idx+1:]))
+ }
+
+ // Should this line be at level 0?
+ if st.level0() && !(st.continued && f.lastContinued) {
+ if st.isTEXT() && len(f.queued) == 0 && len(f.comments) > 0 {
+ f.indentation = 0
+ }
+ f.flush()
+
+ // Add newline before jump target.
+ f.newLine()
+
+ f.indentation = 0
+ f.queued = append(f.queued, *st)
+ f.flush()
+
+ if !st.isPreProcessor() && !st.isGlobal() {
+ f.indentation = 1
+ }
+ f.lastLabel = true
+ return nil
+ }
+
+ defer func() {
+ f.lastLabel = false
+ }()
+ f.queued = append(f.queued, *st)
+ if st.isTerminator() || (f.lastContinued && !st.continued) {
+ // Terminators should always be at level 1
+ f.indentation = 1
+ f.flush()
+ f.indentation = 0
+ } else if st.isCommand() {
+ // handles cases where a JMP/RET isn't a terminator
+ f.indentation = 1
+ }
+ f.lastContinued = st.continued
+ return nil
+}
+
+// indent the current line with current indentation.
+func (f *fstate) indent() {
+ for i := 0; i < f.indentation; i++ {
+ f.out.WriteByte('\t')
+ }
+}
+
+// flush any queued comments and commands
+func (f *fstate) flush() {
+ for _, line := range f.comments {
+ f.indent()
+ fmt.Fprintln(f.out, line)
+ }
+ f.comments = nil
+ s := formatStatements(f.queued)
+ for _, line := range s {
+ f.indent()
+ fmt.Fprintln(f.out, line)
+ }
+ f.queued = nil
+}
+
+// Add a newline, unless last line was empty or a comment
+func (f *fstate) newLine() {
+ // Always newline before comment-only line.
+ if !f.lastEmpty && !f.lastComment && !f.lastLabel && f.anyContents {
+ f.out.WriteByte('\n')
+ }
+}
+
+// newStatement will parse a line and return it as a statement.
+// Will return nil if the line is empty after whitespace removal.
+func newStatement(s string, defs map[string]struct{}) *statement {
+ s = strings.TrimSpace(s)
+ st := statement{}
+
+ // Find where a comment starts, if any
+ startcom := strings.Index(s, "//")
+ if startcom > 0 {
+ st.comment = strings.TrimSpace(s[startcom+2:])
+ s = strings.TrimSpace(s[:startcom])
+ }
+
+ // Split into fields
+ fields := strings.Fields(s)
+ if len(fields) < 1 {
+ return nil
+ }
+ st.instruction = fields[0]
+
+ // Handle defined macro calls
+ if len(defs) > 0 {
+ inst := strings.Split(st.instruction, "(")[0]
+ if _, ok := defs[inst]; ok {
+ st.function = true
+ }
+ }
+ if strings.HasPrefix(s, "/*") {
+ st.function = true
+ }
+ // We may not have it defined as a macro, if defined in an external
+ // .h file, so we try to detect the remaining ones.
+ if strings.ContainsAny(st.instruction, "(_") {
+ st.function = true
+ }
+ if len(st.params) > 0 && strings.HasPrefix(st.params[0], "(") {
+ st.function = true
+ }
+ if st.function {
+ st.instruction = s
+ }
+
+ if st.instruction == "\\" && len(st.comment) > 0 {
+ st.instruction = fmt.Sprintf("\\ // %s", st.comment)
+ st.comment = ""
+ st.function = true
+ st.continued = true
+ st.contComment = true
+ }
+
+ s = strings.TrimPrefix(s, st.instruction)
+ st.instruction = strings.Replace(st.instruction, "\t", " ", -1)
+ s = strings.TrimSpace(s)
+
+ st.setParams(s)
+
+ // Remove trailing ;
+ if len(st.params) > 0 {
+ st.params[len(st.params)-1] = strings.TrimSuffix(st.params[len(st.params)-1], ";")
+ } else {
+ st.instruction = strings.TrimSuffix(st.instruction, ";")
+ }
+
+ // Register line continuations.
+ if len(st.params) > 0 {
+ p := st.params[len(st.params)-1]
+ if st.willContinue() {
+ p = strings.TrimSuffix(st.params[len(st.params)-1], `\`)
+ p = strings.TrimSpace(p)
+ if len(p) > 0 {
+ st.params[len(st.params)-1] = p
+ } else {
+ st.params = st.params[:len(st.params)-1]
+ }
+ st.continued = true
+ }
+ }
+ if strings.HasSuffix(st.instruction, `\`) && !st.contComment {
+ i := strings.TrimSuffix(st.instruction, `\`)
+ st.instruction = strings.TrimSpace(i)
+ st.continued = true
+ }
+
+ if len(st.params) == 0 && !st.isLabel() {
+ st.function = true
+ }
+
+ return &st
+}
+
+// setParams will add the string given as parameters.
+// Inline comments are retained.
+// There will be a space after ",", unless inside a comment.
+// A tab is replaced by a space for consistent indentation.
+func (st *statement) setParams(s string) {
+ st.params = make([]string, 0)
+ runes := []rune(s)
+ last := '\n'
+ inComment := false
+ out := make([]rune, 0, len(runes))
+ for _, r := range runes {
+ switch r {
+ case ',':
+ if inComment {
+ break
+ }
+ c := strings.TrimSpace(string(out))
+ if len(c) > 0 {
+ st.params = append(st.params, c)
+ }
+ out = out[0:0]
+ continue
+ case '/':
+ if last == '*' && inComment {
+ inComment = false
+ }
+ case '*':
+ if last == '/' {
+ inComment = true
+ }
+ case '\t':
+ if !st.isPreProcessor() {
+ r = ' '
+ }
+ case ';':
+ if !inComment {
+ out = []rune(strings.TrimSpace(string(out)) + "; ")
+ last = r
+ continue
+ }
+ }
+ if last == ';' && unicode.IsSpace(r) {
+ continue
+ }
+ last = r
+ out = append(out, r)
+ }
+ c := strings.TrimSpace(string(out))
+ if len(c) > 0 {
+ st.params = append(st.params, c)
+ }
+}
+
+// Return true if this line should be at indentation level 0.
+func (st statement) level0() bool {
+ return st.isLabel() || st.isTEXT() || st.isPreProcessor()
+}
+
+// Will return true if the statement is a label.
+func (st statement) isLabel() bool {
+ return strings.HasSuffix(st.instruction, ":")
+}
+
+// isPreProcessor will return if the statement is a preprocessor statement.
+func (st statement) isPreProcessor() bool {
+ return strings.HasPrefix(st.instruction, "#")
+}
+
+// isGlobal returns true if the current instruction is
+// a global. Currently that is DATA, GLOBL, FUNCDATA and PCDATA
+func (st statement) isGlobal() bool {
+ up := strings.ToUpper(st.instruction)
+ switch up {
+ case "DATA", "GLOBL", "FUNCDATA", "PCDATA":
+ return true
+ default:
+ return false
+ }
+}
+
+// isTEXT returns true if the instruction is "TEXT"
+// or one of the "isGlobal" types
+func (st statement) isTEXT() bool {
+ up := strings.ToUpper(st.instruction)
+ return up == "TEXT" || st.isGlobal()
+}
+
+// We attempt to identify "terminators", after which
+// indentation is likely to be level 0.
+func (st statement) isTerminator() bool {
+ up := strings.ToUpper(st.instruction)
+ return up == "RET" || up == "JMP"
+}
+
+// Detects commands based on case.
+func (st statement) isCommand() bool {
+ if st.isLabel() {
+ return false
+ }
+ up := strings.ToUpper(st.instruction)
+ return up == st.instruction
+}
+
+// Detect if last character is '\', indicating a multiline statement.
+func (st statement) willContinue() bool {
+ if st.continued {
+ return true
+ }
+ if len(st.params) == 0 {
+ return false
+ }
+ return strings.HasSuffix(st.params[len(st.params)-1], `\`)
+}
+
+// define returns the macro defined in this line.
+// if none is defined "" is returned.
+func (st statement) define() string {
+ if st.instruction == "#define" && len(st.params) > 0 {
+ r := strings.TrimSpace(strings.Split(st.params[0], "(")[0])
+ r = strings.Trim(r, `\`)
+ return r
+ }
+ return ""
+}
+
+func (st *statement) cleanParams() {
+ // Remove whitespace before semicolons
+ if strings.HasSuffix(st.instruction, ";") {
+ s := strings.TrimSuffix(st.instruction, ";")
+ st.instruction = strings.TrimSpace(s) + ";"
+ }
+}
+
+// formatStatements will format a slice of statements and return each line
+// as a separate string.
+// Comments and line-continuation (\) are aligned with spaces.
+func formatStatements(s []statement) []string {
+ res := make([]string, len(s))
+ maxParam := 0 // Length of longest parameter
+ maxInstr := 0 // Length of longest instruction WITH parameters.
+ maxAlone := 0 // Length of longest instruction without parameters.
+ for i, x := range s {
+ // Clean up and store
+ x.cleanParams()
+ s[i] = x
+
+ il := len([]rune(x.instruction)) + 1 // Instruction length
+ l := il
+ // Ignore length if we are a define "function"
+ // or we are a parameterless instruction.
+ if l > maxInstr && !x.function && !(x.isCommand() && len(x.params) == 0) {
+ maxInstr = l
+ }
+ if x.function && il > maxAlone {
+ maxAlone = il
+ }
+ if len(x.params) > 1 {
+ l = 2 * (len(x.params) - 1) // Spaces between parameters
+ } else {
+ l = 0
+ }
+ // Add parameters
+ for _, y := range x.params {
+ l += len([]rune(y))
+ }
+ l++
+ if l > maxParam {
+ maxParam = l
+ }
+ }
+
+ maxParam += maxInstr
+ if maxInstr == 0 {
+ maxInstr = maxAlone
+ }
+
+ for i, x := range s {
+ r := x.instruction
+ if x.contComment {
+ res[i] = x.instruction
+ continue
+ }
+ p := strings.Join(x.params, ", ")
+ if len(x.params) > 0 || len(x.comment) > 0 {
+ for len(r) < maxInstr {
+ r += " "
+ }
+ }
+ r = r + p
+ if len(x.comment) > 0 && !x.continued {
+ it := maxParam - len([]rune(r))
+ for i := 0; i < it; i++ {
+ r = r + " "
+ }
+ r += fmt.Sprintf("// %s", x.comment)
+ }
+
+ if x.continued {
+ // Find continuation placement.
+ it := maxParam - len([]rune(r))
+ if maxAlone > maxParam {
+ it = maxAlone - len([]rune(r))
+ }
+ for i := 0; i < it; i++ {
+ r = r + " "
+ }
+ r += `\`
+ // Add comment, if any.
+ if len(x.comment) > 0 {
+ r += " // " + x.comment
+ }
+ }
+ res[i] = r
+ }
+ return res
+}
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE b/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE
new file mode 100644
index 00000000..74487567
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/goimports/LICENSE b/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/goimports/LICENSE
new file mode 100644
index 00000000..65d761bc
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/goimports/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/goreturns/LICENSE b/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/goreturns/LICENSE
new file mode 100644
index 00000000..c0d871c8
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/klauspost/asmfmt/cmd/goreturns/LICENSE
@@ -0,0 +1,59 @@
+Copyright (c) 2014 Sourcegraph Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Sourcegraph nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+Portions derived from goimports (license follows).
+
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/LICENSE b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/LICENSE
new file mode 100644
index 00000000..49ea0f92
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go
new file mode 100644
index 00000000..c94b3848
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go
@@ -0,0 +1,47 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: Figure out what gopkg.in should do.
+
+package modfile
+
+import "strings"
+
+// ParseGopkgIn splits gopkg.in import paths into their constituent parts
+func ParseGopkgIn(path string) (root, repo, major, subdir string, ok bool) {
+ if !strings.HasPrefix(path, "gopkg.in/") {
+ return
+ }
+ f := strings.Split(path, "/")
+ if len(f) >= 2 {
+ if elem, v, ok := dotV(f[1]); ok {
+ root = strings.Join(f[:2], "/")
+ repo = "github.com/go-" + elem + "/" + elem
+ major = v
+ subdir = strings.Join(f[2:], "/")
+ return root, repo, major, subdir, true
+ }
+ }
+ if len(f) >= 3 {
+ if elem, v, ok := dotV(f[2]); ok {
+ root = strings.Join(f[:3], "/")
+ repo = "github.com/" + f[1] + "/" + elem
+ major = v
+ subdir = strings.Join(f[3:], "/")
+ return root, repo, major, subdir, true
+ }
+ }
+ return
+}
+
+func dotV(name string) (elem, v string, ok bool) {
+ i := len(name) - 1
+ for i >= 0 && '0' <= name[i] && name[i] <= '9' {
+ i--
+ }
+ if i <= 2 || i+1 >= len(name) || name[i-1] != '.' || name[i] != 'v' || name[i+1] == '0' && len(name) != i+2 {
+ return "", "", false
+ }
+ return name[:i-1], name[i:], true
+}
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/print.go b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/print.go
new file mode 100644
index 00000000..7b1dd8f9
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/print.go
@@ -0,0 +1,164 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modfile implements parsing and formatting for
+// go.mod files.
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+func Format(f *FileSyntax) []byte {
+ pr := &printer{}
+ pr.file(f)
+ return pr.Bytes()
+}
+
+// A printer collects the state during printing of a file or expression.
+type printer struct {
+ bytes.Buffer // output buffer
+ comment []Comment // pending end-of-line comments
+ margin int // left margin (indent), a number of tabs
+}
+
+// printf prints to the buffer.
+func (p *printer) printf(format string, args ...interface{}) {
+ fmt.Fprintf(p, format, args...)
+}
+
+// indent returns the position on the current line, in bytes, 0-indexed.
+func (p *printer) indent() int {
+ b := p.Bytes()
+ n := 0
+ for n < len(b) && b[len(b)-1-n] != '\n' {
+ n++
+ }
+ return n
+}
+
+// newline ends the current line, flushing end-of-line comments.
+func (p *printer) newline() {
+ if len(p.comment) > 0 {
+ p.printf(" ")
+ for i, com := range p.comment {
+ if i > 0 {
+ p.trim()
+ p.printf("\n")
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+ }
+ p.printf("%s", strings.TrimSpace(com.Token))
+ }
+ p.comment = p.comment[:0]
+ }
+
+ p.trim()
+ p.printf("\n")
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+}
+
+// trim removes trailing spaces and tabs from the current line.
+func (p *printer) trim() {
+ // Remove trailing spaces and tabs from line we're about to end.
+ b := p.Bytes()
+ n := len(b)
+ for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') {
+ n--
+ }
+ p.Truncate(n)
+}
+
+// file formats the given file into the print buffer.
+func (p *printer) file(f *FileSyntax) {
+ for _, com := range f.Before {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+
+ for i, stmt := range f.Stmt {
+ switch x := stmt.(type) {
+ case *CommentBlock:
+ // comments already handled
+ p.expr(x)
+
+ default:
+ p.expr(x)
+ p.newline()
+ }
+
+ for _, com := range stmt.Comment().After {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+
+ if i+1 < len(f.Stmt) {
+ p.newline()
+ }
+ }
+}
+
+func (p *printer) expr(x Expr) {
+ // Emit line-comments preceding this expression.
+ if before := x.Comment().Before; len(before) > 0 {
+ // Want to print a line comment.
+ // Line comments must be at the current margin.
+ p.trim()
+ if p.indent() > 0 {
+ // There's other text on the line. Start a new line.
+ p.printf("\n")
+ }
+ // Re-indent to margin.
+ for i := 0; i < p.margin; i++ {
+ p.printf("\t")
+ }
+ for _, com := range before {
+ p.printf("%s", strings.TrimSpace(com.Token))
+ p.newline()
+ }
+ }
+
+ switch x := x.(type) {
+ default:
+ panic(fmt.Errorf("printer: unexpected type %T", x))
+
+ case *CommentBlock:
+ // done
+
+ case *LParen:
+ p.printf("(")
+ case *RParen:
+ p.printf(")")
+
+ case *Line:
+ sep := ""
+ for _, tok := range x.Token {
+ p.printf("%s%s", sep, tok)
+ sep = " "
+ }
+
+ case *LineBlock:
+ for _, tok := range x.Token {
+ p.printf("%s ", tok)
+ }
+ p.expr(&x.LParen)
+ p.margin++
+ for _, l := range x.Line {
+ p.newline()
+ p.expr(l)
+ }
+ p.margin--
+ p.newline()
+ p.expr(&x.RParen)
+ }
+
+ // Queue end-of-line comments for printing when we
+ // reach the end of the line.
+ p.comment = append(p.comment, x.Comment().Suffix...)
+}
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/read.go b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/read.go
new file mode 100644
index 00000000..1d81ff1a
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/read.go
@@ -0,0 +1,869 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Module file parser.
+// This is a simplified copy of Google's buildifier parser.
+
+package modfile
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Position describes the position between two bytes of input.
+type Position struct {
+ Line int // line in input (starting at 1)
+ LineRune int // rune in line (starting at 1)
+ Byte int // byte in input (starting at 0)
+}
+
+// add returns the position at the end of s, assuming it starts at p.
+func (p Position) add(s string) Position {
+	p.Byte += len(s)
+	// If s spans newlines, advance Line and restart the rune count
+	// from the text after the final newline.
+	if i := strings.LastIndex(s, "\n"); i >= 0 {
+		p.Line += strings.Count(s, "\n")
+		p.LineRune = 1
+		s = s[i+1:]
+	}
+	p.LineRune += utf8.RuneCountInString(s)
+	return p
+}
+
+// An Expr represents an input element.
+type Expr interface {
+ // Span returns the start and end position of the expression,
+ // excluding leading or trailing comments.
+ Span() (start, end Position)
+
+ // Comment returns the comments attached to the expression.
+ // This method would normally be named 'Comments' but that
+ // would interfere with embedding a type of the same name.
+ Comment() *Comments
+}
+
+// A Comment represents a single // comment.
+type Comment struct {
+ Start Position
+ Token string // without trailing newline
+ Suffix bool // an end of line (not whole line) comment
+}
+
+// Comments collects the comments associated with an expression.
+type Comments struct {
+ Before []Comment // whole-line comments before this expression
+ Suffix []Comment // end-of-line comments after this expression
+
+ // For top-level expressions only, After lists whole-line
+ // comments following the expression.
+ After []Comment
+}
+
+// Comment returns the receiver. This isn't useful by itself, but
+// a Comments struct is embedded into all the expression
+// implementation types, and this gives each of those a Comment
+// method to satisfy the Expr interface.
+// The method is deliberately trivial; it exists only for embedding.
+func (c *Comments) Comment() *Comments {
+	return c
+}
+
+// A FileSyntax represents an entire go.mod file.
+type FileSyntax struct {
+ Name string // file path
+ Comments
+ Stmt []Expr
+}
+
+// Span returns the span from the start of the first statement to the
+// end of the last one; for an empty file both positions are zero.
+func (x *FileSyntax) Span() (start, end Position) {
+	if len(x.Stmt) == 0 {
+		return
+	}
+	start, _ = x.Stmt[0].Span()
+	_, end = x.Stmt[len(x.Stmt)-1].Span()
+	return start, end
+}
+
+// addLine adds a new line with the given tokens, placing it near hint
+// when possible (converting a matching *Line into a *LineBlock as
+// needed). If hint is nil, the last statement whose first token matches
+// tokens[0] is used as the hint. It returns the newly created *Line.
+func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line {
+	if hint == nil {
+		// If no hint given, add to the last statement of the given type.
+	Loop:
+		for i := len(x.Stmt) - 1; i >= 0; i-- {
+			stmt := x.Stmt[i]
+			switch stmt := stmt.(type) {
+			case *Line:
+				if stmt.Token != nil && stmt.Token[0] == tokens[0] {
+					hint = stmt
+					break Loop
+				}
+			case *LineBlock:
+				if stmt.Token[0] == tokens[0] {
+					hint = stmt
+					break Loop
+				}
+			}
+		}
+	}
+
+	if hint != nil {
+		for i, stmt := range x.Stmt {
+			switch stmt := stmt.(type) {
+			case *Line:
+				if stmt == hint {
+					// Convert line to line block.
+					stmt.InBlock = true
+					block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}}
+					stmt.Token = stmt.Token[1:]
+					x.Stmt[i] = block
+					new := &Line{Token: tokens[1:], InBlock: true}
+					block.Line = append(block.Line, new)
+					return new
+				}
+			case *LineBlock:
+				if stmt == hint {
+					new := &Line{Token: tokens[1:], InBlock: true}
+					stmt.Line = append(stmt.Line, new)
+					return new
+				}
+				for j, line := range stmt.Line {
+					if line == hint {
+						// Add new line after hint.
+						stmt.Line = append(stmt.Line, nil)
+						copy(stmt.Line[j+2:], stmt.Line[j+1:])
+						new := &Line{Token: tokens[1:], InBlock: true}
+						stmt.Line[j+1] = new
+						return new
+					}
+				}
+			}
+		}
+	}
+
+	// No usable hint: append a new top-level line.
+	new := &Line{Token: tokens}
+	x.Stmt = append(x.Stmt, new)
+	return new
+}
+
+// updateLine replaces the tokens of an existing line in place.
+// For a line inside a block, the leading verb token is dropped because
+// the block header already carries it.
+func (x *FileSyntax) updateLine(line *Line, tokens ...string) {
+	if line.InBlock {
+		tokens = tokens[1:]
+	}
+	line.Token = tokens
+}
+
+// removeLine marks line as dead (Token == nil); the line is physically
+// removed from the tree only by a later call to Cleanup.
+func (x *FileSyntax) removeLine(line *Line) {
+	line.Token = nil
+}
+
+// Cleanup cleans up the file syntax x after any edit operations.
+// To avoid quadratic behavior, removeLine marks the line as dead
+// by setting line.Token = nil but does not remove it from the slice
+// in which it appears. After edits have all been indicated,
+// calling Cleanup cleans out the dead lines.
+func (x *FileSyntax) Cleanup() {
+	w := 0 // write index for statements kept so far
+	for _, stmt := range x.Stmt {
+		switch stmt := stmt.(type) {
+		case *Line:
+			if stmt.Token == nil {
+				continue
+			}
+		case *LineBlock:
+			ww := 0 // write index for surviving lines inside this block
+			for _, line := range stmt.Line {
+				if line.Token != nil {
+					stmt.Line[ww] = line
+					ww++
+				}
+			}
+			if ww == 0 {
+				continue
+			}
+			if ww == 1 {
+				// Collapse block into single line.
+				line := &Line{
+					Comments: Comments{
+						Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
+						Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
+						After: commentsAdd(stmt.Line[0].After, stmt.After),
+					},
+					Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
+				}
+				x.Stmt[w] = line
+				w++
+				continue
+			}
+			stmt.Line = stmt.Line[:ww]
+		}
+		x.Stmt[w] = stmt
+		w++
+	}
+	x.Stmt = x.Stmt[:w]
+}
+
+// commentsAdd returns the concatenation of x and y in a fresh backing
+// array; the full-slice expression caps x so append cannot scribble
+// over memory shared with other slices.
+func commentsAdd(x, y []Comment) []Comment {
+	capped := x[:len(x):len(x)]
+	return append(capped, y...)
+}
+
+// stringsAdd returns the concatenation of x and y in a fresh backing
+// array; capping x via the full-slice expression forces append to copy.
+func stringsAdd(x, y []string) []string {
+	capped := x[:len(x):len(x)]
+	return append(capped, y...)
+}
+
+// A CommentBlock represents a top-level block of comments separate
+// from any rule.
+type CommentBlock struct {
+ Comments
+ Start Position
+}
+
+// Span returns Start for both ends: a comment block occupies no
+// token span of its own.
+func (x *CommentBlock) Span() (start, end Position) {
+	return x.Start, x.Start
+}
+
+// A Line is a single line of tokens.
+type Line struct {
+ Comments
+ Start Position
+ Token []string
+ InBlock bool
+ End Position
+}
+
+// Span returns the start and end positions recorded for the line.
+func (x *Line) Span() (start, end Position) {
+	return x.Start, x.End
+}
+
+// A LineBlock is a factored block of lines, like
+//
+// require (
+// "x"
+// "y"
+// )
+//
+type LineBlock struct {
+ Comments
+ Start Position
+ LParen LParen
+ Token []string
+ Line []*Line
+ RParen RParen
+}
+
+// Span runs from the block's first token through the closing paren.
+func (x *LineBlock) Span() (start, end Position) {
+	return x.Start, x.RParen.Pos.add(")")
+}
+
+// An LParen represents the beginning of a parenthesized line block.
+// It is a place to store suffix comments.
+type LParen struct {
+ Comments
+ Pos Position
+}
+
+// Span covers the single paren character at Pos.
+func (x *LParen) Span() (start, end Position) {
+	return x.Pos, x.Pos.add(")")
+}
+
+// An RParen represents the end of a parenthesized line block.
+// It is a place to store whole-line (before) comments.
+type RParen struct {
+ Comments
+ Pos Position
+}
+
+// Span covers the single paren character at Pos.
+func (x *RParen) Span() (start, end Position) {
+	return x.Pos, x.Pos.add(")")
+}
+
+// An input represents a single input file being parsed.
+type input struct {
+ // Lexing state.
+ filename string // name of input file, for errors
+ complete []byte // entire input
+ remaining []byte // remaining input
+ token []byte // token being scanned
+ lastToken string // most recently returned token, for error messages
+ pos Position // current input position
+ comments []Comment // accumulated comments
+ endRule int // position of end of current rule
+
+ // Parser state.
+ file *FileSyntax // returned top-level syntax tree
+ parseError error // error encountered during parsing
+
+ // Comment assignment state.
+ pre []Expr // all expressions, in preorder traversal
+ post []Expr // all expressions, in postorder traversal
+}
+
+// newInput returns an input ready to lex data, reporting errors
+// against filename, with the position starting at line 1, rune 1.
+func newInput(filename string, data []byte) *input {
+	return &input{
+		filename: filename,
+		complete: data,
+		remaining: data,
+		pos: Position{Line: 1, LineRune: 1, Byte: 0},
+	}
+}
+
+// parse parses the input file.
+// It returns the syntax tree with comments attached, or an error
+// naming the file and position of the first problem.
+func parse(file string, data []byte) (f *FileSyntax, err error) {
+	in := newInput(file, data)
+	// The parser panics for both routine errors like syntax errors
+	// and for programmer bugs like array index errors.
+	// Turn both into error returns. Catching bug panics is
+	// especially important when processing many files.
+	defer func() {
+		if e := recover(); e != nil {
+			if e == in.parseError {
+				err = in.parseError
+			} else {
+				err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
+			}
+		}
+	}()
+
+	// Invoke the parser.
+	in.parseFile()
+	if in.parseError != nil {
+		return nil, in.parseError
+	}
+	in.file.Name = in.filename
+
+	// Assign comments to nearby syntax.
+	in.assignComments()
+
+	return in.file, nil
+}
+
+// Error is called to report an error.
+// The reason s is often "syntax error".
+// Error does not return: it panics.
+// The reported position is the lexer's current position.
+func (in *input) Error(s string) {
+	if s == "syntax error" && in.lastToken != "" {
+		s += " near " + in.lastToken
+	}
+	in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
+	panic(in.parseError)
+}
+
+// eof reports whether the input has reached end of file.
+func (in *input) eof() bool {
+	return len(in.remaining) == 0
+}
+
+// peekRune returns the next rune in the input without consuming it.
+// At EOF it returns 0.
+func (in *input) peekRune() int {
+	if len(in.remaining) == 0 {
+		return 0
+	}
+	r, _ := utf8.DecodeRune(in.remaining)
+	return int(r)
+}
+
+// peekPrefix reports whether the remaining input begins with the given prefix.
+func (in *input) peekPrefix(prefix string) bool {
+	// This is like bytes.HasPrefix(in.remaining, []byte(prefix))
+	// but without the allocation of the []byte copy of prefix.
+	for i := 0; i < len(prefix); i++ {
+		if i >= len(in.remaining) || in.remaining[i] != prefix[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// readRune consumes and returns the next rune in the input,
+// advancing the line/rune/byte position accordingly.
+// Calling it at EOF is an internal error.
+func (in *input) readRune() int {
+	if len(in.remaining) == 0 {
+		in.Error("internal lexer error: readRune at EOF")
+	}
+	r, size := utf8.DecodeRune(in.remaining)
+	in.remaining = in.remaining[size:]
+	if r == '\n' {
+		in.pos.Line++
+		in.pos.LineRune = 1
+	} else {
+		in.pos.LineRune++
+	}
+	in.pos.Byte += size
+	return int(r)
+}
+
+type symType struct {
+ pos Position
+ endPos Position
+ text string
+}
+
+// startToken marks the beginning of the next input token.
+// It must be followed by a call to endToken, once the token has
+// been consumed using readRune.
+func (in *input) startToken(sym *symType) {
+	// Remember where the token starts in the byte stream so endToken
+	// can recover its text by length difference.
+	in.token = in.remaining
+	sym.text = ""
+	sym.pos = in.pos
+}
+
+// endToken marks the end of an input token.
+// It records the actual token string in sym.text if the caller
+// has not done that already, and records the end position.
+func (in *input) endToken(sym *symType) {
+	if sym.text == "" {
+		tok := string(in.token[:len(in.token)-len(in.remaining)])
+		sym.text = tok
+		in.lastToken = sym.text
+	}
+	sym.endPos = in.pos
+}
+
+// lex is called from the parser to obtain the next input token.
+// It returns the token value (either a rune like '+' or a symbolic token _FOR)
+// and sets val to the data associated with the token.
+// For all our input tokens, the associated data is
+// val.Pos (the position where the token begins)
+// and val.Token (the input string corresponding to the token).
+func (in *input) lex(sym *symType) int {
+	// Skip past spaces, stopping at non-space or EOF.
+	countNL := 0 // number of newlines we've skipped past
+	for !in.eof() {
+		// Skip over spaces. Count newlines so we can give the parser
+		// information about where top-level blank lines are,
+		// for top-level comment assignment.
+		c := in.peekRune()
+		if c == ' ' || c == '\t' || c == '\r' {
+			in.readRune()
+			continue
+		}
+
+		// Comment runs to end of line.
+		if in.peekPrefix("//") {
+			in.startToken(sym)
+
+			// Is this comment the only thing on its line?
+			// Find the last \n before this // and see if it's all
+			// spaces from there to here.
+			i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
+			suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0
+			in.readRune()
+			in.readRune()
+
+			// Consume comment.
+			for len(in.remaining) > 0 && in.readRune() != '\n' {
+			}
+			in.endToken(sym)
+
+			sym.text = strings.TrimRight(sym.text, "\n")
+			in.lastToken = "comment"
+
+			// If we are at top level (not in a statement), hand the comment to
+			// the parser as a _COMMENT token. The grammar is written
+			// to handle top-level comments itself.
+			if !suffix {
+				// Not in a statement. Tell parser about top-level comment.
+				return _COMMENT
+			}
+
+			// Otherwise, save comment for later attachment to syntax tree.
+			if countNL > 1 {
+				in.comments = append(in.comments, Comment{sym.pos, "", false})
+			}
+			in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix})
+			countNL = 1
+			return _EOL
+		}
+
+		if in.peekPrefix("/*") {
+			// Cleanup: the message is a constant, so the fmt.Sprintf
+			// wrapper was a no-op (staticcheck S1039); pass it directly.
+			in.Error("mod files must use // comments (not /* */ comments)")
+		}
+
+		// Found non-space non-comment.
+		break
+	}
+
+	// Found the beginning of the next token.
+	in.startToken(sym)
+	defer in.endToken(sym)
+
+	// End of file.
+	if in.eof() {
+		in.lastToken = "EOF"
+		return _EOF
+	}
+
+	// Punctuation tokens.
+	switch c := in.peekRune(); c {
+	case '\n':
+		in.readRune()
+		return c
+
+	case '(':
+		in.readRune()
+		return c
+
+	case ')':
+		in.readRune()
+		return c
+
+	case '"', '`': // quoted string
+		quote := c
+		in.readRune()
+		for {
+			if in.eof() {
+				in.pos = sym.pos
+				in.Error("unexpected EOF in string")
+			}
+			if in.peekRune() == '\n' {
+				in.Error("unexpected newline in string")
+			}
+			c := in.readRune()
+			if c == quote {
+				break
+			}
+			// Backslash escapes are only meaningful outside backquotes.
+			if c == '\\' && quote != '`' {
+				if in.eof() {
+					in.pos = sym.pos
+					in.Error("unexpected EOF in string")
+				}
+				in.readRune()
+			}
+		}
+		in.endToken(sym)
+		return _STRING
+	}
+
+	// Checked all punctuation. Must be identifier token.
+	if c := in.peekRune(); !isIdent(c) {
+		in.Error(fmt.Sprintf("unexpected input character %#q", c))
+	}
+
+	// Scan over identifier.
+	for isIdent(in.peekRune()) {
+		if in.peekPrefix("//") {
+			break
+		}
+		if in.peekPrefix("/*") {
+			// Same cleanup as above: constant message, no Sprintf needed.
+			in.Error("mod files must use // comments (not /* */ comments)")
+		}
+		in.readRune()
+	}
+	return _IDENT
+}
+
+// isIdent reports whether c is an identifier rune.
+// We treat nearly all runes as identifier runes.
+func isIdent(c int) bool {
+	if c == 0 {
+		return false // NUL / EOF sentinel from peekRune
+	}
+	return !unicode.IsSpace(rune(c))
+}
+
+// Comment assignment.
+// We build two lists of all subexpressions, preorder and postorder.
+// The preorder list is ordered by start location, with outer expressions first.
+// The postorder list is ordered by end location, with outer expressions last.
+// We use the preorder list to assign each whole-line comment to the syntax
+// immediately following it, and we use the postorder list to assign each
+// end-of-line comment to the syntax immediately preceding it.
+
+// order walks the expression adding it and its subexpressions to the
+// preorder and postorder lists. A nil x is tolerated and contributes
+// nothing to either list.
+func (in *input) order(x Expr) {
+	if x != nil {
+		in.pre = append(in.pre, x)
+	}
+	switch x := x.(type) {
+	default:
+		panic(fmt.Errorf("order: unexpected type %T", x))
+	case nil:
+		// nothing
+	case *LParen, *RParen:
+		// nothing
+	case *CommentBlock:
+		// nothing
+	case *Line:
+		// nothing
+	case *FileSyntax:
+		for _, stmt := range x.Stmt {
+			in.order(stmt)
+		}
+	case *LineBlock:
+		// Parens are ordered too so suffix comments can attach to them.
+		in.order(&x.LParen)
+		for _, l := range x.Line {
+			in.order(l)
+		}
+		in.order(&x.RParen)
+	}
+	if x != nil {
+		in.post = append(in.post, x)
+	}
+}
+
+// assignComments attaches comments to nearby syntax:
+// whole-line comments become Before comments on the first expression
+// at or after them, and end-of-line comments become Suffix comments
+// on the expression ending just before them.
+func (in *input) assignComments() {
+	const debug = false
+
+	// Generate preorder and postorder lists.
+	in.order(in.file)
+
+	// Split into whole-line comments and suffix comments.
+	var line, suffix []Comment
+	for _, com := range in.comments {
+		if com.Suffix {
+			suffix = append(suffix, com)
+		} else {
+			line = append(line, com)
+		}
+	}
+
+	if debug {
+		for _, c := range line {
+			fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+		}
+	}
+
+	// Assign line comments to syntax immediately following.
+	for _, x := range in.pre {
+		start, _ := x.Span()
+		if debug {
+			fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
+		}
+		xcom := x.Comment()
+		for len(line) > 0 && start.Byte >= line[0].Start.Byte {
+			if debug {
+				fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
+			}
+			xcom.Before = append(xcom.Before, line[0])
+			line = line[1:]
+		}
+	}
+
+	// Remaining line comments go at end of file.
+	in.file.After = append(in.file.After, line...)
+
+	if debug {
+		for _, c := range suffix {
+			fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
+		}
+	}
+
+	// Assign suffix comments to syntax immediately before.
+	for i := len(in.post) - 1; i >= 0; i-- {
+		x := in.post[i]
+
+		start, end := x.Span()
+		if debug {
+			fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
+		}
+
+		// Do not assign suffix comments to end of line block or whole file.
+		// Instead assign them to the last element inside.
+		switch x.(type) {
+		case *FileSyntax:
+			continue
+		}
+
+		// Do not assign suffix comments to something that starts
+		// on an earlier line, so that in
+		//
+		//	x ( y
+		//		z ) // comment
+		//
+		// we assign the comment to z and not to x ( ... ).
+		if start.Line != end.Line {
+			continue
+		}
+		xcom := x.Comment()
+		for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
+			if debug {
+				fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
+			}
+			xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
+			suffix = suffix[:len(suffix)-1]
+		}
+	}
+
+	// We assigned suffix comments in reverse.
+	// If multiple suffix comments were appended to the same
+	// expression node, they are now in reverse. Fix that.
+	for _, x := range in.post {
+		reverseComments(x.Comment().Suffix)
+	}
+
+	// Remaining suffix comments go at beginning of file.
+	in.file.Before = append(in.file.Before, suffix...)
+}
+
+// reverseComments reverses the []Comment list in place.
+func reverseComments(list []Comment) {
+	n := len(list)
+	for i := 0; i < n/2; i++ {
+		j := n - 1 - i
+		list[i], list[j] = list[j], list[i]
+	}
+}
+
+// parseFile parses the top level of a go.mod file, collecting
+// statements into in.file and grouping consecutive top-level
+// comments into CommentBlock statements.
+func (in *input) parseFile() {
+	in.file = new(FileSyntax)
+	var sym symType
+	var cb *CommentBlock // pending top-level comment block, if any
+	for {
+		tok := in.lex(&sym)
+		switch tok {
+		case '\n':
+			// A blank line terminates a pending comment block.
+			if cb != nil {
+				in.file.Stmt = append(in.file.Stmt, cb)
+				cb = nil
+			}
+		case _COMMENT:
+			if cb == nil {
+				cb = &CommentBlock{Start: sym.pos}
+			}
+			com := cb.Comment()
+			com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text})
+		case _EOF:
+			if cb != nil {
+				in.file.Stmt = append(in.file.Stmt, cb)
+			}
+			return
+		default:
+			in.parseStmt(&sym)
+			// Comments immediately above a statement become its
+			// Before comments rather than a standalone block.
+			if cb != nil {
+				in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
+				cb = nil
+			}
+		}
+	}
+}
+
+// parseStmt parses one top-level statement whose first token is
+// already in sym, producing either a *Line or (on '(') a *LineBlock.
+func (in *input) parseStmt(sym *symType) {
+	start := sym.pos
+	end := sym.endPos
+	token := []string{sym.text}
+	for {
+		tok := in.lex(sym)
+		switch tok {
+		case '\n', _EOF, _EOL:
+			in.file.Stmt = append(in.file.Stmt, &Line{
+				Start: start,
+				Token: token,
+				End: end,
+			})
+			return
+		case '(':
+			in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym))
+			return
+		default:
+			token = append(token, sym.text)
+			end = sym.endPos
+		}
+	}
+}
+
+// parseLineBlock parses a parenthesized block that began with the
+// given header tokens at start; sym holds the '(' just lexed.
+// Comments seen inside the block attach to the following line, or to
+// the closing paren if no line follows.
+func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock {
+	x := &LineBlock{
+		Start: start,
+		Token: token,
+		LParen: LParen{Pos: sym.pos},
+	}
+	var comments []Comment
+	for {
+		tok := in.lex(sym)
+		switch tok {
+		case _EOL:
+			// ignore
+		case '\n':
+			// Record blank-line separation between comment runs.
+			if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
+				comments = append(comments, Comment{})
+			}
+		case _COMMENT:
+			comments = append(comments, Comment{Start: sym.pos, Token: sym.text})
+		case _EOF:
+			in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
+		case ')':
+			x.RParen.Before = comments
+			x.RParen.Pos = sym.pos
+			tok = in.lex(sym)
+			if tok != '\n' && tok != _EOF && tok != _EOL {
+				in.Error("syntax error (expected newline after closing paren)")
+			}
+			return x
+		default:
+			l := in.parseLine(sym)
+			x.Line = append(x.Line, l)
+			l.Comment().Before = comments
+			comments = nil
+		}
+	}
+}
+
+// parseLine parses one line inside a block, whose first token is
+// already in sym, and returns it with InBlock set.
+func (in *input) parseLine(sym *symType) *Line {
+	start := sym.pos
+	end := sym.endPos
+	token := []string{sym.text}
+	for {
+		tok := in.lex(sym)
+		switch tok {
+		case '\n', _EOF, _EOL:
+			return &Line{
+				Start: start,
+				Token: token,
+				End: end,
+				InBlock: true,
+			}
+		default:
+			token = append(token, sym.text)
+			end = sym.endPos
+		}
+	}
+}
+
+const (
+ _EOF = -(1 + iota)
+ _EOL
+ _IDENT
+ _STRING
+ _COMMENT
+)
+
+var (
+ slashSlash = []byte("//")
+ moduleStr = []byte("module")
+)
+
+// ModulePath returns the module path from the gomod file text.
+// If it cannot find a module path, it returns an empty string.
+// It is tolerant of unrelated problems in the go.mod file.
+func ModulePath(mod []byte) string {
+	for len(mod) > 0 {
+		// Take the next line.
+		line := mod
+		mod = nil
+		if i := bytes.IndexByte(line, '\n'); i >= 0 {
+			line, mod = line[:i], line[i+1:]
+		}
+		// Strip a trailing // comment.
+		if i := bytes.Index(line, slashSlash); i >= 0 {
+			line = line[:i]
+		}
+		line = bytes.TrimSpace(line)
+		if !bytes.HasPrefix(line, moduleStr) {
+			continue
+		}
+		line = line[len(moduleStr):]
+		n := len(line)
+		line = bytes.TrimSpace(line)
+		// Require whitespace after "module" (rejects e.g. "modulex")
+		// and a non-empty remainder.
+		if len(line) == n || len(line) == 0 {
+			continue
+		}
+
+		if line[0] == '"' || line[0] == '`' {
+			p, err := strconv.Unquote(string(line))
+			if err != nil {
+				return "" // malformed quoted string or multiline module path
+			}
+			return p
+		}
+
+		return string(line)
+	}
+	return "" // missing module path
+}
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/rule.go b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/rule.go
new file mode 100644
index 00000000..24d275f1
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/modfile/rule.go
@@ -0,0 +1,724 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modfile
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/rogpeppe/go-internal/module"
+ "github.com/rogpeppe/go-internal/semver"
+)
+
+// A File is the parsed, interpreted form of a go.mod file.
+type File struct {
+ Module *Module
+ Go *Go
+ Require []*Require
+ Exclude []*Exclude
+ Replace []*Replace
+
+ Syntax *FileSyntax
+}
+
+// A Module is the module statement.
+type Module struct {
+ Mod module.Version
+ Syntax *Line
+}
+
+// A Go is the go statement.
+type Go struct {
+ Version string // "1.23"
+ Syntax *Line
+}
+
+// A Require is a single require statement.
+type Require struct {
+ Mod module.Version
+ Indirect bool // has "// indirect" comment
+ Syntax *Line
+}
+
+// An Exclude is a single exclude statement.
+type Exclude struct {
+ Mod module.Version
+ Syntax *Line
+}
+
+// A Replace is a single replace statement.
+type Replace struct {
+ Old module.Version
+ New module.Version
+ Syntax *Line
+}
+
+// AddModuleStmt sets the module path, creating the module statement
+// (and the syntax tree itself) if needed, or rewriting the existing
+// one in place. It always returns nil.
+func (f *File) AddModuleStmt(path string) error {
+	if f.Syntax == nil {
+		f.Syntax = new(FileSyntax)
+	}
+	if f.Module == nil {
+		f.Module = &Module{
+			Mod: module.Version{Path: path},
+			Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)),
+		}
+	} else {
+		f.Module.Mod.Path = path
+		f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path))
+	}
+	return nil
+}
+
+// AddComment appends text as a standalone top-level comment block,
+// creating the syntax tree if needed. text should already include the
+// leading "//".
+func (f *File) AddComment(text string) {
+	if f.Syntax == nil {
+		f.Syntax = new(FileSyntax)
+	}
+	f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{
+		Comments: Comments{
+			Before: []Comment{
+				{
+					Token: text,
+				},
+			},
+		},
+	})
+}
+
+type VersionFixer func(path, version string) (string, error)
+
+// Parse parses the data, reported in errors as being from file,
+// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found.
+// Unknown directives and malformed statements are reported as errors (strict mode).
+func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
+	return parseToFile(file, data, fix, true)
+}
+
+// ParseLax is like Parse but ignores unknown statements.
+// It is used when parsing go.mod files other than the main module,
+// under the theory that most statement types we add in the future will
+// only apply in the main module, like exclude and replace,
+// and so we get better gradual deployments if old go commands
+// simply ignore those statements when found in go.mod files
+// in dependencies.
+func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) {
+	return parseToFile(file, data, fix, false)
+}
+
+// parseToFile is the shared implementation of Parse and ParseLax:
+// it parses the syntax tree, then interprets each statement via
+// (*File).add, accumulating all problems before failing.
+func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) {
+	fs, err := parse(file, data)
+	if err != nil {
+		return nil, err
+	}
+	f := &File{
+		Syntax: fs,
+	}
+
+	var errs bytes.Buffer
+	for _, x := range fs.Stmt {
+		switch x := x.(type) {
+		case *Line:
+			f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict)
+
+		case *LineBlock:
+			// A block header must be a single verb, e.g. "require (".
+			if len(x.Token) > 1 {
+				if strict {
+					fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
+				}
+				continue
+			}
+			switch x.Token[0] {
+			default:
+				if strict {
+					fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " "))
+				}
+				continue
+			case "module", "require", "exclude", "replace":
+				for _, l := range x.Line {
+					f.add(&errs, l, x.Token[0], l.Token, fix, strict)
+				}
+			}
+		}
+	}
+
+	if errs.Len() > 0 {
+		return nil, errors.New(strings.TrimRight(errs.String(), "\n"))
+	}
+	return f, nil
+}
+
+var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`)
+
+// add interprets one directive line (verb plus args) into f, writing
+// any problems to errs rather than returning them, so a single pass
+// can collect every error in the file. fix, if non-nil, canonicalizes
+// module versions; strict is false when parsing a dependency's go.mod.
+func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
+	// If strict is false, this module is a dependency.
+	// We ignore all unknown directives as well as main-module-only
+	// directives like replace and exclude. It will work better for
+	// forward compatibility if we can depend on modules that have unknown
+	// statements (presumed relevant only when acting as the main module)
+	// and simply ignore those statements.
+	if !strict {
+		switch verb {
+		case "module", "require", "go":
+			// want these even for dependency go.mods
+		default:
+			return
+		}
+	}
+
+	switch verb {
+	default:
+		fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb)
+
+	case "go":
+		if f.Go != nil {
+			fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line)
+			return
+		}
+		if len(args) != 1 || !goVersionRE.MatchString(args[0]) {
+			fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line)
+			return
+		}
+		f.Go = &Go{Syntax: line}
+		f.Go.Version = args[0]
+	case "module":
+		if f.Module != nil {
+			fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line)
+			return
+		}
+		f.Module = &Module{Syntax: line}
+		if len(args) != 1 {
+
+			fmt.Fprintf(errs, "%s:%d: usage: module module/path [version]\n", f.Syntax.Name, line.Start.Line)
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		f.Module.Mod = module.Version{Path: s}
+	case "require", "exclude":
+		if len(args) != 2 {
+			fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb)
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		old := args[1]
+		v, err := parseVersion(s, &args[1], fix)
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: invalid module version %q: %v\n", f.Syntax.Name, line.Start.Line, old, err)
+			return
+		}
+		// The version's major component must agree with the path's
+		// major suffix (/v2, /v3, ...).
+		pathMajor, err := modulePathMajor(s)
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		if !module.MatchPathMajor(v, pathMajor) {
+			if pathMajor == "" {
+				pathMajor = "v0 or v1"
+			}
+			fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v)
+			return
+		}
+		if verb == "require" {
+			f.Require = append(f.Require, &Require{
+				Mod: module.Version{Path: s, Version: v},
+				Syntax: line,
+				Indirect: isIndirect(line),
+			})
+		} else {
+			f.Exclude = append(f.Exclude, &Exclude{
+				Mod: module.Version{Path: s, Version: v},
+				Syntax: line,
+			})
+		}
+	case "replace":
+		// The old side may or may not carry a version, so the "=>"
+		// arrow can be at index 1 or 2.
+		arrow := 2
+		if len(args) >= 2 && args[1] == "=>" {
+			arrow = 1
+		}
+		if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
+			fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb)
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		pathMajor, err := modulePathMajor(s)
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		var v string
+		if arrow == 2 {
+			old := args[1]
+			v, err = parseVersion(s, &args[1], fix)
+			if err != nil {
+				fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err)
+				return
+			}
+			if !module.MatchPathMajor(v, pathMajor) {
+				if pathMajor == "" {
+					pathMajor = "v0 or v1"
+				}
+				fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v)
+				return
+			}
+		}
+		ns, err := parseString(&args[arrow+1])
+		if err != nil {
+			fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err)
+			return
+		}
+		nv := ""
+		// Replacement without a version must be a local directory path.
+		if len(args) == arrow+2 {
+			if !IsDirectoryPath(ns) {
+				fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line)
+				return
+			}
+			if filepath.Separator == '/' && strings.Contains(ns, `\`) {
+				fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line)
+				return
+			}
+		}
+		// Replacement with a version must be a module path, not a directory.
+		if len(args) == arrow+3 {
+			old := args[arrow+1]
+			nv, err = parseVersion(ns, &args[arrow+2], fix)
+			if err != nil {
+				fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err)
+				return
+			}
+			if IsDirectoryPath(ns) {
+				fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns)
+				return
+			}
+		}
+		f.Replace = append(f.Replace, &Replace{
+			Old: module.Version{Path: s, Version: v},
+			New: module.Version{Path: ns, Version: nv},
+			Syntax: line,
+		})
+	}
+}
+
+// isIndirect reports whether line has a "// indirect" comment,
+// meaning it is in go.mod only for its effect on indirect dependencies,
+// so that it can be dropped entirely once the effective version of the
+// indirect dependency reaches the given minimum version.
+func isIndirect(line *Line) bool {
+	if len(line.Suffix) == 0 {
+		return false
+	}
+	// Accept both "// indirect" and "// indirect; <more text>".
+	f := strings.Fields(line.Suffix[0].Token)
+	return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//"
+}
+
+// setIndirect sets line to have (or not have) a "// indirect" comment.
+func setIndirect(line *Line, indirect bool) {
+	if isIndirect(line) == indirect {
+		return
+	}
+	if indirect {
+		// Adding comment.
+		if len(line.Suffix) == 0 {
+			// New comment.
+			line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
+			return
+		}
+		// Insert at beginning of existing comment.
+		com := &line.Suffix[0]
+		space := " "
+		// Bug fix: the original condition was
+		//   len(com.Token) > 2 && com.Token[2] == ' ' || com.Token[2] == '\t'
+		// and, because && binds tighter than ||, the '\t' comparison
+		// indexed com.Token[2] without a length guard, panicking on a
+		// bare "//" suffix comment. Parenthesize so the guard covers both.
+		if len(com.Token) > 2 && (com.Token[2] == ' ' || com.Token[2] == '\t') {
+			space = ""
+		}
+		com.Token = "// indirect;" + space + com.Token[2:]
+		return
+	}
+
+	// Removing comment.
+	f := strings.Fields(line.Suffix[0].Token)
+	if len(f) == 2 {
+		// Remove whole comment.
+		line.Suffix = nil
+		return
+	}
+
+	// Remove comment prefix.
+	com := &line.Suffix[0]
+	i := strings.Index(com.Token, "indirect;")
+	com.Token = "//" + com.Token[i+len("indirect;"):]
+}
+
+// IsDirectoryPath reports whether the given path should be interpreted
+// as a directory path. Just like on the go command line, relative paths
+// and rooted paths are directory paths; the rest are module paths.
+func IsDirectoryPath(ns string) bool {
+	// Because go.mod files can move from one system to another,
+	// we check all known path syntaxes, both Unix and Windows.
+	for _, prefix := range []string{"./", "../", "/", `.\`, `..\`, `\`} {
+		if strings.HasPrefix(ns, prefix) {
+			return true
+		}
+	}
+	// Windows drive-letter path, e.g. "c:\x" or "C:/x".
+	if len(ns) >= 2 && ns[1] == ':' {
+		c := ns[0]
+		return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+	}
+	return false
+}
+
+// MustQuote reports whether s must be quoted in order to appear as
+// a single token in a go.mod line.
+func MustQuote(s string) bool {
+ for _, r := range s {
+ if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' {
+ return true
+ }
+ }
+ return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*")
+}
+
+// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
+// the quotation of s.
+func AutoQuote(s string) string {
+ if MustQuote(s) {
+ return strconv.Quote(s)
+ }
+ return s
+}
+
+func parseString(s *string) (string, error) {
+ t := *s
+ if strings.HasPrefix(t, `"`) {
+ var err error
+ if t, err = strconv.Unquote(t); err != nil {
+ return "", err
+ }
+ } else if strings.ContainsAny(t, "\"'`") {
+ // Other quotes are reserved both for possible future expansion
+ // and to avoid confusion. For example if someone types 'x'
+ // we want that to be a syntax error and not a literal x in literal quotation marks.
+ return "", fmt.Errorf("unquoted string cannot contain quote")
+ }
+ *s = AutoQuote(t)
+ return t, nil
+}
+
+func parseVersion(path string, s *string, fix VersionFixer) (string, error) {
+ t, err := parseString(s)
+ if err != nil {
+ return "", err
+ }
+ if fix != nil {
+ var err error
+ t, err = fix(path, t)
+ if err != nil {
+ return "", err
+ }
+ }
+ if v := module.CanonicalVersion(t); v != "" {
+ *s = v
+ return *s, nil
+ }
+ return "", fmt.Errorf("version must be of the form v1.2.3")
+}
+
+func modulePathMajor(path string) (string, error) {
+ _, major, ok := module.SplitPathVersion(path)
+ if !ok {
+ return "", fmt.Errorf("invalid module path")
+ }
+ return major, nil
+}
+
+func (f *File) Format() ([]byte, error) {
+ return Format(f.Syntax), nil
+}
+
+// Cleanup cleans up the file f after any edit operations.
+// To avoid quadratic behavior, modifications like DropRequire
+// clear the entry but do not remove it from the slice.
+// Cleanup cleans out all the cleared entries.
+func (f *File) Cleanup() {
+ w := 0
+ for _, r := range f.Require {
+ if r.Mod.Path != "" {
+ f.Require[w] = r
+ w++
+ }
+ }
+ f.Require = f.Require[:w]
+
+ w = 0
+ for _, x := range f.Exclude {
+ if x.Mod.Path != "" {
+ f.Exclude[w] = x
+ w++
+ }
+ }
+ f.Exclude = f.Exclude[:w]
+
+ w = 0
+ for _, r := range f.Replace {
+ if r.Old.Path != "" {
+ f.Replace[w] = r
+ w++
+ }
+ }
+ f.Replace = f.Replace[:w]
+
+ f.Syntax.Cleanup()
+}
+
+func (f *File) AddRequire(path, vers string) error {
+ need := true
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ if need {
+ r.Mod.Version = vers
+ f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
+ need = false
+ } else {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Require{}
+ }
+ }
+ }
+
+ if need {
+ f.AddNewRequire(path, vers, false)
+ }
+ return nil
+}
+
+func (f *File) AddNewRequire(path, vers string, indirect bool) {
+ line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers)
+ setIndirect(line, indirect)
+ f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line})
+}
+
+func (f *File) SetRequire(req []*Require) {
+ need := make(map[string]string)
+ indirect := make(map[string]bool)
+ for _, r := range req {
+ need[r.Mod.Path] = r.Mod.Version
+ indirect[r.Mod.Path] = r.Indirect
+ }
+
+ for _, r := range f.Require {
+ if v, ok := need[r.Mod.Path]; ok {
+ r.Mod.Version = v
+ r.Indirect = indirect[r.Mod.Path]
+ }
+ }
+
+ var newStmts []Expr
+ for _, stmt := range f.Syntax.Stmt {
+ switch stmt := stmt.(type) {
+ case *LineBlock:
+ if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
+ var newLines []*Line
+ for _, line := range stmt.Line {
+ if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" {
+ line.Token[1] = need[p]
+ delete(need, p)
+ setIndirect(line, indirect[p])
+ newLines = append(newLines, line)
+ }
+ }
+ if len(newLines) == 0 {
+ continue // drop stmt
+ }
+ stmt.Line = newLines
+ }
+
+ case *Line:
+ if len(stmt.Token) > 0 && stmt.Token[0] == "require" {
+ if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" {
+ stmt.Token[2] = need[p]
+ delete(need, p)
+ setIndirect(stmt, indirect[p])
+ } else {
+ continue // drop stmt
+ }
+ }
+ }
+ newStmts = append(newStmts, stmt)
+ }
+ f.Syntax.Stmt = newStmts
+
+ for path, vers := range need {
+ f.AddNewRequire(path, vers, indirect[path])
+ }
+ f.SortBlocks()
+}
+
+func (f *File) DropRequire(path string) error {
+ for _, r := range f.Require {
+ if r.Mod.Path == path {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Require{}
+ }
+ }
+ return nil
+}
+
+func (f *File) AddExclude(path, vers string) error {
+ var hint *Line
+ for _, x := range f.Exclude {
+ if x.Mod.Path == path && x.Mod.Version == vers {
+ return nil
+ }
+ if x.Mod.Path == path {
+ hint = x.Syntax
+ }
+ }
+
+ f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
+ return nil
+}
+
+func (f *File) DropExclude(path, vers string) error {
+ for _, x := range f.Exclude {
+ if x.Mod.Path == path && x.Mod.Version == vers {
+ f.Syntax.removeLine(x.Syntax)
+ *x = Exclude{}
+ }
+ }
+ return nil
+}
+
+func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
+ need := true
+ old := module.Version{Path: oldPath, Version: oldVers}
+ new := module.Version{Path: newPath, Version: newVers}
+ tokens := []string{"replace", AutoQuote(oldPath)}
+ if oldVers != "" {
+ tokens = append(tokens, oldVers)
+ }
+ tokens = append(tokens, "=>", AutoQuote(newPath))
+ if newVers != "" {
+ tokens = append(tokens, newVers)
+ }
+
+ var hint *Line
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
+ if need {
+ // Found replacement for old; update to use new.
+ r.New = new
+ f.Syntax.updateLine(r.Syntax, tokens...)
+ need = false
+ continue
+ }
+ // Already added; delete other replacements for same.
+ f.Syntax.removeLine(r.Syntax)
+ *r = Replace{}
+ }
+ if r.Old.Path == oldPath {
+ hint = r.Syntax
+ }
+ }
+ if need {
+ f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)})
+ }
+ return nil
+}
+
+func (f *File) DropReplace(oldPath, oldVers string) error {
+ for _, r := range f.Replace {
+ if r.Old.Path == oldPath && r.Old.Version == oldVers {
+ f.Syntax.removeLine(r.Syntax)
+ *r = Replace{}
+ }
+ }
+ return nil
+}
+
+func (f *File) SortBlocks() {
+ f.removeDups() // otherwise sorting is unsafe
+
+ for _, stmt := range f.Syntax.Stmt {
+ block, ok := stmt.(*LineBlock)
+ if !ok {
+ continue
+ }
+ sort.Slice(block.Line, func(i, j int) bool {
+ li := block.Line[i]
+ lj := block.Line[j]
+ for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
+ if li.Token[k] != lj.Token[k] {
+ return li.Token[k] < lj.Token[k]
+ }
+ }
+ return len(li.Token) < len(lj.Token)
+ })
+ }
+}
+
+func (f *File) removeDups() {
+ have := make(map[module.Version]bool)
+ kill := make(map[*Line]bool)
+ for _, x := range f.Exclude {
+ if have[x.Mod] {
+ kill[x.Syntax] = true
+ continue
+ }
+ have[x.Mod] = true
+ }
+ var excl []*Exclude
+ for _, x := range f.Exclude {
+ if !kill[x.Syntax] {
+ excl = append(excl, x)
+ }
+ }
+ f.Exclude = excl
+
+ have = make(map[module.Version]bool)
+ // Later replacements take priority over earlier ones.
+ for i := len(f.Replace) - 1; i >= 0; i-- {
+ x := f.Replace[i]
+ if have[x.Old] {
+ kill[x.Syntax] = true
+ continue
+ }
+ have[x.Old] = true
+ }
+ var repl []*Replace
+ for _, x := range f.Replace {
+ if !kill[x.Syntax] {
+ repl = append(repl, x)
+ }
+ }
+ f.Replace = repl
+
+ var stmts []Expr
+ for _, stmt := range f.Syntax.Stmt {
+ switch stmt := stmt.(type) {
+ case *Line:
+ if kill[stmt] {
+ continue
+ }
+ case *LineBlock:
+ var lines []*Line
+ for _, line := range stmt.Line {
+ if !kill[line] {
+ lines = append(lines, line)
+ }
+ }
+ stmt.Line = lines
+ if len(lines) == 0 {
+ continue
+ }
+ }
+ stmts = append(stmts, stmt)
+ }
+ f.Syntax.Stmt = stmts
+}
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/module/module.go b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/module/module.go
new file mode 100644
index 00000000..3ff6d9bf
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/module/module.go
@@ -0,0 +1,540 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package module defines the module.Version type
+// along with support code.
+package module
+
+// IMPORTANT NOTE
+//
+// This file essentially defines the set of valid import paths for the go command.
+// There are many subtle considerations, including Unicode ambiguity,
+// security, network, and file system representations.
+//
+// This file also defines the set of valid module path and version combinations,
+// another topic with many subtle considerations.
+//
+// Changes to the semantics in this file require approval from rsc.
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/rogpeppe/go-internal/semver"
+)
+
+// A Version is defined by a module path and version pair.
+type Version struct {
+ Path string
+
+ // Version is usually a semantic version in canonical form.
+ // There are two exceptions to this general rule.
+ // First, the top-level target of a build has no specific version
+ // and uses Version = "".
+ // Second, during MVS calculations the version "none" is used
+ // to represent the decision to take no version of a given module.
+ Version string `json:",omitempty"`
+}
+
+// Check checks that a given module path, version pair is valid.
+// In addition to the path being a valid module path
+// and the version being a valid semantic version,
+// the two must correspond.
+// For example, the path "yaml/v2" only corresponds to
+// semantic versions beginning with "v2.".
+func Check(path, version string) error {
+ if err := CheckPath(path); err != nil {
+ return err
+ }
+ if !semver.IsValid(version) {
+ return fmt.Errorf("malformed semantic version %v", version)
+ }
+ _, pathMajor, _ := SplitPathVersion(path)
+ if !MatchPathMajor(version, pathMajor) {
+ if pathMajor == "" {
+ pathMajor = "v0 or v1"
+ }
+ if pathMajor[0] == '.' { // .v1
+ pathMajor = pathMajor[1:]
+ }
+ return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
+ }
+ return nil
+}
+
+// firstPathOK reports whether r can appear in the first element of a module path.
+// The first element of the path must be an LDH domain name, at least for now.
+// To avoid case ambiguity, the domain name must be entirely lower case.
+func firstPathOK(r rune) bool {
+ return r == '-' || r == '.' ||
+ '0' <= r && r <= '9' ||
+ 'a' <= r && r <= 'z'
+}
+
+// pathOK reports whether r can appear in an import path element.
+// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
+// This matches what "go get" has historically recognized in import paths.
+// TODO(rsc): We would like to allow Unicode letters, but that requires additional
+// care in the safe encoding (see note below).
+func pathOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
+ '0' <= r && r <= '9' ||
+ 'A' <= r && r <= 'Z' ||
+ 'a' <= r && r <= 'z'
+ }
+ return false
+}
+
+// fileNameOK reports whether r can appear in a file name.
+// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
+// If we expand the set of allowed characters here, we have to
+// work harder at detecting potential case-folding and normalization collisions.
+// See note about "safe encoding" below.
+func fileNameOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ // Entire set of ASCII punctuation, from which we remove characters:
+ // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
+ // We disallow some shell special characters: " ' * < > ? ` |
+ // (Note that some of those are disallowed by the Windows file system as well.)
+ // We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
+ // We allow spaces (U+0020) in file names.
+ const allowed = "!#$%&()+,-.=@[]^_{}~ "
+ if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
+ return true
+ }
+ for i := 0; i < len(allowed); i++ {
+ if rune(allowed[i]) == r {
+ return true
+ }
+ }
+ return false
+ }
+ // It may be OK to add more ASCII punctuation here, but only carefully.
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
+ return unicode.IsLetter(r)
+}
+
+// CheckPath checks that a module path is valid.
+func CheckPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed module path %q: %v", path, err)
+ }
+ i := strings.Index(path, "/")
+ if i < 0 {
+ i = len(path)
+ }
+ if i == 0 {
+ return fmt.Errorf("malformed module path %q: leading slash", path)
+ }
+ if !strings.Contains(path[:i], ".") {
+ return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
+ }
+ if path[0] == '-' {
+ return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
+ }
+ for _, r := range path[:i] {
+ if !firstPathOK(r) {
+ return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
+ }
+ }
+ if _, _, ok := SplitPathVersion(path); !ok {
+ return fmt.Errorf("malformed module path %q: invalid version", path)
+ }
+ return nil
+}
+
+// CheckImportPath checks that an import path is valid.
+func CheckImportPath(path string) error {
+ if err := checkPath(path, false); err != nil {
+ return fmt.Errorf("malformed import path %q: %v", path, err)
+ }
+ return nil
+}
+
+// checkPath checks that a general path is valid.
+// It returns an error describing why but not mentioning path.
+// Because these checks apply to both module paths and import paths,
+// the caller is expected to add the "malformed ___ path %q: " prefix.
+// fileName indicates whether the final element of the path is a file name
+// (as opposed to a directory name).
+func checkPath(path string, fileName bool) error {
+ if !utf8.ValidString(path) {
+ return fmt.Errorf("invalid UTF-8")
+ }
+ if path == "" {
+ return fmt.Errorf("empty string")
+ }
+ if strings.Contains(path, "..") {
+ return fmt.Errorf("double dot")
+ }
+ if strings.Contains(path, "//") {
+ return fmt.Errorf("double slash")
+ }
+ if path[len(path)-1] == '/' {
+ return fmt.Errorf("trailing slash")
+ }
+ elemStart := 0
+ for i, r := range path {
+ if r == '/' {
+ if err := checkElem(path[elemStart:i], fileName); err != nil {
+ return err
+ }
+ elemStart = i + 1
+ }
+ }
+ if err := checkElem(path[elemStart:], fileName); err != nil {
+ return err
+ }
+ return nil
+}
+
+// checkElem checks whether an individual path element is valid.
+// fileName indicates whether the element is a file name (not a directory name).
+func checkElem(elem string, fileName bool) error {
+ if elem == "" {
+ return fmt.Errorf("empty path element")
+ }
+ if strings.Count(elem, ".") == len(elem) {
+ return fmt.Errorf("invalid path element %q", elem)
+ }
+ if elem[0] == '.' && !fileName {
+ return fmt.Errorf("leading dot in path element")
+ }
+ if elem[len(elem)-1] == '.' {
+ return fmt.Errorf("trailing dot in path element")
+ }
+ charOK := pathOK
+ if fileName {
+ charOK = fileNameOK
+ }
+ for _, r := range elem {
+ if !charOK(r) {
+ return fmt.Errorf("invalid char %q", r)
+ }
+ }
+
+ // Windows disallows a bunch of path elements, sadly.
+ // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+ short := elem
+ if i := strings.Index(short, "."); i >= 0 {
+ short = short[:i]
+ }
+ for _, bad := range badWindowsNames {
+ if strings.EqualFold(bad, short) {
+ return fmt.Errorf("disallowed path element %q", elem)
+ }
+ }
+ return nil
+}
+
+// CheckFilePath checks whether a slash-separated file path is valid.
+func CheckFilePath(path string) error {
+ if err := checkPath(path, true); err != nil {
+ return fmt.Errorf("malformed file path %q: %v", path, err)
+ }
+ return nil
+}
+
+// badWindowsNames are the reserved file path elements on Windows.
+// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
+var badWindowsNames = []string{
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+}
+
+// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
+// and version is either empty or "/vN" for N >= 2.
+// As a special case, gopkg.in paths are recognized directly;
+// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
+func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
+ if strings.HasPrefix(path, "gopkg.in/") {
+ return splitGopkgIn(path)
+ }
+
+ i := len(path)
+ dot := false
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
+ if path[i-1] == '.' {
+ dot = true
+ }
+ i--
+ }
+ if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' {
+ return path, "", true
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
+func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
+ if !strings.HasPrefix(path, "gopkg.in/") {
+ return path, "", false
+ }
+ i := len(path)
+ if strings.HasSuffix(path, "-unstable") {
+ i -= len("-unstable")
+ }
+ for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
+ i--
+ }
+ if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
+ // All gopkg.in paths must end in vN for some N.
+ return path, "", false
+ }
+ prefix, pathMajor = path[:i-2], path[i-2:]
+ if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
+ return path, "", false
+ }
+ return prefix, pathMajor, true
+}
+
+// MatchPathMajor reports whether the semantic version v
+// matches the path major version pathMajor.
+func MatchPathMajor(v, pathMajor string) bool {
+ if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
+ pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
+ }
+ if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
+ // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
+ // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
+ return true
+ }
+ m := semver.Major(v)
+ if pathMajor == "" {
+ return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
+ }
+ return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
+}
+
+// CanonicalVersion returns the canonical form of the version string v.
+// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
+func CanonicalVersion(v string) string {
+ cv := semver.Canonical(v)
+ if semver.Build(v) == "+incompatible" {
+ cv += "+incompatible"
+ }
+ return cv
+}
+
+// Sort sorts the list by Path, breaking ties by comparing Versions.
+func Sort(list []Version) {
+ sort.Slice(list, func(i, j int) bool {
+ mi := list[i]
+ mj := list[j]
+ if mi.Path != mj.Path {
+ return mi.Path < mj.Path
+ }
+ // To help go.sum formatting, allow version/file.
+ // Compare semver prefix by semver rules,
+ // file by string order.
+ vi := mi.Version
+ vj := mj.Version
+ var fi, fj string
+ if k := strings.Index(vi, "/"); k >= 0 {
+ vi, fi = vi[:k], vi[k:]
+ }
+ if k := strings.Index(vj, "/"); k >= 0 {
+ vj, fj = vj[:k], vj[k:]
+ }
+ if vi != vj {
+ return semver.Compare(vi, vj) < 0
+ }
+ return fi < fj
+ })
+}
+
+// Safe encodings
+//
+// Module paths appear as substrings of file system paths
+// (in the download cache) and of web server URLs in the proxy protocol.
+// In general we cannot rely on file systems to be case-sensitive,
+// nor can we rely on web servers, since they read from file systems.
+// That is, we cannot rely on the file system to keep rsc.io/QUOTE
+// and rsc.io/quote separate. Windows and macOS don't.
+// Instead, we must never require two different casings of a file path.
+// Because we want the download cache to match the proxy protocol,
+// and because we want the proxy protocol to be possible to serve
+// from a tree of static files (which might be stored on a case-insensitive
+// file system), the proxy protocol must never require two different casings
+// of a URL path either.
+//
+// One possibility would be to make the safe encoding be the lowercase
+// hexadecimal encoding of the actual path bytes. This would avoid ever
+// needing different casings of a file path, but it would be fairly illegible
+// to most programmers when those paths appeared in the file system
+// (including in file paths in compiler errors and stack traces)
+// in web server logs, and so on. Instead, we want a safe encoding that
+// leaves most paths unaltered.
+//
+// The safe encoding is this:
+// replace every uppercase letter with an exclamation mark
+// followed by the letter's lowercase equivalent.
+//
+// For example,
+// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
+// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
+// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
+//
+// Import paths that avoid upper-case letters are left unchanged.
+// Note that because import paths are ASCII-only and avoid various
+// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
+// and avoids the same problematic punctuation.
+//
+// Import paths have never allowed exclamation marks, so there is no
+// need to define how to encode a literal !.
+//
+// Although paths are disallowed from using Unicode (see pathOK above),
+// the eventual plan is to allow Unicode letters as well, to assume that
+// file systems and URLs are Unicode-safe (storing UTF-8), and apply
+// the !-for-uppercase convention. Note however that not all runes that
+// are different but case-fold equivalent are an upper/lower pair.
+// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
+// are considered to case-fold to each other. When we do add Unicode
+// letters, we must not assume that upper/lower are the only case-equivalent pairs.
+// Perhaps the Kelvin symbol would be disallowed entirely, for example.
+// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
+//
+// Also, it would be nice to allow Unicode marks as well as letters,
+// but marks include combining marks, and then we must deal not
+// only with case folding but also normalization: both U+00E9 ('é')
+// and U+0065 U+0301 ('e' followed by combining acute accent)
+// look the same on the page and are treated by some file systems
+// as the same path. If we do allow Unicode marks in paths, there
+// must be some kind of normalization to allow only one canonical
+// encoding of any character used in an import path.
+
+// EncodePath returns the safe encoding of the given module path.
+// It fails if the module path is invalid.
+func EncodePath(path string) (encoding string, err error) {
+ if err := CheckPath(path); err != nil {
+ return "", err
+ }
+
+ return encodeString(path)
+}
+
+// EncodeVersion returns the safe encoding of the given module version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func EncodeVersion(v string) (encoding string, err error) {
+ if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
+ return "", fmt.Errorf("disallowed version string %q", v)
+ }
+ return encodeString(v)
+}
+
+func encodeString(s string) (encoding string, err error) {
+ haveUpper := false
+ for _, r := range s {
+ if r == '!' || r >= utf8.RuneSelf {
+ // This should be disallowed by CheckPath, but diagnose anyway.
+ // The correctness of the encoding loop below depends on it.
+ return "", fmt.Errorf("internal error: inconsistency in EncodePath")
+ }
+ if 'A' <= r && r <= 'Z' {
+ haveUpper = true
+ }
+ }
+
+ if !haveUpper {
+ return s, nil
+ }
+
+ var buf []byte
+ for _, r := range s {
+ if 'A' <= r && r <= 'Z' {
+ buf = append(buf, '!', byte(r+'a'-'A'))
+ } else {
+ buf = append(buf, byte(r))
+ }
+ }
+ return string(buf), nil
+}
+
+// DecodePath returns the module path of the given safe encoding.
+// It fails if the encoding is invalid or encodes an invalid path.
+func DecodePath(encoding string) (path string, err error) {
+ path, ok := decodeString(encoding)
+ if !ok {
+ return "", fmt.Errorf("invalid module path encoding %q", encoding)
+ }
+ if err := CheckPath(path); err != nil {
+ return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
+ }
+ return path, nil
+}
+
+// DecodeVersion returns the version string for the given safe encoding.
+// It fails if the encoding is invalid or encodes an invalid version.
+// Versions are allowed to be in non-semver form but must be valid file names
+// and not contain exclamation marks.
+func DecodeVersion(encoding string) (v string, err error) {
+ v, ok := decodeString(encoding)
+ if !ok {
+ return "", fmt.Errorf("invalid version encoding %q", encoding)
+ }
+ if err := checkElem(v, true); err != nil {
+ return "", fmt.Errorf("disallowed version string %q", v)
+ }
+ return v, nil
+}
+
+func decodeString(encoding string) (string, bool) {
+ var buf []byte
+
+ bang := false
+ for _, r := range encoding {
+ if r >= utf8.RuneSelf {
+ return "", false
+ }
+ if bang {
+ bang = false
+ if r < 'a' || 'z' < r {
+ return "", false
+ }
+ buf = append(buf, byte(r+'A'-'a'))
+ continue
+ }
+ if r == '!' {
+ bang = true
+ continue
+ }
+ if 'A' <= r && r <= 'Z' {
+ return "", false
+ }
+ buf = append(buf, byte(r))
+ }
+ if bang {
+ return "", false
+ }
+ return string(buf), true
+}
diff --git a/src/margo.sh/vendor/github.com/rogpeppe/go-internal/semver/semver.go b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/semver/semver.go
new file mode 100644
index 00000000..4af7118e
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/rogpeppe/go-internal/semver/semver.go
@@ -0,0 +1,388 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+// parsed returns the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+ err string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// according to semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+ pv, ok1 := parse(v)
+ pw, ok2 := parse(w)
+ if !ok1 && !ok2 {
+ return 0
+ }
+ if !ok1 {
+ return -1
+ }
+ if !ok2 {
+ return +1
+ }
+ if c := compareInt(pv.major, pw.major); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.minor, pw.minor); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.patch, pw.patch); c != 0 {
+ return c
+ }
+ return comparePrerelease(pv.prerelease, pw.prerelease)
+}
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+func Max(v, w string) string {
+ v = Canonical(v)
+ w = Canonical(w)
+ if Compare(v, w) > 0 {
+ return v
+ }
+ return w
+}
+
+func parse(v string) (p parsed, ok bool) {
+ if v == "" || v[0] != 'v' {
+ p.err = "missing v prefix"
+ return
+ }
+ p.major, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad major version"
+ return
+ }
+ if v == "" {
+ p.minor = "0"
+ p.patch = "0"
+ p.short = ".0.0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad minor prefix"
+ ok = false
+ return
+ }
+ p.minor, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad minor version"
+ return
+ }
+ if v == "" {
+ p.patch = "0"
+ p.short = ".0"
+ return
+ }
+ if v[0] != '.' {
+ p.err = "bad patch prefix"
+ ok = false
+ return
+ }
+ p.patch, v, ok = parseInt(v[1:])
+ if !ok {
+ p.err = "bad patch version"
+ return
+ }
+ if len(v) > 0 && v[0] == '-' {
+ p.prerelease, v, ok = parsePrerelease(v)
+ if !ok {
+ p.err = "bad prerelease"
+ return
+ }
+ }
+ if len(v) > 0 && v[0] == '+' {
+ p.build, v, ok = parseBuild(v)
+ if !ok {
+ p.err = "bad build"
+ return
+ }
+ }
+ if v != "" {
+ p.err = "junk on end"
+ ok = false
+ return
+ }
+ ok = true
+ return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+ if v == "" {
+ return
+ }
+ if v[0] < '0' || '9' < v[0] {
+ return
+ }
+ i := 1
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ if v[0] == '0' && i != 1 {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+ // "A pre-release version MAY be denoted by appending a hyphen and
+ // a series of dot separated identifiers immediately following the patch version.
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+ if v == "" || v[0] != '-' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) && v[i] != '+' {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+ if v == "" || v[0] != '+' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) {
+ if !isIdentChar(v[i]) {
+ return
+ }
+ if v[i] == '.' {
+ if start == i {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v)
+}
+
+func compareInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func comparePrerelease(x, y string) int {
+ // "When major, minor, and patch are equal, a pre-release version has
+ // lower precedence than a normal version.
+ // Example: 1.0.0-alpha < 1.0.0.
+ // Precedence for two pre-release versions with the same major, minor,
+ // and patch version MUST be determined by comparing each dot separated
+ // identifier from left to right until a difference is found as follows:
+ // identifiers consisting of only digits are compared numerically and
+ // identifiers with letters or hyphens are compared lexically in ASCII
+ // sort order. Numeric identifiers always have lower precedence than
+ // non-numeric identifiers. A larger set of pre-release fields has a
+ // higher precedence than a smaller set, if all of the preceding
+ // identifiers are equal.
+ // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+ // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+ if x == y {
+ return 0
+ }
+ if x == "" {
+ return +1
+ }
+ if y == "" {
+ return -1
+ }
+ for x != "" && y != "" {
+ x = x[1:] // skip - or .
+ y = y[1:] // skip - or .
+ var dx, dy string
+ dx, x = nextIdent(x)
+ dy, y = nextIdent(y)
+ if dx != dy {
+ ix := isNum(dx)
+ iy := isNum(dy)
+ if ix != iy {
+ if ix {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if ix {
+ if len(dx) < len(dy) {
+ return -1
+ }
+ if len(dx) > len(dy) {
+ return +1
+ }
+ }
+ if dx < dy {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ }
+ if x == "" {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func nextIdent(x string) (dx, rest string) {
+ i := 0
+ for i < len(x) && x[i] != '.' {
+ i++
+ }
+ return x[:i], x[i:]
+}
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore b/src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore
new file mode 100644
index 00000000..75623dcc
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml b/src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml
new file mode 100644
index 00000000..2f3351d7
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+language: go
+go:
+ - "1.9.x"
+ - "1.10.x"
+ - tip
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt b/src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 00000000..2885af36
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/README.md b/src/margo.sh/vendor/github.com/russross/blackfriday/README.md
new file mode 100644
index 00000000..3c62e137
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/README.md
@@ -0,0 +1,369 @@
+Blackfriday
+[![Build Status][BuildSVG]][BuildURL]
+[![Godoc][GodocV2SVG]][GodocV2URL]
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with any modern Go release. With Go and git installed:
+
+ go get -u gopkg.in/russross/blackfriday.v2
+
+will download, compile, and install the package into your `$GOPATH` directory
+hierarchy.
+
+
+Versions
+--------
+
+Currently maintained and recommended version of Blackfriday is `v2`. It's being
+developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
+documentation is available at
+https://godoc.org/gopkg.in/russross/blackfriday.v2.
+
+It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
+but we highly recommend using package management tool like [dep][7] or
+[Glide][8] and make use of semantic versioning. With package management you
+should import `github.com/russross/blackfriday` and specify that you're using
+version 2.0.0.
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+ the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1. Currently in the
+ ballpark of around 15%.
+* API breakage. If you can't afford modifying your code to adhere to the new API
+ and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+ v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+ tracking.
+
+If you are still interested in the legacy `v1`, you can import it from
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
+here: https://godoc.org/github.com/russross/blackfriday
+
+### Known issue with `dep`
+
+There is a known problem with using Blackfriday v1 _transitively_ and `dep`.
+Currently `dep` prioritizes semver versions over anything else, and picks the
+latest one, plus it does not apply a `[[constraint]]` specifier to transitively
+pulled in packages. So if you're using something that uses Blackfriday v1, but
+that something does not use `dep` yet, you will get Blackfriday v2 pulled in and
+your first dependency will fail to build.
+
+There are couple of fixes for it, documented here:
+https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version
+
+Meanwhile, `dep` team is working on a more general solution to the constraints
+on transitive dependencies problem: https://github.com/golang/dep/issues/1124.
+
+
+Usage
+-----
+
+### v1
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+ output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+ output := blackfriday.MarkdownCommon(input)
+
+### v2
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of most popular
+extensions enabled. If you want the most basic feature set, corresponding with
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+ "github.com/microcosm-cc/bluemonday"
+ "gopkg.in/russross/blackfriday.v2"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options, v1
+
+If you want to customize the set of options, first get a renderer
+(currently only the HTML output engine), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
+
+### Custom options, v2
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
+
+### `blackfriday-tool`
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+ go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+*
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need full functionality of blackfriday.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+ the `--tidy` option. Without `--tidy`, the differences are
+ mostly in whitespace and entity escaping, where blackfriday is
+ more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+ blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+ to feed untrusted user input without fear of bad things
+ happening. The test suite stress tests this and there are no
+ known inputs that make it crash. If you find one, please let me
+ know and send me the input that does it.
+
+ NOTE: "safety" in this context means *runtime safety only*. In order to
+ protect yourself against JavaScript injection in untrusted content, see
+ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+ most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+ goroutines without ill effect. There is no dependence on global
+ shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+ library packages in Go. The source code is pretty
+ self-contained, so it is easy to add to any project, including
+ Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis supression**. The `_` character is
+ commonly used inside words when discussing code, so having
+ markdown interpret it as an emphasis command is usually the
+ wrong thing. Blackfriday lets you treat all emphasis markers as
+ normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+ using a simple syntax:
+
+ ```
+ Name | Age
+ --------|------
+ Bob | 27
+ Alice | 23
+ ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+ indentation to mark code blocks, you can explicitly mark them
+ and supply a language (to make syntax highlighting simple). Just
+ mark it like this:
+
+ ``` go
+ func getTrue() bool {
+ return true
+ }
+ ```
+
+ You can use 3 or more backticks to mark the beginning of the
+ block, and the same number to mark the end of the block.
+
+ To preserve classes of fenced code blocks while using the bluemonday
+ HTML sanitizer, use the following policy:
+
+ ``` go
+ p := bluemonday.UGCPolicy()
+ p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+ html := p.SanitizeBytes(unsafe)
+ ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+ term followed by a colon and the definition for that term.
+
+ Cat
+ : Fluffy animal everyone likes
+
+ Internet
+ : Vector of transmission for pictures of cats
+
+ Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+ a footnote definition that will be placed in a list of footnotes at the
+ end of the document. A footnote looks like this:
+
+ This is a footnote.[^1]
+
+ [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+ explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+ should be crossed out.
+
+* **Hard line breaks**. With this extension enabled (it is off by
+ default in the `MarkdownBasic` and `MarkdownCommon` convenience
+ functions), newlines in the input translate into line breaks in
+ the output.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+ supported, turning normal double- and single-quote marks into
+ curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+ is translated into `–`, and `---` is translated into
+ `—`. This differs from most smartypants processors, which
+ turn a single hyphen into an ndash and a double hyphen into an
+ mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+ is translated into suitable HTML (instead of just a few special
+ cases like most smartypant processors). For example, `4/5`
+ becomes `4 ⁄5 `, which renders as
+ 4 ⁄5 .
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+ provides a GitHub Flavored Markdown renderer with fenced code block
+ highlighting, clickable heading anchor links.
+
+ It's not customizable, and its goal is to produce HTML output
+ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+ except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+ but for markdown.
+
+* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex):
+ renders output as LaTeX.
+
+* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
+ integration with the [Chroma](https://github.com/alecthomas/chroma) code
+ highlighting library. bfchroma is only compatible with v2 of Blackfriday and
+ provides a drop-in renderer ready to use with Blackfriday, as well as
+ options and means for further customization.
+
+
+TODO
+----
+
+* More unit testing
+* Improve Unicode support. It does not understand all Unicode
+ rules (about what constitutes a letter, a punctuation symbol,
+ etc.), so it may fail to detect word boundaries correctly in
+ some instances. It is safe on all UTF-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+ [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+ [2]: https://golang.org/ "Go Language"
+ [3]: https://github.com/vmg/sundown "Sundown"
+ [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
+ [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+ [6]: https://labix.org/gopkg.in "gopkg.in"
+ [7]: https://github.com/golang/dep/ "dep"
+ [8]: https://github.com/Masterminds/glide "Glide"
+
+ [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master
+ [BuildURL]: https://travis-ci.org/russross/blackfriday
+ [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg
+ [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/block.go b/src/margo.sh/vendor/github.com/russross/blackfriday/block.go
new file mode 100644
index 00000000..45c21a6c
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/block.go
@@ -0,0 +1,1474 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross .
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "strings"
+ "unicode"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+ if len(data) == 0 || data[len(data)-1] != '\n' {
+ panic("block input is missing terminating newline")
+ }
+
+ // this is called recursively: enforce a maximum depth
+ if p.nesting >= p.maxNesting {
+ return
+ }
+ p.nesting++
+
+ // parse out one block-level construct at a time
+ for len(data) > 0 {
+ // prefixed header:
+ //
+ // # Header 1
+ // ## Header 2
+ // ...
+ // ###### Header 6
+ if p.isPrefixHeader(data) {
+ data = data[p.prefixHeader(out, data):]
+ continue
+ }
+
+ // block of preformatted HTML:
+ //
+ //
+ // ...
+ //
+ if data[0] == '<' {
+ if i := p.html(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // title block
+ //
+ // % stuff
+ // % more stuff
+ // % even more stuff
+ if p.flags&EXTENSION_TITLEBLOCK != 0 {
+ if data[0] == '%' {
+ if i := p.titleBlock(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+ }
+
+ // blank lines. note: returns the # of bytes to skip
+ if i := p.isEmpty(data); i > 0 {
+ data = data[i:]
+ continue
+ }
+
+ // indented code block:
+ //
+ // func max(a, b int) int {
+ // if a > b {
+ // return a
+ // }
+ // return b
+ // }
+ if p.codePrefix(data) > 0 {
+ data = data[p.code(out, data):]
+ continue
+ }
+
+ // fenced code block:
+ //
+ // ``` go info string here
+ // func fact(n int) int {
+ // if n <= 1 {
+ // return n
+ // }
+ // return n * fact(n-1)
+ // }
+ // ```
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if i := p.fencedCodeBlock(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // horizontal rule:
+ //
+ // ------
+ // or
+ // ******
+ // or
+ // ______
+ if p.isHRule(data) {
+ p.r.HRule(out)
+ var i int
+ for i = 0; data[i] != '\n'; i++ {
+ }
+ data = data[i:]
+ continue
+ }
+
+ // block quote:
+ //
+ // > A big quote I found somewhere
+ // > on the web
+ if p.quotePrefix(data) > 0 {
+ data = data[p.quote(out, data):]
+ continue
+ }
+
+ // table:
+ //
+ // Name | Age | Phone
+ // ------|-----|---------
+ // Bob | 31 | 555-1234
+ // Alice | 27 | 555-4321
+ if p.flags&EXTENSION_TABLES != 0 {
+ if i := p.table(out, data); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // an itemized/unordered list:
+ //
+ // * Item 1
+ // * Item 2
+ //
+ // also works with + or -
+ if p.uliPrefix(data) > 0 {
+ data = data[p.list(out, data, 0):]
+ continue
+ }
+
+ // a numbered/ordered list:
+ //
+ // 1. Item 1
+ // 2. Item 2
+ if p.oliPrefix(data) > 0 {
+ data = data[p.list(out, data, LIST_TYPE_ORDERED):]
+ continue
+ }
+
+ // definition lists:
+ //
+ // Term 1
+ // : Definition a
+ // : Definition b
+ //
+ // Term 2
+ // : Definition c
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if p.dliPrefix(data) > 0 {
+ data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
+ continue
+ }
+ }
+
+ // anything else must look like a normal paragraph
+ // note: this finds underlined headers, too
+ data = data[p.paragraph(out, data):]
+ }
+
+ p.nesting--
+}
+
+func (p *parser) isPrefixHeader(data []byte) bool {
+ if data[0] != '#' {
+ return false
+ }
+
+ if p.flags&EXTENSION_SPACE_HEADERS != 0 {
+ level := 0
+ for level < 6 && data[level] == '#' {
+ level++
+ }
+ if data[level] != ' ' {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
+ level := 0
+ for level < 6 && data[level] == '#' {
+ level++
+ }
+ i := skipChar(data, level, ' ')
+ end := skipUntilChar(data, i, '\n')
+ skip := end
+ id := ""
+ if p.flags&EXTENSION_HEADER_IDS != 0 {
+ j, k := 0, 0
+ // find start/end of header id
+ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+ }
+ for k = j + 1; k < end && data[k] != '}'; k++ {
+ }
+ // extract header id iff found
+ if j < end && k < end {
+ id = string(data[j+2 : k])
+ end = j
+ skip = k + 1
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ }
+ }
+ for end > 0 && data[end-1] == '#' {
+ if isBackslashEscaped(data, end-1) {
+ break
+ }
+ end--
+ }
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ if end > i {
+ if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+ id = SanitizedAnchorName(string(data[i:end]))
+ }
+ work := func() bool {
+ p.inline(out, data[i:end])
+ return true
+ }
+ p.r.Header(out, work, level, id)
+ }
+ return skip
+}
+
+func (p *parser) isUnderlinedHeader(data []byte) int {
+ // test of level 1 header
+ if data[0] == '=' {
+ i := skipChar(data, 1, '=')
+ i = skipChar(data, i, ' ')
+ if data[i] == '\n' {
+ return 1
+ } else {
+ return 0
+ }
+ }
+
+ // test of level 2 header
+ if data[0] == '-' {
+ i := skipChar(data, 1, '-')
+ i = skipChar(data, i, ' ')
+ if data[i] == '\n' {
+ return 2
+ } else {
+ return 0
+ }
+ }
+
+ return 0
+}
+
+func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '%' {
+ return 0
+ }
+ splitData := bytes.Split(data, []byte("\n"))
+ var i int
+ for idx, b := range splitData {
+ if !bytes.HasPrefix(b, []byte("%")) {
+ i = idx // - 1
+ break
+ }
+ }
+
+ data = bytes.Join(splitData[0:i], []byte("\n"))
+ p.r.TitleBlock(out, data)
+
+ return len(data)
+}
+
+// html handles a block of raw HTML beginning at data[0]: an opening
+// block-level tag matched later by its closing tag, or one of the special
+// cases (comment, <hr>, CDATA). Returns the bytes consumed, or 0 if data
+// does not start a recognizable HTML block.
+func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
+	var i, j int
+
+	// identify the opening tag
+	if data[0] != '<' {
+		return 0
+	}
+	curtag, tagfound := p.htmlFindTag(data[1:])
+
+	// handle special cases
+	if !tagfound {
+		// check for an HTML comment
+		if size := p.htmlComment(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for an <hr> tag
+		if size := p.htmlHr(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for HTML CDATA
+		if size := p.htmlCDATA(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			// advance to the next "</" sequence
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			// check that the closing tag matches and is followed by a blank line
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := i
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		p.r.BlockHtml(out, data[:end])
+	}
+
+	return i
+}
+
+// renderHTMLBlock renders data up to start (plus the blank line that must
+// follow it) as a raw HTML block. Returns the bytes consumed, or 0 when
+// the text at start is not followed by a blank line.
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+	// html block needs to end with a blank line
+	blank := p.isEmpty(data[start:])
+	if blank == 0 {
+		return 0
+	}
+	size := start + blank
+	if doRender {
+		// strip trailing newlines before handing off to the renderer
+		end := size
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		p.r.BlockHtml(out, data[:end])
+	}
+	return size
+}
+
+// htmlComment handles an HTML comment in lax form: the inline comment
+// scanner determines the span, then a trailing blank line is required.
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+	end := p.inlineHTMLComment(out, data)
+	return p.renderHTMLBlock(out, data, end, doRender)
+}
+
+// HTML CDATA section
+// htmlCDATA handles a <![CDATA[ ... ]]> section as an HTML block; it must
+// be terminated by "]]>" and followed by a blank line. Returns the bytes
+// consumed, or 0 if data does not start a CDATA section.
+//
+// NOTE(review): the head of this function had been garbled by markup
+// stripping (the "<![cdata[" literal and the length/prefix checks were
+// missing); restored to match upstream russross/blackfriday v1.
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+	const cdataTag = "<![cdata["
+	const cdataTagLen = len(cdataTag)
+	if len(data) < cdataTagLen+1 {
+		return 0
+	}
+	// case-insensitive match of the opening tag
+	if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
+		return 0
+	}
+	i := cdataTagLen
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
+		i++
+	}
+	i++
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HR, which is the only self-closing block tag considered
+// htmlHr recognizes an <hr>-style tag (case-insensitive) at the start of
+// data and renders it as an HTML block; returns bytes consumed or 0.
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr> tag after all; at least not a valid one
+		return 0
+	}
+
+	i := 3
+	// scan to the end of the tag or the end of the line
+	for data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+
+	if data[i] == '>' {
+		// the tag must be followed by a blank line to count as a block
+		return p.renderHTMLBlock(out, data, i+1, doRender)
+	}
+
+	return 0
+}
+
+// htmlFindTag extracts the leading alphanumeric tag name from data and
+// reports whether it names a known HTML block-level tag.
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+	end := 0
+	for isalnum(data[end]) {
+		end++
+	}
+	key := string(data[:end])
+	_, ok := blockTags[key]
+	if ok {
+		return key, true
+	}
+	return "", false
+}
+
+// htmlFindEnd checks whether data begins with the closing tag "</tag>"
+// with nothing else on its line, followed (unless lax blocks are enabled)
+// by a blank line. It returns the bytes consumed through that blank line,
+// or 0 if the match fails.
+//
+// NOTE(review): the close-tag literal had been corrupted to ("" + tag + ">")
+// by markup stripping; restored to "</" + tag + ">" per upstream blackfriday.
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	// lax mode accepts the closing tag without a following blank line
+	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+func (*parser) isEmpty(data []byte) int {
+ // it is okay to call isEmpty on an empty buffer
+ if len(data) == 0 {
+ return 0
+ }
+
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] != ' ' && data[i] != '\t' {
+ return 0
+ }
+ }
+ return i + 1
+}
+
+// isHRule reports whether data begins with a horizontal rule: up to three
+// leading spaces, then at least three identical '*', '-', or '_' characters
+// with nothing but spaces before the end of the line.
+func (*parser) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If info is not nil, it gets set to the info string specified in the fence line.
+// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
+func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) {
+	i, size := 0, 0
+
+	// skip up to three spaces
+	for i < len(data) && i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// check for the marker characters: ~ or `
+	if i >= len(data) {
+		return 0, ""
+	}
+	if data[i] != '~' && data[i] != '`' {
+		return 0, ""
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < len(data) && data[i] == c {
+		size++
+		i++
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return 0, ""
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return 0, ""
+	}
+
+	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+	// into one, always get the info string, and discard it if the caller doesn't care.
+	if info != nil {
+		infoLength := 0
+		i = skipChar(data, i, ' ')
+
+		if i >= len(data) {
+			if newlineOptional && i == len(data) {
+				return i, marker
+			}
+			return 0, ""
+		}
+
+		infoStart := i
+
+		if data[i] == '{' {
+			// brace-wrapped info string, e.g. ```{.go}
+			i++
+			infoStart++
+
+			for i < len(data) && data[i] != '}' && data[i] != '\n' {
+				infoLength++
+				i++
+			}
+
+			if i >= len(data) || data[i] != '}' {
+				return 0, ""
+			}
+
+			// strip all whitespace at the beginning and the end
+			// of the {} block
+			for infoLength > 0 && isspace(data[infoStart]) {
+				infoStart++
+				infoLength--
+			}
+
+			for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
+				infoLength--
+			}
+
+			i++
+		} else {
+			// bare info string: everything up to the end of the line
+			for i < len(data) && !isverticalspace(data[i]) {
+				infoLength++
+				i++
+			}
+		}
+
+		*info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
+	}
+
+	i = skipChar(data, i, ' ')
+	if i >= len(data) || data[i] != '\n' {
+		if newlineOptional && i == len(data) {
+			return i, marker
+		}
+		return 0, ""
+	}
+
+	return i + 1, marker // Take newline into account.
+}
+
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
+func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+	var infoString string
+	beg, marker := isFenceLine(data, &infoString, "", false)
+	if beg == 0 || beg >= len(data) {
+		return 0
+	}
+
+	var work bytes.Buffer
+
+	for {
+		// safe to assume beg < len(data)
+
+		// check for the end of the code block
+		newlineOptional := !doRender
+		fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional)
+		if fenceEnd != 0 {
+			beg += fenceEnd
+			break
+		}
+
+		// copy the current line
+		end := skipUntilChar(data, beg, '\n') + 1
+
+		// did we reach the end of the buffer without a closing marker?
+		if end >= len(data) {
+			return 0
+		}
+
+		// verbatim copy to the working buffer
+		if doRender {
+			work.Write(data[beg:end])
+		}
+		beg = end
+	}
+
+	if doRender {
+		p.r.BlockCode(out, work.Bytes(), infoString)
+	}
+
+	return beg
+}
+
+// table parses a table (header row, delimiter row, then body rows) at the
+// start of data and renders it; returns the bytes consumed, or 0 if data
+// does not begin with a valid table header.
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+	var header bytes.Buffer
+	i, columns := p.tableHeader(&header, data)
+	if i == 0 {
+		return 0
+	}
+
+	var body bytes.Buffer
+
+	for i < len(data) {
+		pipes, rowStart := 0, i
+		for ; data[i] != '\n'; i++ {
+			if data[i] == '|' {
+				pipes++
+			}
+		}
+
+		// a line without any pipe ends the table body
+		if pipes == 0 {
+			i = rowStart
+			break
+		}
+
+		// include the newline in data sent to tableRow
+		i++
+		p.tableRow(&body, data[rowStart:i], columns, false)
+	}
+
+	p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+	return i
+}
+
+// isBackslashEscaped reports whether data[i] is preceded by an odd number
+// of consecutive backslashes, i.e. whether the character at i is escaped.
+func isBackslashEscaped(data []byte, i int) bool {
+	count := 0
+	for j := i - 1; j >= 0 && data[j] == '\\'; j-- {
+		count++
+	}
+	return count%2 == 1
+}
+
+// tableHeader parses the first two lines of a table: the header row and
+// the delimiter row (/ *:?-+:? *|/ per column). On success it renders the
+// header row into out and returns the bytes consumed plus the alignment
+// flags for each column; on failure it returns zero values.
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+	i := 0
+	colCount := 1
+	// count unescaped pipes on the first line to estimate the column count
+	for i = 0; data[i] != '\n'; i++ {
+		if data[i] == '|' && !isBackslashEscaped(data, i) {
+			colCount++
+		}
+	}
+
+	// doesn't look like a table header
+	if colCount == 1 {
+		return
+	}
+
+	// include the newline in the data sent to tableRow
+	header := data[:i+1]
+
+	// column count ignores pipes at beginning or end of line
+	if data[0] == '|' {
+		colCount--
+	}
+	if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+		colCount--
+	}
+
+	columns = make([]int, colCount)
+
+	// move on to the header underline
+	i++
+	if i >= len(data) {
+		return
+	}
+
+	// skip an optional leading pipe and spaces on the delimiter row
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+	i = skipChar(data, i, ' ')
+
+	// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+	// and trailing | optional on last column
+	col := 0
+	for data[i] != '\n' {
+		dashes := 0
+
+		// a leading colon marks left alignment
+		if data[i] == ':' {
+			i++
+			columns[col] |= TABLE_ALIGNMENT_LEFT
+			dashes++
+		}
+		for data[i] == '-' {
+			i++
+			dashes++
+		}
+		// a trailing colon marks right alignment (both set = centered)
+		if data[i] == ':' {
+			i++
+			columns[col] |= TABLE_ALIGNMENT_RIGHT
+			dashes++
+		}
+		for data[i] == ' ' {
+			i++
+		}
+
+		// end of column test is messy
+		switch {
+		case dashes < 3:
+			// not a valid column
+			return
+
+		case data[i] == '|' && !isBackslashEscaped(data, i):
+			// marker found, now skip past trailing whitespace
+			col++
+			i++
+			for data[i] == ' ' {
+				i++
+			}
+
+			// trailing junk found after last column
+			if col >= colCount && data[i] != '\n' {
+				return
+			}
+
+		case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+			// something else found where marker was required
+			return
+
+		case data[i] == '\n':
+			// marker is optional for the last column
+			col++
+
+		default:
+			// trailing junk found after last column
+			return
+		}
+	}
+	if col != colCount {
+		return
+	}
+
+	p.tableRow(out, header, columns, true)
+	size = i + 1
+	return
+}
+
+// tableRow renders one table row (data includes its trailing newline) as
+// header or body cells per the header flag, padding with empty cells when
+// the row has fewer cells than columns; extra cells are silently dropped.
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+	i, col := 0, 0
+	var rowWork bytes.Buffer
+
+	// skip an optional leading pipe
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+
+	for col = 0; col < len(columns) && i < len(data); col++ {
+		// skip leading spaces in the cell
+		for data[i] == ' ' {
+			i++
+		}
+
+		cellStart := i
+
+		// scan to the next unescaped pipe or end of line
+		for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+			i++
+		}
+
+		cellEnd := i
+
+		// skip the end-of-cell marker, possibly taking us past end of buffer
+		i++
+
+		// trim trailing spaces from the cell
+		for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+			cellEnd--
+		}
+
+		var cellWork bytes.Buffer
+		p.inline(&cellWork, data[cellStart:cellEnd])
+
+		if header {
+			p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+		} else {
+			p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+		}
+	}
+
+	// pad it out with empty columns to get the right number
+	for ; col < len(columns); col++ {
+		if header {
+			p.r.TableHeaderCell(&rowWork, nil, columns[col])
+		} else {
+			p.r.TableCell(&rowWork, nil, columns[col])
+		}
+	}
+
+	// silently ignore rows with too many cells
+
+	p.r.TableRow(out, rowWork.Bytes())
+}
+
+// quotePrefix returns the length of a blockquote prefix at the start of
+// data — up to three spaces, then '>' with an optional following space —
+// or 0 if data does not start a blockquote line.
+func (p *parser) quotePrefix(data []byte) int {
+	i := 0
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+	if data[i] != '>' {
+		return 0
+	}
+	if data[i+1] == ' ' {
+		return i + 2
+	}
+	return i + 1
+}
+
+// terminateBlockquote reports whether the blockquote ends at beg: the
+// current line must be blank, and the line starting at end must neither
+// carry a quote prefix nor be blank itself.
+func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
+	if p.isEmpty(data[beg:]) <= 0 {
+		return false
+	}
+	if end >= len(data) {
+		return true
+	}
+	next := data[end:]
+	if p.quotePrefix(next) != 0 {
+		return false
+	}
+	return p.isEmpty(next) == 0
+}
+
+// parse a blockquote fragment
+// quote collects the quoted lines (prefix stripped), recursively parses
+// them as blocks, and renders the result; returns bytes consumed.
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+	var raw bytes.Buffer
+	beg, end := 0, 0
+	for beg < len(data) {
+		end = beg
+		// Step over whole lines, collecting them. While doing that, check for
+		// fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+		for data[end] != '\n' {
+			if p.flags&EXTENSION_FENCED_CODE != 0 {
+				if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end++
+
+		if pre := p.quotePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateBlockquote(data, beg, end) {
+			break
+		}
+
+		// this line is part of the blockquote
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	// recursively parse the collected content as block-level markdown
+	var cooked bytes.Buffer
+	p.block(&cooked, raw.Bytes())
+	p.r.BlockQuote(out, cooked.Bytes())
+	return end
+}
+
+// codePrefix returns 4 when data starts with the four-space indent that
+// marks an indented code block, and 0 otherwise.
+func (p *parser) codePrefix(data []byte) int {
+	for i := 0; i < 4; i++ {
+		if data[i] != ' ' {
+			return 0
+		}
+	}
+	return 4
+}
+
+// code parses an indented code block: consecutive lines that are either
+// blank or indented four spaces. The collected text (prefix stripped,
+// trailing blank lines trimmed to one newline) is rendered as a code
+// block; returns the bytes consumed.
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		// advance past the end of the current line
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// uliPrefix returns the length of an unordered-list item prefix — up to
+// three spaces, a '*', '+', or '-' bullet, then a space — or 0 if absent.
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	switch data[i] {
+	case '*', '+', '-':
+		if data[i+1] == ' ' {
+			return i + 2
+		}
+	}
+	return 0
+}
+
+// oliPrefix returns the length of an ordered-list item prefix — up to
+// three spaces, one or more digits, a '.', then a space — or 0 if absent.
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	digitStart := i
+	for '0' <= data[i] && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if i == digitStart || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns definition list item prefix
+// dliPrefix returns the length of a ": " definition-list prefix at the
+// start of data, or 0 if absent.
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a : followed by a spaces
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	// NOTE(review): this loop never advances — data[i] is ':' here, not ' '.
+	// It matches upstream blackfriday v1; left as-is to preserve behavior.
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// parse ordered or unordered list block
+// list consumes consecutive list items starting at data[0] and renders
+// them inside the renderer's List callback; returns bytes consumed.
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	// work renders the items one by one; invoked by p.r.List below
+	work := func() bool {
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			// only the first item carries the BEGINNING_OF_LIST flag
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+// Gathers all lines belonging to the item (including nested lists, fenced
+// code, and indented continuations), renders the content as inline or
+// block markdown depending on what was found, and emits it via
+// Renderer.ListItem. Returns the bytes consumed; 0 means "not a list item".
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+	// keep track of the indentation of the first line
+	itemIndent := 0
+	for itemIndent < 3 && data[itemIndent] == ' ' {
+		itemIndent++
+	}
+
+	// try each list-item prefix in turn: unordered, ordered, definition
+	i := p.uliPrefix(data)
+	if i == 0 {
+		i = p.oliPrefix(data)
+	}
+	if i == 0 {
+		i = p.dliPrefix(data)
+		// reset definition term flag
+		if i > 0 {
+			*flags &= ^LIST_TYPE_TERM
+		}
+	}
+	if i == 0 {
+		// if in definition list, set term flag and continue
+		if *flags&LIST_TYPE_DEFINITION != 0 {
+			*flags |= LIST_TYPE_TERM
+		} else {
+			return 0
+		}
+	}
+
+	// skip leading whitespace on first line
+	for data[i] == ' ' {
+		i++
+	}
+
+	// find the end of the line
+	line := i
+	for i > 0 && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[line:i])
+	line = i
+
+	// process the following lines
+	containsBlankLine := false
+	sublist := 0
+	codeBlockMarker := ""
+
+gatherlines:
+	for line < len(data) {
+		i++
+
+		// find the end of this line
+		for data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[line:i]) > 0 {
+			containsBlankLine = true
+			raw.Write(data[line:i])
+			line = i
+			continue
+		}
+
+		// calculate the indentation
+		indent := 0
+		for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+			indent++
+		}
+
+		chunk := data[line+indent : i]
+
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			// determine if in or out of codeblock
+			// if in codeblock, ignore normal list processing
+			_, marker := isFenceLine(chunk, nil, codeBlockMarker, false)
+			if marker != "" {
+				if codeBlockMarker == "" {
+					// start of codeblock
+					codeBlockMarker = marker
+				} else {
+					// end of codeblock.
+					*flags |= LIST_ITEM_CONTAINS_BLOCK
+					codeBlockMarker = ""
+				}
+			}
+			// we are in a codeblock, write line, and continue
+			if codeBlockMarker != "" || marker != "" {
+				raw.Write(data[line+indent : i])
+				line = i
+				continue gatherlines
+			}
+		}
+
+		// evaluate how this line fits in
+		switch {
+		// is this a nested list item?
+		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+			p.oliPrefix(chunk) > 0 ||
+			p.dliPrefix(chunk) > 0:
+
+			if containsBlankLine {
+				// end the list if the type changed after a blank line
+				if indent <= itemIndent &&
+					((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
+						(*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
+
+					*flags |= LIST_ITEM_END_OF_LIST
+					break gatherlines
+				}
+				*flags |= LIST_ITEM_CONTAINS_BLOCK
+			}
+
+			// to be a nested list, it must be indented more
+			// if not, it is the next item in the same list
+			if indent <= itemIndent {
+				break gatherlines
+			}
+
+			// is this the first item in the nested list?
+			if sublist == 0 {
+				sublist = raw.Len()
+			}
+
+		// is this a nested prefix header?
+		case p.isPrefixHeader(chunk):
+			// if the header is not indented, it is not nested in the list
+			// and thus ends the list
+			if containsBlankLine && indent < 4 {
+				*flags |= LIST_ITEM_END_OF_LIST
+				break gatherlines
+			}
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+
+		// anything following an empty line is only part
+		// of this item if it is indented 4 spaces
+		// (regardless of the indentation of the beginning of the item)
+		case containsBlankLine && indent < 4:
+			if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+				// is the next item still a part of this list?
+				next := i
+				for data[next] != '\n' {
+					next++
+				}
+				for next < len(data)-1 && data[next] == '\n' {
+					next++
+				}
+				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+					*flags |= LIST_ITEM_END_OF_LIST
+				}
+			} else {
+				*flags |= LIST_ITEM_END_OF_LIST
+			}
+			break gatherlines
+
+		// a blank line means this should be parsed as a block
+		case containsBlankLine:
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+		}
+
+		containsBlankLine = false
+
+		// add the line into the working buffer without prefix
+		raw.Write(data[line+indent : i])
+
+		line = i
+	}
+
+	// If reached end of data, the Renderer.ListItem call we're going to make below
+	// is definitely the last in the list.
+	if line >= len(data) {
+		*flags |= LIST_ITEM_END_OF_LIST
+	}
+
+	rawBytes := raw.Bytes()
+
+	// render the contents of the list item
+	var cooked bytes.Buffer
+	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+		// intermediate render of block item, except for definition term
+		if sublist > 0 {
+			p.block(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.block(&cooked, rawBytes)
+		}
+	} else {
+		// intermediate render of inline item
+		if sublist > 0 {
+			p.inline(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.inline(&cooked, rawBytes)
+		}
+	}
+
+	// render the actual list item
+	cookedBytes := cooked.Bytes()
+	parsedEnd := len(cookedBytes)
+
+	// strip trailing newlines
+	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+		parsedEnd--
+	}
+	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+	return line
+}
+
+// renderParagraph emits data — one already-delimited paragraph, including
+// its trailing newline — through the renderer, trimming leading spaces and
+// trailing spaces (before the final newline) first.
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := 0
+	for data[beg] == ' ' {
+		beg++
+	}
+
+	// drop the trailing newline, then any trailing spaces before it
+	end := len(data) - 1
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+
+	p.r.Paragraph(out, func() bool {
+		p.inline(out, data[beg:end])
+		return true
+	})
+}
+
+// paragraph scans forward from data[0] until something ends the paragraph
+// (a blank line, a setext underline, an HTML block, a prefix header, a
+// horizontal rule, a fenced code block, or — with the relevant extensions —
+// a definition or plain list), renders the paragraph, and returns the
+// bytes consumed.
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+	// prev: index of 1st char of previous line
+	// line: index of 1st char of current line
+	// i: index of cursor/end of current line
+	var prev, line, i int
+
+	// keep going until we find something to mark the end of the paragraph
+	for i < len(data) {
+		// mark the beginning of the current line
+		prev = line
+		current := data[i:]
+		line = i
+
+		// did we find a blank line marking the end of the paragraph?
+		if n := p.isEmpty(current); n > 0 {
+			// did this blank line followed by a definition list item?
+			if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+				if i < len(data)-1 && data[i+1] == ':' {
+					return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+				}
+			}
+
+			p.renderParagraph(out, data[:i])
+			return i + n
+		}
+
+		// an underline under some text marks a header, so our paragraph ended on prev line
+		if i > 0 {
+			if level := p.isUnderlinedHeader(current); level > 0 {
+				// render the paragraph
+				p.renderParagraph(out, data[:prev])
+
+				// ignore leading and trailing whitespace
+				eol := i - 1
+				for prev < eol && data[prev] == ' ' {
+					prev++
+				}
+				for eol > prev && data[eol-1] == ' ' {
+					eol--
+				}
+
+				// render the header
+				// this ugly double closure avoids forcing variables onto the heap
+				work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+					return func() bool {
+						pp.inline(o, d)
+						return true
+					}
+				}(out, p, data[prev:eol])
+
+				id := ""
+				if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+					id = SanitizedAnchorName(string(data[prev:eol]))
+				}
+
+				p.r.Header(out, work, level, id)
+
+				// find the end of the underline
+				for data[i] != '\n' {
+					i++
+				}
+				return i
+			}
+		}
+
+		// if the next line starts a block of HTML, then the paragraph ends here
+		if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+			if data[i] == '<' && p.html(out, current, false) > 0 {
+				// rewind to before the HTML block
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a prefixed header or a horizontal rule after this, paragraph is over
+		if p.isPrefixHeader(current) || p.isHRule(current) {
+			p.renderParagraph(out, data[:i])
+			return i
+		}
+
+		// if there's a fenced code block, paragraph is over
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			if p.fencedCodeBlock(out, current, false) > 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a definition list item, prev line is a definition term
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(current) != 0 {
+				return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+			}
+		}
+
+		// if there's a list after this, paragraph is over
+		if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+			if p.uliPrefix(current) != 0 ||
+				p.oliPrefix(current) != 0 ||
+				p.quotePrefix(current) != 0 ||
+				p.codePrefix(current) != 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// otherwise, scan to the beginning of the next line
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+	}
+
+	p.renderParagraph(out, data[:i])
+	return i
+}
+
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
+//
+// It implements the algorithm specified in the package comment.
+func SanitizedAnchorName(text string) string {
+	var b strings.Builder
+	pendingDash := false
+	for _, r := range text {
+		if unicode.IsLetter(r) || unicode.IsNumber(r) {
+			// a run of invalid runes between two valid ones becomes a
+			// single dash; runs before the first valid rune are dropped
+			if pendingDash && b.Len() > 0 {
+				b.WriteRune('-')
+			}
+			pendingDash = false
+			b.WriteRune(unicode.ToLower(r))
+		} else {
+			pendingDash = true
+		}
+	}
+	return b.String()
+}
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/doc.go b/src/margo.sh/vendor/github.com/russross/blackfriday/doc.go
new file mode 100644
index 00000000..9656c42a
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/doc.go
@@ -0,0 +1,32 @@
+// Package blackfriday is a Markdown processor.
+//
+// It translates plain text with simple formatting rules into HTML or LaTeX.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/go.mod b/src/margo.sh/vendor/github.com/russross/blackfriday/go.mod
new file mode 100644
index 00000000..b05561a0
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/go.mod
@@ -0,0 +1 @@
+module github.com/russross/blackfriday
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/html.go b/src/margo.sh/vendor/github.com/russross/blackfriday/html.go
new file mode 100644
index 00000000..e0a6c69c
--- /dev/null
+++ b/src/margo.sh/vendor/github.com/russross/blackfriday/html.go
@@ -0,0 +1,938 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russross@mac.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Html renderer configuration options.
+const (
+ HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
+ HTML_SKIP_STYLE // skip embedded