From ffffb9bf24a1ed765c5e91db1f593c02b30b894e Mon Sep 17 00:00:00 2001 From: Shammi Shailaj Date: Sat, 28 Nov 2020 22:59:51 +0530 Subject: [PATCH] moving to go modules and vendoring dependencies --- go.mod | 20 + go.sum | 43 + vendor/github.com/fragmenta/assets/.gitignore | 24 + vendor/github.com/fragmenta/assets/LICENSE | 22 + vendor/github.com/fragmenta/assets/README.md | 24 + vendor/github.com/fragmenta/assets/assets.go | 245 + vendor/github.com/fragmenta/assets/file.go | 114 + vendor/github.com/fragmenta/assets/group.go | 215 + vendor/github.com/fragmenta/assets/helpers.go | 78 + .../assets/internal/cssmin/cssmin.go | 143 + .../fragmenta/assets/internal/jsmin/jsmin.go | 352 + vendor/github.com/fragmenta/auth/LICENSE | 22 + vendor/github.com/fragmenta/auth/README.md | 93 + vendor/github.com/fragmenta/auth/auth.go | 111 + .../github.com/fragmenta/auth/can/ability.go | 95 + vendor/github.com/fragmenta/auth/can/can.go | 88 + .../github.com/fragmenta/auth/deprecated.go | 41 + vendor/github.com/fragmenta/auth/encode.go | 90 + vendor/github.com/fragmenta/auth/encrypt.go | 73 + vendor/github.com/fragmenta/auth/random.go | 29 + vendor/github.com/fragmenta/auth/session.go | 276 + vendor/github.com/fragmenta/mux/LICENSE | 21 + vendor/github.com/fragmenta/mux/README.md | 121 + vendor/github.com/fragmenta/mux/handlers.go | 33 + vendor/github.com/fragmenta/mux/mux.go | 226 + vendor/github.com/fragmenta/mux/params.go | 386 + vendor/github.com/fragmenta/mux/route.go | 272 + vendor/github.com/fragmenta/query/LICENSE | 22 + vendor/github.com/fragmenta/query/README.md | 116 + .../fragmenta/query/adapters/database.go | 152 + .../query/adapters/database_mysql.go | 131 + .../fragmenta/query/adapters/database_psql.go | 124 + .../query/adapters/database_sqlite.go | 101 + vendor/github.com/fragmenta/query/database.go | 84 + vendor/github.com/fragmenta/query/query.go | 798 ++ vendor/github.com/fragmenta/query/textual.go | 134 + vendor/github.com/fragmenta/server/LICENSE | 22 + vendor/github.com/fragmenta/server/README.md | 84 + .../fragmenta/server/config/config.go | 140 + .../github.com/fragmenta/server/deprecated.go | 162 + vendor/github.com/fragmenta/server/errors.go | 97 + vendor/github.com/fragmenta/server/headers.go | 28 + .../fragmenta/server/log/default.go | 141 + .../fragmenta/server/log/deprecated.go | 85 + .../github.com/fragmenta/server/log/file.go | 46 + vendor/github.com/fragmenta/server/log/log.go | 131 + .../fragmenta/server/log/middleware.go | 91 + .../github.com/fragmenta/server/redirects.go | 40 + vendor/github.com/fragmenta/server/server.go | 212 + vendor/github.com/fragmenta/view/LICENSE | 22 + vendor/github.com/fragmenta/view/README.md | 31 + .../github.com/fragmenta/view/deprecated.go | 49 + .../fragmenta/view/helpers/forms.go | 297 + .../fragmenta/view/helpers/helpers.go | 210 + .../github.com/fragmenta/view/helpers/html.go | 77 + .../fragmenta/view/helpers/maths.go | 142 + .../fragmenta/view/parser/parser.go | 17 + .../fragmenta/view/parser/scanner.go | 219 + .../fragmenta/view/parser/template.go | 175 + .../fragmenta/view/parser/template.html.go | 108 + .../fragmenta/view/parser/template.json.go | 103 + .../fragmenta/view/parser/template.text.go | 95 + vendor/github.com/fragmenta/view/render.go | 384 + vendor/github.com/fragmenta/view/view.go | 151 + .../github.com/go-sql-driver/mysql/.gitignore | 9 + .../go-sql-driver/mysql/.travis.yml | 129 + vendor/github.com/go-sql-driver/mysql/AUTHORS | 105 + .../go-sql-driver/mysql/CHANGELOG.md | 206 + 
vendor/github.com/go-sql-driver/mysql/LICENSE | 373 + .../github.com/go-sql-driver/mysql/README.md | 501 ++ vendor/github.com/go-sql-driver/mysql/auth.go | 422 + .../github.com/go-sql-driver/mysql/buffer.go | 182 + .../go-sql-driver/mysql/collations.go | 265 + .../go-sql-driver/mysql/conncheck.go | 54 + .../go-sql-driver/mysql/conncheck_dummy.go | 17 + .../go-sql-driver/mysql/connection.go | 651 ++ .../go-sql-driver/mysql/connector.go | 146 + .../github.com/go-sql-driver/mysql/const.go | 174 + .../github.com/go-sql-driver/mysql/driver.go | 107 + vendor/github.com/go-sql-driver/mysql/dsn.go | 560 ++ .../github.com/go-sql-driver/mysql/errors.go | 65 + .../github.com/go-sql-driver/mysql/fields.go | 194 + vendor/github.com/go-sql-driver/mysql/go.mod | 3 + .../github.com/go-sql-driver/mysql/infile.go | 182 + .../go-sql-driver/mysql/nulltime.go | 50 + .../go-sql-driver/mysql/nulltime_go113.go | 31 + .../go-sql-driver/mysql/nulltime_legacy.go | 34 + .../github.com/go-sql-driver/mysql/packets.go | 1342 +++ .../github.com/go-sql-driver/mysql/result.go | 22 + vendor/github.com/go-sql-driver/mysql/rows.go | 223 + .../go-sql-driver/mysql/statement.go | 204 + .../go-sql-driver/mysql/transaction.go | 31 + .../github.com/go-sql-driver/mysql/utils.go | 701 ++ .../github.com/kennygrant/sanitize/.gitignore | 22 + .../kennygrant/sanitize/.travis.yml | 1 + vendor/github.com/kennygrant/sanitize/LICENSE | 27 + .../github.com/kennygrant/sanitize/README.md | 62 + .../kennygrant/sanitize/sanitize.go | 388 + vendor/github.com/lib/pq/.gitignore | 4 + vendor/github.com/lib/pq/.travis.sh | 73 + vendor/github.com/lib/pq/.travis.yml | 44 + vendor/github.com/lib/pq/LICENSE.md | 8 + vendor/github.com/lib/pq/README.md | 30 + vendor/github.com/lib/pq/TESTS.md | 33 + vendor/github.com/lib/pq/array.go | 756 ++ vendor/github.com/lib/pq/buf.go | 91 + vendor/github.com/lib/pq/conn.go | 1996 +++++ vendor/github.com/lib/pq/conn_go18.go | 149 + vendor/github.com/lib/pq/connector.go | 115 + vendor/github.com/lib/pq/copy.go | 307 + vendor/github.com/lib/pq/doc.go | 268 + vendor/github.com/lib/pq/encode.go | 622 ++ vendor/github.com/lib/pq/error.go | 515 ++ vendor/github.com/lib/pq/go.mod | 3 + vendor/github.com/lib/pq/krb.go | 27 + vendor/github.com/lib/pq/notice.go | 71 + vendor/github.com/lib/pq/notify.go | 858 ++ vendor/github.com/lib/pq/oid/doc.go | 6 + vendor/github.com/lib/pq/oid/types.go | 343 + vendor/github.com/lib/pq/rows.go | 93 + vendor/github.com/lib/pq/scram/scram.go | 264 + vendor/github.com/lib/pq/ssl.go | 175 + vendor/github.com/lib/pq/ssl_permissions.go | 20 + vendor/github.com/lib/pq/ssl_windows.go | 9 + vendor/github.com/lib/pq/url.go | 76 + vendor/github.com/lib/pq/user_posix.go | 24 + vendor/github.com/lib/pq/user_windows.go | 27 + vendor/github.com/lib/pq/uuid.go | 23 + vendor/github.com/sendgrid/rest/.env_sample | 1 + vendor/github.com/sendgrid/rest/.gitignore | 28 + vendor/github.com/sendgrid/rest/.travis.yml | 27 + vendor/github.com/sendgrid/rest/CHANGELOG.md | 105 + .../sendgrid/rest/CODE_OF_CONDUCT.md | 73 + .../github.com/sendgrid/rest/CONTRIBUTING.md | 169 + .../github.com/sendgrid/rest/FIRST_TIMERS.md | 79 + .../sendgrid/rest/ISSUE_TEMPLATE.md | 30 + vendor/github.com/sendgrid/rest/LICENSE.md | 21 + vendor/github.com/sendgrid/rest/Makefile | 7 + .../sendgrid/rest/PULL_REQUEST_TEMPLATE.md | 31 + vendor/github.com/sendgrid/rest/README.md | 199 + .../sendgrid/rest/TROUBLESHOOTING.md | 62 + vendor/github.com/sendgrid/rest/USAGE.md | 211 + .../sendgrid/rest/docker-compose.yml | 6 + 
vendor/github.com/sendgrid/rest/rest.go | 148 + .../sendgrid/rest/twilio_sendgrid_logo.png | Bin 0 -> 14596 bytes .../sendgrid/sendgrid-go/.env_sample | 1 + .../sendgrid/sendgrid-go/.gitignore | 8 + .../sendgrid/sendgrid-go/.travis.yml | 28 + .../sendgrid/sendgrid-go/CHANGELOG.md | 270 + .../sendgrid/sendgrid-go/CODE_OF_CONDUCT.md | 73 + .../sendgrid/sendgrid-go/CONTRIBUTING.md | 176 + .../sendgrid/sendgrid-go/Dockerfile | 17 + .../sendgrid/sendgrid-go/FIRST_TIMERS.md | 79 + .../sendgrid/sendgrid-go/ISSUE_TEMPLATE.md | 30 + .../github.com/sendgrid/sendgrid-go/LICENSE | 21 + .../github.com/sendgrid/sendgrid-go/Makefile | 15 + .../sendgrid-go/PULL_REQUEST_TEMPLATE.md | 31 + .../github.com/sendgrid/sendgrid-go/README.md | 254 + .../sendgrid/sendgrid-go/TROUBLESHOOTING.md | 110 + .../github.com/sendgrid/sendgrid-go/USAGE.md | 6661 ++++++++++++++ .../sendgrid/sendgrid-go/base_interface.go | 130 + .../sendgrid/sendgrid-go/go.coverage.sh | 12 + .../sendgrid-go/helpers/mail/README.md | 31 + .../sendgrid-go/helpers/mail/mail_v3.go | 702 ++ .../sendgrid/sendgrid-go/sendgrid.go | 57 + .../sendgrid/sendgrid-go/twilio_email.go | 41 + .../sendgrid-go/twilio_sendgrid_logo.png | Bin 0 -> 14596 bytes vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/acme/acme.go | 1098 +++ .../x/crypto/acme/autocert/autocert.go | 1249 +++ .../x/crypto/acme/autocert/cache.go | 136 + .../x/crypto/acme/autocert/listener.go | 155 + .../x/crypto/acme/autocert/renewal.go | 141 + vendor/golang.org/x/crypto/acme/http.go | 321 + vendor/golang.org/x/crypto/acme/jws.go | 187 + vendor/golang.org/x/crypto/acme/rfc8555.go | 392 + vendor/golang.org/x/crypto/acme/types.go | 560 ++ .../golang.org/x/crypto/acme/version_go112.go | 27 + vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 + vendor/golang.org/x/crypto/blowfish/block.go | 159 + vendor/golang.org/x/crypto/blowfish/cipher.go | 99 + vendor/golang.org/x/crypto/blowfish/const.go | 199 + vendor/golang.org/x/net/AUTHORS | 3 + vendor/golang.org/x/net/CONTRIBUTORS | 3 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/html/atom/atom.go | 78 + vendor/golang.org/x/net/html/atom/table.go | 783 ++ vendor/golang.org/x/net/html/const.go | 111 + vendor/golang.org/x/net/html/doc.go | 106 + vendor/golang.org/x/net/html/doctype.go | 156 + vendor/golang.org/x/net/html/entity.go | 2253 +++++ vendor/golang.org/x/net/html/escape.go | 258 + vendor/golang.org/x/net/html/foreign.go | 222 + vendor/golang.org/x/net/html/node.go | 225 + vendor/golang.org/x/net/html/parse.go | 2438 ++++++ vendor/golang.org/x/net/html/render.go | 273 + vendor/golang.org/x/net/html/token.go | 1224 +++ vendor/golang.org/x/net/idna/idna10.0.0.go | 734 ++ vendor/golang.org/x/net/idna/idna9.0.0.go | 682 ++ vendor/golang.org/x/net/idna/punycode.go | 203 + vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 ++++++++++ vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ++++++++++ vendor/golang.org/x/net/idna/tables12.0.0.go | 4733 ++++++++++ vendor/golang.org/x/net/idna/tables13.0.0.go | 4839 ++++++++++ vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 ++++++++++ vendor/golang.org/x/net/idna/trie.go | 72 + vendor/golang.org/x/net/idna/trieval.go | 119 + vendor/golang.org/x/text/AUTHORS | 3 + vendor/golang.org/x/text/CONTRIBUTORS | 3 + vendor/golang.org/x/text/LICENSE | 27 + 
vendor/golang.org/x/text/PATENTS | 22 + .../x/text/secure/bidirule/bidirule.go | 336 + .../x/text/secure/bidirule/bidirule10.0.0.go | 11 + .../x/text/secure/bidirule/bidirule9.0.0.go | 14 + .../golang.org/x/text/transform/transform.go | 709 ++ vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 + .../golang.org/x/text/unicode/bidi/bracket.go | 335 + vendor/golang.org/x/text/unicode/bidi/core.go | 1058 +++ vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../x/text/unicode/bidi/tables10.0.0.go | 1815 ++++ .../x/text/unicode/bidi/tables11.0.0.go | 1887 ++++ .../x/text/unicode/bidi/tables12.0.0.go | 1923 ++++ .../x/text/unicode/bidi/tables13.0.0.go | 1955 +++++ .../x/text/unicode/bidi/tables9.0.0.go | 1781 ++++ .../golang.org/x/text/unicode/bidi/trieval.go | 60 + .../x/text/unicode/norm/composition.go | 512 ++ .../x/text/unicode/norm/forminfo.go | 278 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 458 + .../x/text/unicode/norm/normalize.go | 609 ++ .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7657 ++++++++++++++++ .../x/text/unicode/norm/tables11.0.0.go | 7693 ++++++++++++++++ .../x/text/unicode/norm/tables12.0.0.go | 7710 ++++++++++++++++ .../x/text/unicode/norm/tables13.0.0.go | 7760 +++++++++++++++++ .../x/text/unicode/norm/tables9.0.0.go | 7637 ++++++++++++++++ .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + vendor/modules.txt | 61 + 244 files changed, 124199 insertions(+) create mode 100644 go.mod create mode 100644 go.sum create mode 100644 vendor/github.com/fragmenta/assets/.gitignore create mode 100644 vendor/github.com/fragmenta/assets/LICENSE create mode 100644 vendor/github.com/fragmenta/assets/README.md create mode 100644 vendor/github.com/fragmenta/assets/assets.go create mode 100644 vendor/github.com/fragmenta/assets/file.go create mode 100644 vendor/github.com/fragmenta/assets/group.go create mode 100644 vendor/github.com/fragmenta/assets/helpers.go create mode 100644 vendor/github.com/fragmenta/assets/internal/cssmin/cssmin.go create mode 100644 vendor/github.com/fragmenta/assets/internal/jsmin/jsmin.go create mode 100644 vendor/github.com/fragmenta/auth/LICENSE create mode 100644 vendor/github.com/fragmenta/auth/README.md create mode 100644 vendor/github.com/fragmenta/auth/auth.go create mode 100644 vendor/github.com/fragmenta/auth/can/ability.go create mode 100644 vendor/github.com/fragmenta/auth/can/can.go create mode 100644 vendor/github.com/fragmenta/auth/deprecated.go create mode 100644 vendor/github.com/fragmenta/auth/encode.go create mode 100644 vendor/github.com/fragmenta/auth/encrypt.go create mode 100644 vendor/github.com/fragmenta/auth/random.go create mode 100644 vendor/github.com/fragmenta/auth/session.go create mode 100644 vendor/github.com/fragmenta/mux/LICENSE create mode 100644 vendor/github.com/fragmenta/mux/README.md create mode 100644 vendor/github.com/fragmenta/mux/handlers.go create mode 100644 vendor/github.com/fragmenta/mux/mux.go create mode 100644 vendor/github.com/fragmenta/mux/params.go create mode 100644 vendor/github.com/fragmenta/mux/route.go create mode 100644 vendor/github.com/fragmenta/query/LICENSE create mode 100644 vendor/github.com/fragmenta/query/README.md create mode 100644 vendor/github.com/fragmenta/query/adapters/database.go create mode 100644 vendor/github.com/fragmenta/query/adapters/database_mysql.go create mode 100644 vendor/github.com/fragmenta/query/adapters/database_psql.go 
create mode 100644 vendor/github.com/fragmenta/query/adapters/database_sqlite.go create mode 100644 vendor/github.com/fragmenta/query/database.go create mode 100644 vendor/github.com/fragmenta/query/query.go create mode 100644 vendor/github.com/fragmenta/query/textual.go create mode 100644 vendor/github.com/fragmenta/server/LICENSE create mode 100644 vendor/github.com/fragmenta/server/README.md create mode 100644 vendor/github.com/fragmenta/server/config/config.go create mode 100644 vendor/github.com/fragmenta/server/deprecated.go create mode 100644 vendor/github.com/fragmenta/server/errors.go create mode 100644 vendor/github.com/fragmenta/server/headers.go create mode 100644 vendor/github.com/fragmenta/server/log/default.go create mode 100644 vendor/github.com/fragmenta/server/log/deprecated.go create mode 100644 vendor/github.com/fragmenta/server/log/file.go create mode 100644 vendor/github.com/fragmenta/server/log/log.go create mode 100644 vendor/github.com/fragmenta/server/log/middleware.go create mode 100644 vendor/github.com/fragmenta/server/redirects.go create mode 100644 vendor/github.com/fragmenta/server/server.go create mode 100644 vendor/github.com/fragmenta/view/LICENSE create mode 100644 vendor/github.com/fragmenta/view/README.md create mode 100644 vendor/github.com/fragmenta/view/deprecated.go create mode 100644 vendor/github.com/fragmenta/view/helpers/forms.go create mode 100644 vendor/github.com/fragmenta/view/helpers/helpers.go create mode 100644 vendor/github.com/fragmenta/view/helpers/html.go create mode 100644 vendor/github.com/fragmenta/view/helpers/maths.go create mode 100644 vendor/github.com/fragmenta/view/parser/parser.go create mode 100644 vendor/github.com/fragmenta/view/parser/scanner.go create mode 100644 vendor/github.com/fragmenta/view/parser/template.go create mode 100644 vendor/github.com/fragmenta/view/parser/template.html.go create mode 100644 vendor/github.com/fragmenta/view/parser/template.json.go create mode 100644 vendor/github.com/fragmenta/view/parser/template.text.go create mode 100644 vendor/github.com/fragmenta/view/render.go create mode 100644 vendor/github.com/fragmenta/view/view.go create mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore create mode 100644 vendor/github.com/go-sql-driver/mysql/.travis.yml create mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS create mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md create mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE create mode 100644 vendor/github.com/go-sql-driver/mysql/README.md create mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go create mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go create mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck.go create mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go create mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go create mode 100644 vendor/github.com/go-sql-driver/mysql/connector.go create mode 100644 vendor/github.com/go-sql-driver/mysql/const.go create mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go create mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go create mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go create mode 100644 vendor/github.com/go-sql-driver/mysql/go.mod create mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go create mode 100644 
vendor/github.com/go-sql-driver/mysql/nulltime.go create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_go113.go create mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go create mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go create mode 100644 vendor/github.com/go-sql-driver/mysql/result.go create mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go create mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go create mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go create mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go create mode 100644 vendor/github.com/kennygrant/sanitize/.gitignore create mode 100644 vendor/github.com/kennygrant/sanitize/.travis.yml create mode 100644 vendor/github.com/kennygrant/sanitize/LICENSE create mode 100644 vendor/github.com/kennygrant/sanitize/README.md create mode 100644 vendor/github.com/kennygrant/sanitize/sanitize.go create mode 100644 vendor/github.com/lib/pq/.gitignore create mode 100644 vendor/github.com/lib/pq/.travis.sh create mode 100644 vendor/github.com/lib/pq/.travis.yml create mode 100644 vendor/github.com/lib/pq/LICENSE.md create mode 100644 vendor/github.com/lib/pq/README.md create mode 100644 vendor/github.com/lib/pq/TESTS.md create mode 100644 vendor/github.com/lib/pq/array.go create mode 100644 vendor/github.com/lib/pq/buf.go create mode 100644 vendor/github.com/lib/pq/conn.go create mode 100644 vendor/github.com/lib/pq/conn_go18.go create mode 100644 vendor/github.com/lib/pq/connector.go create mode 100644 vendor/github.com/lib/pq/copy.go create mode 100644 vendor/github.com/lib/pq/doc.go create mode 100644 vendor/github.com/lib/pq/encode.go create mode 100644 vendor/github.com/lib/pq/error.go create mode 100644 vendor/github.com/lib/pq/go.mod create mode 100644 vendor/github.com/lib/pq/krb.go create mode 100644 vendor/github.com/lib/pq/notice.go create mode 100644 vendor/github.com/lib/pq/notify.go create mode 100644 vendor/github.com/lib/pq/oid/doc.go create mode 100644 vendor/github.com/lib/pq/oid/types.go create mode 100644 vendor/github.com/lib/pq/rows.go create mode 100644 vendor/github.com/lib/pq/scram/scram.go create mode 100644 vendor/github.com/lib/pq/ssl.go create mode 100644 vendor/github.com/lib/pq/ssl_permissions.go create mode 100644 vendor/github.com/lib/pq/ssl_windows.go create mode 100644 vendor/github.com/lib/pq/url.go create mode 100644 vendor/github.com/lib/pq/user_posix.go create mode 100644 vendor/github.com/lib/pq/user_windows.go create mode 100644 vendor/github.com/lib/pq/uuid.go create mode 100644 vendor/github.com/sendgrid/rest/.env_sample create mode 100644 vendor/github.com/sendgrid/rest/.gitignore create mode 100644 vendor/github.com/sendgrid/rest/.travis.yml create mode 100644 vendor/github.com/sendgrid/rest/CHANGELOG.md create mode 100644 vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/sendgrid/rest/CONTRIBUTING.md create mode 100644 vendor/github.com/sendgrid/rest/FIRST_TIMERS.md create mode 100644 vendor/github.com/sendgrid/rest/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/sendgrid/rest/LICENSE.md create mode 100644 vendor/github.com/sendgrid/rest/Makefile create mode 100644 vendor/github.com/sendgrid/rest/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/sendgrid/rest/README.md create mode 100644 vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md create mode 100644 vendor/github.com/sendgrid/rest/USAGE.md create mode 100644 
vendor/github.com/sendgrid/rest/docker-compose.yml create mode 100644 vendor/github.com/sendgrid/rest/rest.go create mode 100644 vendor/github.com/sendgrid/rest/twilio_sendgrid_logo.png create mode 100644 vendor/github.com/sendgrid/sendgrid-go/.env_sample create mode 100644 vendor/github.com/sendgrid/sendgrid-go/.gitignore create mode 100644 vendor/github.com/sendgrid/sendgrid-go/.travis.yml create mode 100644 vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/Dockerfile create mode 100644 vendor/github.com/sendgrid/sendgrid-go/FIRST_TIMERS.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/LICENSE create mode 100644 vendor/github.com/sendgrid/sendgrid-go/Makefile create mode 100644 vendor/github.com/sendgrid/sendgrid-go/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/README.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/USAGE.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/base_interface.go create mode 100644 vendor/github.com/sendgrid/sendgrid-go/go.coverage.sh create mode 100644 vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md create mode 100644 vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go create mode 100644 vendor/github.com/sendgrid/sendgrid-go/sendgrid.go create mode 100644 vendor/github.com/sendgrid/sendgrid-go/twilio_email.go create mode 100644 vendor/github.com/sendgrid/sendgrid-go/twilio_sendgrid_logo.png create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/acme/acme.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal.go create mode 100644 vendor/golang.org/x/crypto/acme/http.go create mode 100644 vendor/golang.org/x/crypto/acme/jws.go create mode 100644 vendor/golang.org/x/crypto/acme/rfc8555.go create mode 100644 vendor/golang.org/x/crypto/acme/types.go create mode 100644 vendor/golang.org/x/crypto/acme/version_go112.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/html/atom/atom.go create mode 100644 vendor/golang.org/x/net/html/atom/table.go create mode 100644 vendor/golang.org/x/net/html/const.go create mode 100644 vendor/golang.org/x/net/html/doc.go create mode 100644 vendor/golang.org/x/net/html/doctype.go create mode 100644 vendor/golang.org/x/net/html/entity.go 
create mode 100644 vendor/golang.org/x/net/html/escape.go create mode 100644 vendor/golang.org/x/net/html/foreign.go create mode 100644 vendor/golang.org/x/net/html/node.go create mode 100644 vendor/golang.org/x/net/html/parse.go create mode 100644 vendor/golang.org/x/net/html/render.go create mode 100644 vendor/golang.org/x/net/html/token.go create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables12.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/text/AUTHORS create mode 100644 vendor/golang.org/x/text/CONTRIBUTORS create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/modules.txt diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a074f54 --- /dev/null +++ b/go.mod @@ -0,0 +1,20 @@ +module github.com/fragmenta/fragmenta-cms + +go 1.15 + +require ( + github.com/fragmenta/assets v0.0.0-20170106151417-365a66dd5f52 + github.com/fragmenta/auth v1.5.6 + github.com/fragmenta/mux v1.6.7 + github.com/fragmenta/query v1.5.3 + github.com/fragmenta/server v1.5.9 + github.com/fragmenta/view v1.6.2 + 
github.com/go-sql-driver/mysql v1.5.0 // indirect + github.com/kennygrant/sanitize v1.2.4 // indirect + github.com/lib/pq v1.8.0 // indirect + github.com/sendgrid/rest v2.6.2+incompatible // indirect + github.com/sendgrid/sendgrid-go v3.7.2+incompatible + golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392 // indirect + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect + golang.org/x/text v0.3.4 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..cbfb4e4 --- /dev/null +++ b/go.sum @@ -0,0 +1,43 @@ +github.com/fragmenta/assets v0.0.0-20170106151417-365a66dd5f52 h1:x40LCd8spD6gcZHBPWSUnjjscbM/O75NmvXJslZ4+Ak= +github.com/fragmenta/assets v0.0.0-20170106151417-365a66dd5f52/go.mod h1:jjJP6iPyc6uv5TmUxzabP6qtwuWqMw3k32Zne3vW6Zc= +github.com/fragmenta/auth v1.5.6 h1:cBxZYDfbtMj75tPkr9PVOnM0hSKj7P5Tbm/1a8XcXys= +github.com/fragmenta/auth v1.5.6/go.mod h1:vBhR7wvfdmF3HSxcFzkjkkcjpqhSXGOGe5FMEE737HU= +github.com/fragmenta/mux v1.6.7 h1:7TCYtg7ttMziuCGPRHQF0IhO8bSSGf/x7TdjRLyUFS8= +github.com/fragmenta/mux v1.6.7/go.mod h1:kZalXJ8HxNttNH7XF6mGehgfElEAO0ZubavkPHHmRvQ= +github.com/fragmenta/query v1.5.3 h1:kMMpwJU1+YRrQj4tjkaRRTv5vCTCp1QKZEyZvHM1fw8= +github.com/fragmenta/query v1.5.3/go.mod h1:ABMWhk/Yi+Pr94ZXneHzNW7V/hfWHuQM4i75+Z9ilLc= +github.com/fragmenta/server v1.5.9 h1:zFb5bXjDy6Qf62vX0sPlEoZHhkKIa8cyIEBm+IjfP9M= +github.com/fragmenta/server v1.5.9/go.mod h1:ibY0vYkJYQTjKxRIKtnm264TYkAkarTvA+mSJcHfZJ4= +github.com/fragmenta/view v1.6.2 h1:jkbFo18ajkNW0+Z/H94F7p7bmX7W7kpHgqMxuWTFF3g= +github.com/fragmenta/view v1.6.2/go.mod h1:emCHewclrV4sA4O8j7z63RyEzQRJXSmW+SUxHqzrNEk= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o= +github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/sendgrid/rest v1.0.2 h1:xdfALkR1m9eqf41/zEnUmV0fw4b31ZzGZ4Dj5f2/w04= +github.com/sendgrid/rest v2.6.2+incompatible h1:zGMNhccsPkIc8SvU9x+qdDz2qhFoGUPGGC4mMvTondA= +github.com/sendgrid/rest v2.6.2+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= +github.com/sendgrid/sendgrid-go v1.2.0 h1:2K3teZdhaPe12ftFyFL4AWDH4QmNPc+sCi6mWFx5+oo= +github.com/sendgrid/sendgrid-go v3.7.2+incompatible h1:ePQr9ns8so+28whk+gLKRYiyI5IiCESkDIqy7cjiwLg= +github.com/sendgrid/sendgrid-go v3.7.2+incompatible/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392 h1:xYJJ3S178yv++9zXV/hnr29plCAGO9vAFG9dorqaFQc= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/fragmenta/assets/.gitignore b/vendor/github.com/fragmenta/assets/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/fragmenta/assets/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/fragmenta/assets/LICENSE b/vendor/github.com/fragmenta/assets/LICENSE new file mode 100644 index 0000000..f3cb26d --- /dev/null +++ b/vendor/github.com/fragmenta/assets/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Fragmenta + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/fragmenta/assets/README.md b/vendor/github.com/fragmenta/assets/README.md new file mode 100644 index 0000000..b105000 --- /dev/null +++ b/vendor/github.com/fragmenta/assets/README.md @@ -0,0 +1,24 @@ +# Assets +Assets provides asset compilation, concatenation and fingerprinting. Asset details are stored in a file at secrets/assets.json by default. + +### Usage + +Use the assets package to organize assets however you like within your src folder, and output them in compressed form in your public/assets folder when you come to deploy your app. 
+ +```Go + // Load asset details from json file on each run + err := appAssets.Load() + if err != nil { + // If no assets loaded, compile for the first time (produces files in public/assets) + err := appAssets.Compile("src", "public") + if err != nil { + server.Fatalf("#error compiling assets %s", err) + } + } +``` + +// Use the asset helpers to generate fingerprinted assets (either one fingerprinted file in production or a list of all files in development) - this is similar to the Rails asset pipeline. +```Go + view.Helpers["style"] = appAssets.StyleLink + view.Helpers["script"] = appAssets.ScriptLink +``` \ No newline at end of file diff --git a/vendor/github.com/fragmenta/assets/assets.go b/vendor/github.com/fragmenta/assets/assets.go new file mode 100644 index 0000000..72d487e --- /dev/null +++ b/vendor/github.com/fragmenta/assets/assets.go @@ -0,0 +1,245 @@ +// Package assets provides asset compilation, concatenation and fingerprinting. +package assets + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "path/filepath" + "sort" +) + +// TODO: remove assumptions about location of assets.json file - this should be configurable + +// Collection holds the complete list of groups +type Collection struct { + serveCompiled bool + path string + groups []*Group +} + +// New returns a new assets.Collection +func New(compiled bool) *Collection { + c := &Collection{ + serveCompiled: compiled, + path: "secrets/assets.json", + } + return c +} + +// File returns the first asset file matching name - this assumes files have unique names between groups +func (c *Collection) File(name string) *File { + for _, g := range c.groups { + for _, f := range g.files { + if f.name == name { + return f + } + } + } + return nil +} + +// Group returns the named group if it exists or an empty group if not +func (c *Collection) Group(name string) *Group { + for _, g := range c.groups { + if g.name == name { + return g + } + } + return &Group{name: name} // Should this return nil instead? 
+} + +// FetchOrCreateGroup returns the named group if it exists, or creates it if not +func (c *Collection) FetchOrCreateGroup(name string) *Group { + for _, g := range c.groups { + if g.name == name { + return g + } + } + g := &Group{name: name} + c.groups = append(c.groups, g) + return g +} + +// MarshalJSON generates json for this collection, of the form {group:{file:hash}} +func (c *Collection) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + + b.WriteString("{") + + for i, g := range c.groups { + gb, err := g.MarshalJSON() + if err != nil { + return nil, err + } + b.Write(gb) + if i+1 < len(c.groups) { + b.WriteString(",") + } + } + + b.WriteString("}") + + return b.Bytes(), nil +} + +// Save the assets to a file after compilation +func (c *Collection) Save() error { + + // Get a representation of each file and group as json + data, err := json.MarshalIndent(c, "", "\t") + if err != nil { + return fmt.Errorf("Error marshalling assets file %s %v", c.path, err) + } + + // Write our assets json file to the path + err = ioutil.WriteFile(c.path, data, 0644) + if err != nil { + return fmt.Errorf("Error writing assets file %s %v", c.path, err) + } + + return nil +} + +// Load the asset groups from the assets json file +// Call this on startup from your app to read the asset details after assets are compiled +func (c *Collection) Load() error { + + // Make sure we reset groups, in case we compiled + c.groups = make([]*Group, 0) + + // Read our assets json file from the path + file, err := ioutil.ReadFile(c.path) + if err != nil { + return fmt.Errorf("Error opening assets file %s %v", c.path, err) + } + + // Unmarshal json Groups/sections/Files + var data map[string]map[string]interface{} + err = json.Unmarshal(file, &data) + if err != nil { + return fmt.Errorf("Error reading assets %s %v", c.path, err) + } + + // Walk through data groups, creating our groups from it + // or fetching existing ones + for d, dv := range data { + g := c.FetchOrCreateGroup(d) + for k, v := range dv { + + switch k { + case "scripts": + g.scripthash = v.(string) + case "styles": + g.stylehash = v.(string) + case "files": + for p, h := range v.(map[string]interface{}) { + g.AddAsset(p, h.(string)) + } + } + + } + + } + + // For all our groups, sort files in name order + for _, g := range c.groups { + sort.Sort(g.files) + } + + return nil +} + +// Compile images, styles and scripts asset folders from src into dst (minifying and amalgamating) +func (c *Collection) Compile(src string, dst string) error { + + // First scan the directory for files we're interested in + files, err := collectAssets(filepath.Clean(src), []string{"js", "css", ".jpg", ".png"}) + if err != nil { + return err + } + + // Handle each asset by adding it to a group + // For now we only handle one group - the app group + // later we might create groups for any folders with assets/images/xxx etc + for _, f := range files { + g := c.FetchOrCreateGroup("app") + + // Load the file bytes and generate a hash + // copying it out to dst if require + g.ParseFile(f, dst) + + } + + // For all our groups, compile them to one file, calculate global hash + for _, g := range c.groups { + + // Remove old compiled files for this group + err = g.RemoveFiles(dst) + if err != nil { + return err + } + // Sort files first for group before compile + sort.Sort(g.files) + + err := g.Compile(dst) + if err != nil { + return err + } + + } + + // Now save a representation of the groups/files to our json file + err = c.Save() + if err != nil { + return err + } + + return 
nil +} + +// collectAssets collects the assets with this extension under src +func collectAssets(src string, extensions []string) ([]string, error) { + + assets := []string{} + + // TODO: perhaps use filepath.Walk instead + // filepath.Glob doesn't appear to support ** or {} + // this should catch + // src/app/images/img.png + // src/app/assets/images/img.png + // src/app/assets/images/group/img.png + for _, e := range extensions { + pattern := path.Join(src, "*/*/*."+e) + files, err := filepath.Glob(pattern) + if err != nil { + return assets, err + } + assets = append(assets, files...) + pattern = path.Join(src, "*/*/*/*."+e) + files, err = filepath.Glob(pattern) + if err != nil { + return assets, err + } + assets = append(assets, files...) + pattern = path.Join(src, "*/*/*/*/*."+e) + files, err = filepath.Glob(pattern) + if err != nil { + return assets, err + } + assets = append(assets, files...) + } + + return assets, nil + +} + +// bytesHash returns the sha hash of some bytes +func bytesHash(bytes []byte) string { + sum := sha1.Sum(bytes) + return hex.EncodeToString([]byte(sum[:])) +} diff --git a/vendor/github.com/fragmenta/assets/file.go b/vendor/github.com/fragmenta/assets/file.go new file mode 100644 index 0000000..50a4a5b --- /dev/null +++ b/vendor/github.com/fragmenta/assets/file.go @@ -0,0 +1,114 @@ +package assets + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path" + "strings" +) + +const permissions = 0744 + +// File stores a filename and hash fingerprint for the asset file +type File struct { + name string + hash string + path string + bytes []byte +} + +// NewFile returns a new file object +func NewFile(p string) (*File, error) { + + // Load file from path to get bytes + bytes, err := ioutil.ReadFile(p) + if err != nil { + return &File{}, err + } + + // Calculate hash and save it + file := &File{ + path: p, + name: path.Base(p), + hash: bytesHash(bytes), + bytes: bytes, + } + return file, nil +} + +// Style returns true if this file is a CSS file +func (f *File) Style() bool { + return strings.HasSuffix(f.name, ".css") +} + +// Script returns true if this file is a js file +func (f *File) Script() bool { + return strings.HasSuffix(f.name, ".js") +} + +// MarshalJSON generates json for this file, of the form {group:{file:hash}} +func (f *File) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + + s := fmt.Sprintf("\"%s\":\"%s\"", f.path, f.hash) + b.WriteString(s) + + return b.Bytes(), nil +} + +// Newer returns true if file exists at path +func (f *File) Newer(dst string) bool { + + // Check mtimes + stat, err := os.Stat(f.path) + if err != nil { + return false + } + srcM := stat.ModTime() + stat, err = os.Stat(dst) + + // If the file doesn't exist, return true + if os.IsNotExist(err) { + return true + } + + // Else check for other errors + if err != nil { + return false + } + + dstM := stat.ModTime() + + return srcM.After(dstM) + +} + +// Copy our bytes to dstpath +func (f *File) Copy(dst string) error { + err := ioutil.WriteFile(dst, f.bytes, permissions) + if err != nil { + return err + } + return nil +} + +// LocalPath returns the relative path of this file +func (f *File) LocalPath() string { + return f.path +} + +// AssetPath returns the path of this file within the assets folder +func (f *File) AssetPath(dst string) string { + folder := "styles" + if f.Script() { + folder = "scripts" + } + return path.Join(dst, "assets", folder, f.name) +} + +// String returns a string representation of this object +func (f *File) String() string { + return 
fmt.Sprintf("%s:%s", f.name, f.hash) +} diff --git a/vendor/github.com/fragmenta/assets/group.go b/vendor/github.com/fragmenta/assets/group.go new file mode 100644 index 0000000..e68e755 --- /dev/null +++ b/vendor/github.com/fragmenta/assets/group.go @@ -0,0 +1,215 @@ +package assets + +import ( + "bytes" + "fmt" + "github.com/fragmenta/assets/internal/cssmin" + "github.com/fragmenta/assets/internal/jsmin" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +// A sortable file array +type fileArray []*File + +func (a fileArray) Len() int { return len(a) } +func (a fileArray) Less(b, c int) bool { return a[b].name < a[c].name } +func (a fileArray) Swap(b, c int) { a[b], a[c] = a[c], a[b] } + +// Group holds a name and a list of files (images, scripts, styles) +type Group struct { + name string + files fileArray + stylehash string // the hash of the compiled group css file (if any) + scripthash string // the hash of the compiled group js file (if any) +} + +// Styles returns an array of file names for styles +func (g *Group) Styles() []*File { + var styles []*File + + for _, f := range g.files { + if f.Style() { + styles = append(styles, f) + } + } + + return styles +} + +// Scripts returns an array of file names for styles +func (g *Group) Scripts() []*File { + var scripts []*File + + for _, f := range g.files { + if f.Script() { + scripts = append(scripts, f) + } + } + + return scripts +} + +// RemoveFiles removes old compiled files for this group from dst +func (g *Group) RemoveFiles(dst string) error { + + if dst == "" { + return fmt.Errorf("Empty destination string") + } + + var assets []string + + pattern := path.Join(dst, "assets", "scripts", g.name+"-*.min.js") + files, err := filepath.Glob(pattern) + if err != nil { + return err + } + + assets = append(assets, files...) + pattern = path.Join(dst, "assets", "styles", g.name+"-*.min.css") + files, err = filepath.Glob(pattern) + if err != nil { + return err + } + assets = append(assets, files...) 
+ + for _, a := range assets { + err = os.Remove(a) + if err != nil { + return err + } + } + + return nil +} + +// Compile compiles all our files and calculates hashes from their contents +// The group hash is a hash of hashes +func (g *Group) Compile(dst string) error { + var scriptHashes, styleHashes string + var scriptWriter, styleWriter bytes.Buffer + + for _, f := range g.files { + if f.Script() { + scriptHashes += f.hash + scriptWriter.Write(f.bytes) + scriptWriter.WriteString("\n\n") + } else if f.Style() { + styleHashes += f.hash + styleWriter.Write(f.bytes) + styleWriter.WriteString("\n\n") + } + } + // Generate hashes for the files concatted using our existing file hashes as input + // NB this is not the hash of the minified file + g.scripthash = bytesHash([]byte(scriptHashes)) + g.stylehash = bytesHash([]byte(styleHashes)) + + // Write out this group's minified concatted files + err := g.writeFiles(dst, scriptWriter, styleWriter) + + // Reset the buffers on our files, which we no longer need + for _, f := range g.files { + f.bytes = nil + } + + return err +} + +// writeScript +func (g *Group) writeFiles(dst string, scriptWriter, styleWriter bytes.Buffer) error { + var err error + + // Minify CSS + miniCSS := cssmin.Minify(styleWriter.Bytes()) + err = ioutil.WriteFile(g.StylePath(dst), miniCSS, permissions) + if err != nil { + return err + } + + // Minify JS + minijs, err := jsmin.Minify(scriptWriter.Bytes()) + if err != nil { + return err + } + + err = ioutil.WriteFile(g.ScriptPath(dst), minijs, permissions) + if err != nil { + return err + } + + // Now reset our bytes buffers + scriptWriter.Reset() + styleWriter.Reset() + + return nil +} + +// AddAsset adds this asset to the group +func (g *Group) AddAsset(p, h string) { + file := &File{name: path.Base(p), path: p, hash: h} + g.files = append(g.files, file) +} + +// ParseFile adds this asset to our list of files, along with a fingerprint based on the content +func (g *Group) ParseFile(p string, dst string) error { + + // Create the file + file, err := NewFile(p) + if err != nil { + return err + } + g.files = append(g.files, file) + + return nil +} + +// String returns a string represention of group +func (g *Group) String() string { + return fmt.Sprintf("%s:%d", g.name, len(g.files)) +} + +// StyleName returns a fingerprinted group name for styles +func (g *Group) StyleName() string { + return fmt.Sprintf("%s-%s.min.css", g.name, g.stylehash) +} + +// StylePath returns a fingerprinted group path for styles +func (g *Group) StylePath(dst string) string { + return path.Join(dst, "assets", "styles", g.StyleName()) +} + +// ScriptName returns a fingerprinted group name for scripts +func (g *Group) ScriptName() string { + return fmt.Sprintf("%s-%s.min.js", g.name, g.scripthash) +} + +// ScriptPath returns a fingerprinted group path for scripts +func (g *Group) ScriptPath(dst string) string { + return path.Join(dst, "assets", "scripts", g.ScriptName()) +} + +// MarshalJSON generates json for this collection, of the form {group:{file:hash}} +func (g *Group) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + + b.WriteString(fmt.Sprintf(`"%s":{"scripts":"%s","styles":"%s","files":{`, + g.name, g.scripthash, g.stylehash)) + + for i, f := range g.files { + fb, err := f.MarshalJSON() + if err != nil { + return nil, err + } + b.Write(fb) + if i+1 < len(g.files) { + b.WriteString(",") + } + } + + b.WriteString("}}") + + return b.Bytes(), nil +} diff --git a/vendor/github.com/fragmenta/assets/helpers.go 
b/vendor/github.com/fragmenta/assets/helpers.go new file mode 100644 index 0000000..9d0217d --- /dev/null +++ b/vendor/github.com/fragmenta/assets/helpers.go @@ -0,0 +1,78 @@ +package assets + +import ( + "fmt" + "html/template" + "strings" +) + +const ( + styleTemplate = `<link href="/assets/styles/%s" media="all" rel="stylesheet" type="text/css" />` + scriptTemplate = `<script src="/assets/scripts/%s" type="text/javascript"></script>` +) + +// StyleLink converts a set of group names to one style link tag (production) or to a list of style link tags (development) +func (c *Collection) StyleLink(names ...string) template.HTML { + var html template.HTML + + // Iterate through names, setting up links for each + // we link to groups if we have them, else we fall back to normal links + for _, name := range names { + g := c.Group(name) + if g.stylehash != "" { + if c.serveCompiled { + html = html + StyleLink(g.StyleName()) + } else { + for _, f := range g.Styles() { + html = html + StyleLink(f.name) + template.HTML("\n") + } + } + } else { + html = html + StyleLink(name) + } + + } + + return html +} + +// ScriptLink converts a set of group names to one script tag (production) or to a list of script tags (development) +func (c *Collection) ScriptLink(names ...string) template.HTML { + var html template.HTML + + // Iterate through names, setting up links for each + // we link to groups if we have them, else we fall back to normal links + for _, name := range names { + g := c.Group(name) + if g.stylehash != "" { + if c.serveCompiled { + html = html + ScriptLink(g.ScriptName()) + } else { + for _, f := range g.Scripts() { + html = html + ScriptLink(f.name) + template.HTML("\n") + } + } + } else { + html = html + ScriptLink(name) + } + + } + + return html +} + +// StyleLink returns an html link tag for a given file path +func StyleLink(name string) template.HTML { + if !strings.HasSuffix(name, ".css") { + name = name + ".css" + } + return template.HTML(fmt.Sprintf(styleTemplate, template.URLQueryEscaper(name))) +} + +// ScriptLink returns an html script tag for a given file path +func ScriptLink(name string) template.HTML { + if !strings.HasSuffix(name, ".js") { + name = name + ".js" + } + return template.HTML(fmt.Sprintf(scriptTemplate, template.URLQueryEscaper(name))) +} diff --git a/vendor/github.com/fragmenta/assets/internal/cssmin/cssmin.go b/vendor/github.com/fragmenta/assets/internal/cssmin/cssmin.go new file mode 100644 index 0000000..cf3883f --- /dev/null +++ b/vendor/github.com/fragmenta/assets/internal/cssmin/cssmin.go @@ -0,0 +1,143 @@ +// https://github.com/dchest/cssmin +// Go Port: +// Copyright (c) 2013 Dmitry Chestnykh +// +// Original: +// Copyright (c) 2008 Ryan Grove +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of this project nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package cssmin minifies CSS. It's a port of Ryan Grove's cssmin from Ruby. +package cssmin + +import ( + "bytes" + "fmt" + "regexp" + "strconv" +) + +var ( + rcomments = regexp.MustCompile(`\/\*[\s\S]*?\*\/`) + rwhitespace = regexp.MustCompile(`\s+`) + rbmh = regexp.MustCompile(`"\\"\}\\""`) + runspace1 = regexp.MustCompile(`(?:^|\})[^\{:]+\s+:+[^\{]*\{`) + runspace2 = regexp.MustCompile(`\s+([!\{\};:>+\(\)\],])`) + runspace3 = regexp.MustCompile(`([!\{\}:;>+\(\[,])\s+`) + rsemicolons = regexp.MustCompile(`([^;\}])\}`) + runits = regexp.MustCompile(`(?i)([\s:])([+-]?0)(?:%|em|ex|px|in|cm|mm|pt|pc)`) + rfourzero = regexp.MustCompile(`:(?:0 )+0;`) + rleadzero = regexp.MustCompile(`(:|\s)0+\.(\d+)`) + rrgb = regexp.MustCompile(`rgb\s*\(\s*([0-9,\s]+)\s*\)`) + rdigits = regexp.MustCompile(`\d+`) + rcompresshex = regexp.MustCompile(`(?i)([^"'=\s])(\s?)\s*#([0-9a-f]){6}`) + rhexval = regexp.MustCompile(`[0-9a-f]{2}`) + remptyrules = regexp.MustCompile(`[^\}]+\{;\}\n`) + rmediaspace = regexp.MustCompile(`\band\(`) + rredsemicolons = regexp.MustCompile(`;+\}`) +) + +func Minify(css []byte) (minified []byte) { + // Remove comments. + css = rcomments.ReplaceAll(css, []byte{}) + + // Compress all runs of whitespace to a single space to make things easier + // to work with. + css = rwhitespace.ReplaceAll(css, []byte(" ")) + + // Replace box model hacks with placeholders. + css = rbmh.ReplaceAll(css, []byte("___BMH___")) + + // Remove unnecessary spaces, but be careful not to turn "p :link {...}" + // into "p:link{...}". + css = runspace1.ReplaceAllFunc(css, func(match []byte) []byte { + return bytes.Replace(match, []byte(":"), []byte("___PSEUDOCLASSCOLON___"), -1) + }) + css = runspace2.ReplaceAll(css, []byte("$1")) + css = bytes.Replace(css, []byte("___PSEUDOCLASSCOLON___"), []byte(":"), -1) + css = runspace3.ReplaceAll(css, []byte("$1")) + + // Add missing semicolons. + css = rsemicolons.ReplaceAll(css, []byte("$1;}")) + + // Replace 0(%, em, ex, px, in, cm, mm, pt, pc) with just 0. + css = runits.ReplaceAll(css, []byte("$1$2")) + + // Replace 0 0 0 0; with 0. + css = rfourzero.ReplaceAll(css, []byte(":0;")) + + // Replace background-position:0; with background-position:0 0; + css = bytes.Replace(css, []byte("background-position:0;"), []byte("background-position:0 0;"), -1) + + // Replace 0.6 with .6, but only when preceded by : or a space. + css = rleadzero.ReplaceAll(css, []byte("$1.$2")) + + // Convert rgb color values to hex values. + css = rrgb.ReplaceAllFunc(css, func(match []byte) (out []byte) { + out = []byte{'#'} + for _, v := range rdigits.FindAll(match, -1) { + d, err := strconv.Atoi(string(v)) + if err != nil { + return match + } + out = append(out, []byte(fmt.Sprintf("%02x", d))...) 
+ } + return out + }) + + // Compress color hex values, making sure not to touch values used in IE + // filters, since they would break. + css = rcompresshex.ReplaceAllFunc(css, func(match []byte) (out []byte) { + vals := rhexval.FindAll(match, -1) + if len(vals) != 3 { + return match + } + compressible := true + for _, v := range vals { + if v[0] != v[1] { + compressible = false + } + } + if !compressible { + return match + } + out = append(out, match[:bytes.IndexByte(match, '#')+1]...) + return append(out, vals[0][0], vals[1][0], vals[2][0]) + }) + + // Remove empty rules. + css = remptyrules.ReplaceAll(css, []byte{}) + + // Re-insert box model hacks. + css = bytes.Replace(css, []byte("___BMH___"), []byte(`"\"}\""`), -1) + + // Put the space back in for media queries + css = rmediaspace.ReplaceAll(css, []byte("and (")) + + // Prevent redundant semicolons. + css = rredsemicolons.ReplaceAll(css, []byte("}")) + + return bytes.TrimSpace(css) +} diff --git a/vendor/github.com/fragmenta/assets/internal/jsmin/jsmin.go b/vendor/github.com/fragmenta/assets/internal/jsmin/jsmin.go new file mode 100644 index 0000000..ec33471 --- /dev/null +++ b/vendor/github.com/fragmenta/assets/internal/jsmin/jsmin.go @@ -0,0 +1,352 @@ +// Ported to Go by Dmitry Chestnykh. Porting is public domain, +// but the code should be used according to the original license, +// as it's a derivative work: +/* jsmin.c + 2013-03-29 + +Copyright (c) 2002 Douglas Crockford (www.crockford.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package jsmin implements JavaScript minifier. It's a direct port of Doulas Crockford's JSMin. +package jsmin + +// ... or should I say, a direct braindead port of the ugly Crockford's code... + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +const eof = -1 + +type minifier struct { + buf *bytes.Buffer + r *bufio.Reader + w *bufio.Writer + theA int + theB int + theLookahead int + theX int + theY int + err error +} + +func (m *minifier) init(r *bufio.Reader, w *bufio.Writer) { + m.r = r + m.w = w + m.theLookahead = eof + m.theX = eof + m.theY = eof +} + +func (m *minifier) error(s string) error { + m.err = fmt.Errorf("JSMIN Error: %s", s) + return m.err +} + +/* isAlphanum -- return true if the character is a letter, digit, underscore, + dollar sign, or non-ASCII character. 
+*/ + +func isAlphanum(c int) bool { + return ((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || + (c >= 'A' && c <= 'Z') || c == '_' || c == '$' || c == '\\' || + c > 126) +} + +/* get -- return the next character from stdin. Watch out for lookahead. If + the character is a control character, translate it to a space or + linefeed. +*/ + +func (m *minifier) get() int { + c := m.theLookahead + m.theLookahead = eof + if c == eof { + b, err := m.r.ReadByte() + if err != nil { + if err == io.EOF { + c = eof + } else { + m.error(err.Error()) + return eof + } + } else { + c = int(b) + } + } + if c >= ' ' || c == '\n' || c == eof { + return c + } + if c == '\r' { + return '\n' + } + return ' ' +} + +/* peek -- get the next character without getting it. + */ + +func (m *minifier) peek() int { + m.theLookahead = m.get() + return m.theLookahead +} + +/* next -- get the next character, excluding comments. peek() is used to see + if a '/' is followed by a '/' or '*'. +*/ + +func (m *minifier) next() int { + c := m.get() + if c == '/' { + switch m.peek() { + case '/': + for { + c = m.get() + if c <= '\n' { + break + } + } + case '*': + m.get() + // Preserve license comments (/*!) + if m.peek() == '!' { + m.get() + m.putc('/') + m.putc('*') + m.putc('!') + for c != 0 { + c = m.get() + switch c { + case '*': + if m.peek() == '/' { + m.get() + c = 0 + } + break + case eof: + m.error("Unterminated comment.") + return eof + default: + m.putc(c) + } + } + m.putc('*') + m.putc('/') + } + // -- + for c != ' ' { + switch m.get() { + case '*': + if m.peek() == '/' { + m.get() + c = ' ' + } + break + case eof: + m.error("Unterminated comment.") + return eof + } + } + } + } + m.theY = m.theX + m.theX = c + return c +} + +/* action -- do something! What you do is determined by the argument: + 1 Output A. Copy B to A. Get the next B. + 2 Copy B to A. Get the next B. (Delete A). + 3 Get the next B. (Delete B). + action treats a string as a single character. Wow! + action recognizes a regular expression if it is preceded by ( or , or =. +*/ +func (m *minifier) putc(c int) { + m.w.WriteByte(byte(c)) +} + +func (m *minifier) action(d int) { + switch d { + case 1: + m.putc(m.theA) + if (m.theY == '\n' || m.theY == ' ') && + (m.theA == '+' || m.theA == '-' || m.theA == '*' || m.theA == '/') && + (m.theB == '+' || m.theB == '-' || m.theB == '*' || m.theB == '/') { + m.putc(m.theY) + } + fallthrough + case 2: + m.theA = m.theB + if m.theA == '\'' || m.theA == '"' || m.theA == '`' { + for { + m.putc(m.theA) + m.theA = m.get() + if m.theA == m.theB { + break + } + if m.theA == '\\' { + m.putc(m.theA) + m.theA = m.get() + } + if m.theA == eof { + m.error("Unterminated string literal.") + return + } + } + } + fallthrough + case 3: + m.theB = m.next() + if m.theB == '/' && (m.theA == '(' || m.theA == ',' || m.theA == '=' || m.theA == ':' || + m.theA == '[' || m.theA == '!' || m.theA == '&' || m.theA == '|' || + m.theA == '?' 
|| m.theA == '+' || m.theA == '-' || m.theA == '~' || + m.theA == '*' || m.theA == '/' || m.theA == '{' || m.theA == '\n') { + m.putc(m.theA) + if m.theA == '/' || m.theA == '*' { + m.putc(' ') + } + m.putc(m.theB) + for { + m.theA = m.get() + if m.theA == '[' { + for { + m.putc(m.theA) + m.theA = m.get() + if m.theA == ']' { + break + } + if m.theA == '\\' { + m.putc(m.theA) + m.theA = m.get() + } + if m.theA == eof { + m.error("Unterminated set in Regular Expression literal.") + return + } + } + } else if m.theA == '/' { + switch m.peek() { + case '/', '*': + m.error("Unterminated set in Regular Expression literal.") + return + } + break + } else if m.theA == '\\' { + m.putc(m.theA) + m.theA = m.get() + } + if m.theA == eof { + m.error("Unterminated Regular Expression literal.") + return + } + m.putc(m.theA) + } + m.theB = m.next() + } + } +} + +/* jsmin -- Copy the input to the output, deleting the characters which are + insignificant to JavaScript. Comments will be removed. Tabs will be + replaced with spaces. Carriage returns will be replaced with linefeeds. + Most spaces and linefeeds will be removed. +*/ + +func (m *minifier) run() { + if m.peek() == 0xEF { + m.get() + m.get() + m.get() + } + m.theA = '\n' + m.action(3) + for m.theA != eof { + switch m.theA { + case ' ': + if isAlphanum(m.theB) { + m.action(1) + } else { + m.action(2) + } + case '\n': + switch m.theB { + case '{', '[', '(', '+', '-', '!', '~': + m.action(1) + case ' ': + m.action(3) + default: + if isAlphanum(m.theB) { + m.action(1) + } else { + m.action(2) + } + } + default: + switch m.theB { + case ' ': + if isAlphanum(m.theA) { + m.action(1) + } else { + m.action(3) + } + case '\n': + switch m.theA { + case '}', ']', ')', '+', '-', '"', '\'', '`': + m.action(1) + default: + if isAlphanum(m.theA) { + m.action(1) + } else { + m.action(3) + } + } + default: + m.action(1) + } + } + } +} + +// Minify returns a minified script or an error. +func Minify(script []byte) (minified []byte, err error) { + var buf bytes.Buffer + w := bufio.NewWriter(&buf) + r := bufio.NewReader(bytes.NewReader(script)) + + m := new(minifier) + m.init(r, w) + m.run() + if m.err != nil { + return nil, err + } + w.Flush() + + minified = buf.Bytes() + if len(minified) > 0 && minified[0] == '\n' { + minified = minified[1:] + } + return minified, nil +} diff --git a/vendor/github.com/fragmenta/auth/LICENSE b/vendor/github.com/fragmenta/auth/LICENSE new file mode 100644 index 0000000..c6efb8b --- /dev/null +++ b/vendor/github.com/fragmenta/auth/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Mechanism Design Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/fragmenta/auth/README.md b/vendor/github.com/fragmenta/auth/README.md new file mode 100644 index 0000000..1b19d5f --- /dev/null +++ b/vendor/github.com/fragmenta/auth/README.md @@ -0,0 +1,93 @@ +# Package Auth +Package auth provides helpers for encryption, hashing and encoding. + +### Setup + +Setup the package on startup + +```Go + auth.HMACKey = auth.HexToBytes("myhmac_key_from_config") + auth.SecretKey = auth.HexToBytes("my_secret_key_from_config") + auth.SessionName = "my_cookie_name" + auth.SecureCookies = true +``` + + +### Hashed Passwords + +Use auth.HashPassword to encrypt and auth.CheckPassword to check hashed passwords (with bcrypt) + +```Go + user.HashedPassword, err = auth.HashPassword(params.Get("password") + if err != nil { + return err + } + err = auth.CheckPassword(params.Get("password"), user.HashedPassword) +``` + +### Encrypted Sessions + +Use auth.Session to set and get values from cookies, encrypted with AES GCM. + +```Go + // Build the session from the secure cookie, or create a new one + session, err := auth.Session(writer, request) + if err != nil { + return err + } + + // Store something in the session + session.Set("my_key","my_value") + session.Save(writer) +``` + + +### Random Tokens + +Generate and compare random tokens in constant time using the crypto/rand and crypto/subtle packages. + +```Go +// Generate a new token +token := auth.RandomToken(32) + +// Check tokens +if auth.CheckRandomToken(tok1,tok2) { + // Tokens match +} +``` + +## Authorisation + +You can use auth/can (separately) to authorise access to resources. + +To authorise actions: + +```Go +// Add an authorisation for admins to manage the pages resource +can.Authorise(role.Admin, can.ManageResource, "pages") +``` + +To check authorisation in handlers: + +```Go +// Check whether resource (conforming to can.Resource) +// can be managed by user (conforming to can.User) +can.Manage(resource,user) +``` + + +```Go +// Interfaces for Users and Resources + +// User defines the interface for users which must have numeric roles +type User interface { + RoleID() int64 // for role check + UserID() int64 // for ownership check +} + +// Resource defines the interface for resources +type Resource interface { + OwnedBy(int64) bool // for ownership check, passed a UserID + ResourceID() string // for check against abilities registered on this resource +} +``` diff --git a/vendor/github.com/fragmenta/auth/auth.go b/vendor/github.com/fragmenta/auth/auth.go new file mode 100644 index 0000000..18270d4 --- /dev/null +++ b/vendor/github.com/fragmenta/auth/auth.go @@ -0,0 +1,111 @@ +// Package auth provides helpers for encryption, hashing and encoding. +package auth + +import ( + "fmt" + "golang.org/x/crypto/bcrypt" + "net/http" +) + +// TODO: Add rotating cyphers on login (move to scrypt instead of bcrypt) + +// HashCost sets the cost of bcrypt hashes +// - if this changes hashed passwords would need to be recalculated. +const HashCost = 10 + +// TokenLength sets the length of random tokens used for authenticity tokens. +const TokenLength = 32 + +// CheckPassword compares a password hashed with bcrypt. 
+func CheckPassword(pass, hash string) error { + return bcrypt.CompareHashAndPassword([]byte(hash), []byte(pass)) +} + +// HashPassword hashes a password with a random salt using bcrypt. +func HashPassword(pass string) (string, error) { + hash, err := bcrypt.GenerateFromPassword([]byte(pass), HashCost) + return string(hash), err +} + +// AuthenticityToken returns a new token for a request, +// and if necessary sets the cookie with our secret. +func AuthenticityToken(writer http.ResponseWriter, request *http.Request) (string, error) { + // Fetch the session store + session, err := Session(writer, request) + if err != nil { + return "", err + } + // Get the secret from the session, or generate if none found + secret := session.Get(SessionTokenKey) + if secret == "" { + secret = BytesToBase64(RandomToken(TokenLength)) + session.Set(SessionTokenKey, secret) + session.Save(writer) + } + + // Now from secret, generate a secure token for this request + token := AuthenticityTokenWithSecret(Base64ToBytes(secret)) + return BytesToBase64(token), nil +} + +// CheckAuthenticityToken checks the token against that +// stored in a session cookie, and returns an error if the check fails. +func CheckAuthenticityToken(token string, request *http.Request) error { + + // Fetch the session store + session, err := SessionGet(request) + if err != nil { + return err + } + + // Get the secret from the session + secret := session.Get(SessionTokenKey) + if secret == "" { + return fmt.Errorf("auth: error fetching authenticity secret from session") + } + + return CheckAuthenticityTokenWithSecret(Base64ToBytes(token), Base64ToBytes(secret)) +} + +// CheckAuthenticityTokenWithSecret checks +// an auth token against a secret. +func CheckAuthenticityTokenWithSecret(token, secret []byte) error { + + // Check token length + if len(token) != TokenLength*2 { + return fmt.Errorf("auth: error failed - invalid token length %d", len(token)) + } + + // Grab random byte prefix, xor suffix secret against it to get our secret out, + // and compare result to secret stored in cookie + s := safeXORBytes(token[TokenLength:], token[:TokenLength]) + if CheckRandomToken(s, secret) { + return nil + } + + // If we reach here, CheckRandomToken failed + return fmt.Errorf("auth: error failed with token") +} + +// AuthenticityTokenWithSecret generates a new authenticity token +// from the secret by xoring a new random token with it +// and prepending the random bytes +// See https://github.com/rails/rails/pull/16570 +// or gorilla/csrf for justification. +func AuthenticityTokenWithSecret(secret []byte) []byte { + random := RandomToken(TokenLength) + return append(random, safeXORBytes(random, secret)...) +} + +// safeXORBytes is from https://golang.org/src/crypto/cipher/xor.go. +func safeXORBytes(a, b []byte) []byte { + n := len(a) + if len(b) < n { + n = len(b) + } + dst := make([]byte, n) + for i := 0; i < n; i++ { + dst[i] = a[i] ^ b[i] + } + return dst +} diff --git a/vendor/github.com/fragmenta/auth/can/ability.go b/vendor/github.com/fragmenta/auth/can/ability.go new file mode 100644 index 0000000..fabaea7 --- /dev/null +++ b/vendor/github.com/fragmenta/auth/can/ability.go @@ -0,0 +1,95 @@ +// Package can implements basic role-based permissions for golang +// - controlling who can.Do certain actions for a given database table. 
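The random-prefix XOR masking used by AuthenticityTokenWithSecret and CheckAuthenticityTokenWithSecret can be exercised on its own; a minimal standalone sketch (not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/fragmenta/auth"
)

func main() {
	// The per-session secret normally lives inside the encrypted session cookie.
	secret := auth.RandomToken(auth.TokenLength)

	// Each request gets a fresh token: random prefix + (prefix XOR secret).
	token := auth.AuthenticityTokenWithSecret(secret)

	// Verification recovers the secret by XORing the prefix back out and
	// compares it in constant time.
	if err := auth.CheckAuthenticityTokenWithSecret(token, secret); err != nil {
		fmt.Println("token rejected:", err)
		return
	}
	fmt.Println("token accepted")
}
```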
+package can + +import ( + "errors" + "fmt" + "sync" +) + +// abilities is an array of abilities +var abilities []*Ability + +// mu protects the list of abilities during access +var mu sync.RWMutex + +// Authorise adds this ability to the list of abilities for this role. +// Usage: can.Authorise(role.Admin, can.ManageResource, "pages") +func Authorise(role int64, v Verb, id string) { + ability := &Ability{role: role, verb: v, identifier: id, ownership: false} + add(ability) +} + +// AuthoriseOwner adds this ability to the list of abilities for this role +// for resources owned by this user. +// Usage: can.AuthoriseOwner(role.Reader, can.ShowResource, "pages") +func AuthoriseOwner(role int64, v Verb, id string) { + ability := &Ability{role: role, verb: v, identifier: id, ownership: true} + add(ability) +} + +// add adds this ability +func add(a *Ability) { + mu.Lock() + abilities = append(abilities, a) + mu.Unlock() +} + +// Ability represents an authorisation for an action for a given role +type Ability struct { + ownership bool + role int64 + verb Verb + identifier string +} + +// Allow returns an error if the action is not allowed, or nil if it is +func (a *Ability) Allow(v Verb, r Resource, u User) error { + + // Fail if user role doesn't match + if u == nil || a.role != u.RoleID() { + return errors.New("can: role not authorised") + } + + // Fail if resource id doesn't match + if a.identifier != Anything && a.identifier != r.ResourceID() { + return errors.New("can: resource not authorised") + } + + // Check for verb match, fail if no match + if a.verb != ManageResource && a.verb != v { + return errors.New("can: action not authorised") + } + + // If we have an ability which doesn't require ownership, return now + if !a.CheckOwner() { + return nil + } + + // Now check ownership + if r == nil || !r.OwnedBy(u.UserID()) { + return errors.New("can: action not authorised") + } + + return nil +} + +// CheckOwner returns true if this ability should check ownership +func (a *Ability) CheckOwner() bool { + // If the verb is to create or list, we can do no ownership check + if a.verb == CreateResource || a.verb == ListResource { + return false + } + // If the resource is anything, we do not check ownership + if a.identifier == Anything { + return false + } + // If the ability does not require ownership, return false + return a.ownership +} + +// String returns a string description of this ability. +func (a *Ability) String() string { + return fmt.Sprintf("%v %d can %v on %s\n", a.ownership, a.role, a.verb, a.identifier) +} diff --git a/vendor/github.com/fragmenta/auth/can/can.go b/vendor/github.com/fragmenta/auth/can/can.go new file mode 100644 index 0000000..a7ad165 --- /dev/null +++ b/vendor/github.com/fragmenta/auth/can/can.go @@ -0,0 +1,88 @@ +package can + +import ( + "fmt" +) + +// User defines the interface for users which must have numeric roles +type User interface { + RoleID() int64 // for role check + UserID() int64 // for ownership check +} + +// Resource defines the interface for resources +type Resource interface { + OwnedBy(int64) bool // for ownership check, passed a UserID + ResourceID() string // for check against abilities registered on this resource +} + +// Verb represents the action taken on resources +type Verb int + +// Verbs used to authorise actions on resources. +// Manages allows any action on a resource, +// and all verbs after Creates check ownership of the resource with OwnedBy(). 
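Putting Ability, the verbs below, and the can.Do wrappers together: a minimal standalone sketch, with assumed role values and hypothetical demoUser/page types implementing the User and Resource interfaces:

```go
package main

import (
	"fmt"

	"github.com/fragmenta/auth/can"
)

// demoUser satisfies can.User with a fixed role and id (illustrative only).
type demoUser struct{ role, id int64 }

func (u *demoUser) RoleID() int64 { return u.role }
func (u *demoUser) UserID() int64 { return u.id }

// page satisfies can.Resource and is owned by a single user id.
type page struct{ owner int64 }

func (p *page) OwnedBy(id int64) bool { return id == p.owner }
func (p *page) ResourceID() string    { return "pages" }

func main() {
	const adminRole, readerRole int64 = 100, 10 // assumed role values

	// Admins may do anything to pages; readers may only show pages they own.
	can.Authorise(adminRole, can.ManageResource, "pages")
	can.AuthoriseOwner(readerRole, can.ShowResource, "pages")

	reader := &demoUser{role: readerRole, id: 1}

	fmt.Println(can.Show(&page{owner: 1}, reader))   // <nil> - owned and authorised
	fmt.Println(can.Show(&page{owner: 2}, reader))   // error - not the owner
	fmt.Println(can.Update(&page{owner: 1}, reader)) // error - no update ability
}
```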
+const ( + ManageResource = iota + ListResource // Does not check ownership + CreateResource // Does not check ownership + ShowResource + UpdateResource + DestroyResource +) + +// Resource identifier used to short-circuit checks on resource identity in conjuction with ManageResource +const ( + Anything = "*" // Allow actions on any resource +) + +// Do returns an error if this action is not allowed, or nil if it is allowed +func Do(v Verb, r Resource, u User) error { + + // Check abilities for a match + mu.RLock() + for _, a := range abilities { + + // If no err, return nil to signify success + if a.Allow(v, r, u) == nil { + return nil + } + } + mu.RUnlock() + + // If we reach here, no matching authorisation was found - note u may be nil + return fmt.Errorf("can: no authorisation for action:%v %v %v", v, r, u) +} + +// The following are wrapper functions for can.Do to provide a more elegant interface +// i.e. calling can.Manage(u,r) + +// Manage returns an error if all actions are not authorised for this user +func Manage(r Resource, u User) error { + return Do(ManageResource, r, u) +} + +// Create returns an error if this action is not authorised for this user +func Create(r Resource, u User) error { + return Do(CreateResource, r, u) +} + +// List returns an error if this action is not authorised for this user +func List(r Resource, u User) error { + return Do(ListResource, r, u) +} + +// Show returns an error if this action is not authorised for this user +func Show(r Resource, u User) error { + return Do(ShowResource, r, u) +} + +// Update returns an error if this action is not authorised for this user +func Update(r Resource, u User) error { + return Do(UpdateResource, r, u) +} + +// Destroy returns an error if this action is not authorised for this user +func Destroy(r Resource, u User) error { + return Do(DestroyResource, r, u) +} diff --git a/vendor/github.com/fragmenta/auth/deprecated.go b/vendor/github.com/fragmenta/auth/deprecated.go new file mode 100644 index 0000000..4d22e91 --- /dev/null +++ b/vendor/github.com/fragmenta/auth/deprecated.go @@ -0,0 +1,41 @@ +package auth + +import ( + "encoding/base64" + "fmt" + "golang.org/x/crypto/bcrypt" +) + +// These DEPRECATED functions should not be used +// and will be removed in 2.0 + +// CheckCSRFToken DEPRECATED +// this function will be removed in 2.0 +func CheckCSRFToken(token, b64 string) error { + // First base64 decode the value + encrypted := make([]byte, 256) + _, err := base64.URLEncoding.Decode(encrypted, []byte(b64)) + if err != nil { + return err + } + + return bcrypt.CompareHashAndPassword(encrypted, []byte(token)) +} + +// CSRFToken DEPRECATED +// this function will be removed in 2.0 +func CSRFToken(token string) (string, error) { + b, err := bcrypt.GenerateFromPassword([]byte(token), HashCost) + if err != nil { + return "", err + } + + return base64.URLEncoding.EncodeToString(b), nil +} + +// EncryptPassword renamed and DEPRECATED +// this function will be removed in 2.0 +func EncryptPassword(pass string) (string, error) { + fmt.Printf("Please use HashPassword instead, auth.EncryptPassword is deprecated") + return HashPassword(pass) +} diff --git a/vendor/github.com/fragmenta/auth/encode.go b/vendor/github.com/fragmenta/auth/encode.go new file mode 100644 index 0000000..f52f31c --- /dev/null +++ b/vendor/github.com/fragmenta/auth/encode.go @@ -0,0 +1,90 @@ +package auth + +import ( + "bytes" + "crypto/subtle" + "encoding/base64" + "encoding/gob" + "encoding/hex" + "fmt" + "hash" +) + +// HexToBytes converts a hex string 
representation of bytes to a byte representation +func HexToBytes(h string) []byte { + s, err := hex.DecodeString(h) + if err != nil { + s = []byte("") + } + return s +} + +// BytesToHex converts bytes to a hex string representation of bytes +func BytesToHex(b []byte) string { + return hex.EncodeToString(b) +} + +// Base64ToBytes converts from a b64 string to bytes +func Base64ToBytes(h string) []byte { + s, err := base64.URLEncoding.DecodeString(h) + if err != nil { + s = []byte("") + } + return s +} + +// BytesToBase64 converts bytes to a base64 string representation +func BytesToBase64(b []byte) string { + return base64.URLEncoding.EncodeToString(b) +} + +// CreateMAC creates a MAC. +func CreateMAC(h hash.Hash, value []byte) []byte { + h.Write(value) + return h.Sum(nil) +} + +// VerifyMAC verifies the MAC is valid with ConstantTimeCompare. +func VerifyMAC(h hash.Hash, value []byte, mac []byte) error { + m := CreateMAC(h, value) + if subtle.ConstantTimeCompare(mac, m) == 1 { + return nil + } + return fmt.Errorf("Invalid MAC:%s", string(m)) +} + +// encodeBase64 encodes a value using base64. +func encodeBase64(value []byte) []byte { + encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) + base64.URLEncoding.Encode(encoded, value) + return encoded +} + +// decodeBase64 decodes a value using base64. +func decodeBase64(value []byte) ([]byte, error) { + decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) + b, err := base64.URLEncoding.Decode(decoded, value) + if err != nil { + return nil, err + } + return decoded[:b], nil +} + +// serialize encodes a value using gob. +func serialize(src interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + if err := enc.Encode(src); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// deserialize decodes a value using gob. +func deserialize(src []byte, dst interface{}) error { + dec := gob.NewDecoder(bytes.NewBuffer(src)) + if err := dec.Decode(dst); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/fragmenta/auth/encrypt.go b/vendor/github.com/fragmenta/auth/encrypt.go new file mode 100644 index 0000000..77f286b --- /dev/null +++ b/vendor/github.com/fragmenta/auth/encrypt.go @@ -0,0 +1,73 @@ +package auth + +// Provides symmetric authenticated encryption +// using 256-bit AES-GCM with a random nonce. + +// This code was taken from cryptopasta by George Tankersley +// https://github.com/gtank/cryptopasta +// +// Written in 2015 by George Tankersley +// +// To the extent possible under law, the author(s) have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// +// You should have received a copy of the CC0 Public Domain Dedication along +// with this software. If not, see // . +// This modified copy released under an MIT license (see LICENSE). + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "io" +) + +// Encrypt encrypts data using 256-bit AES-GCM. This both hides the content of +// the data and provides a check that it hasn't been altered. Output takes the +// form nonce|ciphertext|tag where '|' indicates concatenation. 
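Because CreateMAC writes into the hash it is given, verification should use a fresh HMAC instance built with the same key; a small sketch of the intended pairing (the message value is illustrative):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"

	"github.com/fragmenta/auth"
)

func main() {
	key := auth.RandomToken(32)
	msg := []byte("session-name|1600000000|payload") // illustrative value

	mac := auth.CreateMAC(hmac.New(sha256.New, key), msg)

	// A separate hmac.New with the same key verifies in constant time.
	err := auth.VerifyMAC(hmac.New(sha256.New, key), msg, mac)
	fmt.Println(err) // <nil>
}
```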
+func Encrypt(plaintext []byte, key []byte) (ciphertext []byte, err error) { + block, err := aes.NewCipher(key[:]) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + _, err = io.ReadFull(rand.Reader, nonce) + if err != nil { + return nil, err + } + + return gcm.Seal(nonce, nonce, plaintext, nil), nil +} + +// Decrypt decrypts data using 256-bit AES-GCM. This both hides the content of +// the data and provides a check that it hasn't been altered. Expects input +// form nonce|ciphertext|tag where '|' indicates concatenation. +func Decrypt(ciphertext []byte, key []byte) (plaintext []byte, err error) { + block, err := aes.NewCipher(key[:]) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + if len(ciphertext) < gcm.NonceSize() { + return nil, errors.New("malformed ciphertext") + } + + return gcm.Open(nil, + ciphertext[:gcm.NonceSize()], + ciphertext[gcm.NonceSize():], + nil, + ) +} diff --git a/vendor/github.com/fragmenta/auth/random.go b/vendor/github.com/fragmenta/auth/random.go new file mode 100644 index 0000000..a09f922 --- /dev/null +++ b/vendor/github.com/fragmenta/auth/random.go @@ -0,0 +1,29 @@ +package auth + +import ( + "crypto/rand" + "crypto/subtle" + "fmt" +) + +// RandomToken generates a random token 32 bytes long, +// or at a specified length if arguments are provided. +func RandomToken(args ...int) []byte { + length := 32 + if len(args) > 0 && args[0] != 0 { + length = args[0] + } + b := make([]byte, length) + _, err := rand.Read(b) + if err != nil { + fmt.Println("error reading random token:", err) + return nil + } + return b +} + +// CheckRandomToken performs a comparison of two tokens +// resistant to timing attacks. +func CheckRandomToken(a, b []byte) bool { + return (subtle.ConstantTimeCompare(a, b) == 1) +} diff --git a/vendor/github.com/fragmenta/auth/session.go b/vendor/github.com/fragmenta/auth/session.go new file mode 100644 index 0000000..1487ead --- /dev/null +++ b/vendor/github.com/fragmenta/auth/session.go @@ -0,0 +1,276 @@ +package auth + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "errors" + "fmt" + "net/http" + "strconv" + "time" +) + +// This secure cookie code is based on Gorilla secure cookie +// but with mandatory AES-GCM encryption. + +// MaxAge is the age in seconds of a cookie before it expires, default 60 days. +var MaxAge = 86400 * 60 + +// MaxCookieSize is the maximum length of a cookie in bytes, defaults to 4096. +var MaxCookieSize = 4096 + +// HMACKey is a 32 byte key for generating HMAC distinct from SecretKey. +var HMACKey []byte + +// SecretKey is a 32 byte key for encrypting content with AES-GCM. +var SecretKey []byte + +// SessionName is the name of the ssions. +var SessionName = "fragmenta_session" + +// SessionUserKey is the session user key. +var SessionUserKey = "user_id" + +// SessionTokenKey is the session token key. +var SessionTokenKey = "authenticity_token" + +// SecureCookies is true if we use secure https cookies. +var SecureCookies = false + +// SessionStore is the interface for a session store. +type SessionStore interface { + Get(string) string + Set(string, string) + Load(request *http.Request) error + Save(http.ResponseWriter) error + Clear(http.ResponseWriter) +} + +// CookieSessionStore is a concrete version of SessionStore, +// which stores the information encrypted in cookies. 
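A round trip through Encrypt and Decrypt, assuming a 32-byte key so that AES-256 is selected (in the session store below this key is SecretKey); a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/fragmenta/auth"
)

func main() {
	// 32 random bytes select AES-256; in a real app the key comes from config.
	key := auth.RandomToken(32)

	ciphertext, err := auth.Encrypt([]byte("session payload"), key)
	if err != nil {
		panic(err)
	}

	// Decrypt splits the nonce back off the front and authenticates the tag.
	plaintext, err := auth.Decrypt(ciphertext, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext) // session payload
}
```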
+type CookieSessionStore struct { + values map[string]string +} + +// Session loads the current sesions or returns a new blank session. +func Session(writer http.ResponseWriter, request *http.Request) (SessionStore, error) { + + s, err := SessionGet(request) + if err != nil { + return s, nil + } + + return s, nil +} + +// SessionGet loads the current session (if any) +func SessionGet(request *http.Request) (SessionStore, error) { + + // Return the current session store from cookie or a new one if none found + s := &CookieSessionStore{ + values: make(map[string]string, 0), + } + + if len(HMACKey) == 0 || len(SecretKey) == 0 || len(SessionTokenKey) == 0 { + return s, errors.New("auth: secrets not initialised") + } + + // Check if the session exists and load it + err := s.Load(request) + if err != nil { + return s, fmt.Errorf("auth: error loading session: %s", err) // return blank session if none found + } + + return s, nil +} + +// ClearSession clears the current session cookie +func ClearSession(w http.ResponseWriter) { + // First delete all Set-Cookie headers so we only have one + w.Header().Del("Set-Cookie") + + cookie := &http.Cookie{ + Name: SessionName, + Value: "", + MaxAge: -1, + Path: "/", + } + + http.SetCookie(w, cookie) +} + +// Get a value from the session. +func (s *CookieSessionStore) Get(key string) string { + return s.values[key] +} + +// Set a value in the session, this does not save to the cookie. +func (s *CookieSessionStore) Set(key string, value string) { + s.values[key] = value +} + +// Load the session from cookie. +func (s *CookieSessionStore) Load(request *http.Request) error { + + // Return if session name not defined + if SessionName == "" { + return fmt.Errorf("auth: error session_name not set") + } + + cookie, err := request.Cookie(SessionName) + if err != nil { + return fmt.Errorf("auth: error getting cookie: %s", err) + } + + // Read the encrypted values back out into our values in the session. + err = s.Decode(SessionName, HMACKey, SecretKey, cookie.Value, &s.values) + if err != nil { + return fmt.Errorf("auth: error decoding session: %s", err) + } + + return nil +} + +// Save the session to a cookie. +func (s *CookieSessionStore) Save(writer http.ResponseWriter) error { + + // Return error if session name not defined + if SessionName == "" { + return fmt.Errorf("auth: error session_name not set") + } + + encrypted, err := s.Encode(SessionName, s.values, HMACKey, SecretKey) + if err != nil { + return fmt.Errorf("auth: error encoding session: %s", err) + } + + cookie := &http.Cookie{ + Name: SessionName, + Value: encrypted, + HttpOnly: true, + Secure: SecureCookies, + Path: "/", + Expires: time.Now().AddDate(0, 0, 7), // Expires in seven days + } + + http.SetCookie(writer, cookie) + + return nil +} + +// Clear the session values from the cookie. +func (s *CookieSessionStore) Clear(writer http.ResponseWriter) { + cookie := &http.Cookie{ + Name: SessionName, + Value: "", + MaxAge: -1, + Path: "/", + } + + http.SetCookie(writer, cookie) +} + +// Encode a given value in the session cookie. 
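A full cookie round trip of the store above, sketched with net/http/httptest; the key sizes and cookie name are assumptions, and in a real app they come from config as shown in the package README:

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/fragmenta/auth"
)

func main() {
	// 32-byte keys: HMACKey signs the cookie, SecretKey encrypts it.
	auth.HMACKey = auth.RandomToken(32)
	auth.SecretKey = auth.RandomToken(32)
	auth.SessionName = "demo_session"

	// Write a value into the session and save it as a Set-Cookie header.
	w := httptest.NewRecorder()
	session, err := auth.Session(w, httptest.NewRequest("GET", "/", nil))
	if err != nil {
		panic(err)
	}
	session.Set("user_id", "42")
	if err := session.Save(w); err != nil {
		panic(err)
	}

	// Replay the cookie on a new request and read the value back.
	r := httptest.NewRequest("GET", "/", nil)
	for _, c := range w.Result().Cookies() {
		r.AddCookie(c)
	}
	loaded, err := auth.SessionGet(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(loaded.Get("user_id")) // 42
}
```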
+func (s *CookieSessionStore) Encode(name string, value interface{}, hashKey []byte, secretKey []byte) (string, error) { + + if name == "" || hashKey == nil || secretKey == nil { + return "", errors.New("auth: encode keys not set") + } + + // Serialize + b, err := serialize(value) + if err != nil { + return "", fmt.Errorf("auth: error serializing value: %s", err) + } + + // Encrypt with AES/GCM + b, err = Encrypt(b, secretKey) + if err != nil { + return "", fmt.Errorf("auth: error encrypting value: %s", err) + } + + // Encode to base64 + b = encodeBase64(b) + + // Note Encrypt above also verifies now with GCM. + // Create MAC for "name|date|value". Extra pipe unused. + now := time.Now().UTC().Unix() + b = []byte(fmt.Sprintf("%s|%d|%s|", name, now, b)) + mac := CreateMAC(hmac.New(sha256.New, hashKey), b[:len(b)-1]) + + // Append mac, remove name + b = append(b, mac...)[len(name)+1:] + + // Encode to base64 again + b = encodeBase64(b) + + // Check length when encoded + if MaxCookieSize != 0 && len(b) > MaxCookieSize { + return "", fmt.Errorf("auth: error len over max cookie size: %d", MaxCookieSize) + } + + // Done, convert to string and return + return string(b), nil +} + +// Decode the value in the session cookie. +func (s *CookieSessionStore) Decode(name string, hashKey []byte, secretKey []byte, value string, dst interface{}) error { + + if name == "" || hashKey == nil || secretKey == nil { + return errors.New("auth: decode keys not set") + } + + if MaxCookieSize != 0 && len(value) > MaxCookieSize { + return errors.New("auth: cookie value is too long") + } + + // Decode from base64 + b, err := decodeBase64([]byte(value)) + if err != nil { + return fmt.Errorf("auth: error decoding base 64 value: %s", err) + } + + // Verify MAC - value is "date|value|mac" + parts := bytes.SplitN(b, []byte("|"), 3) + if len(parts) != 3 { + return errors.New("auth: MAC invalid") + } + h := hmac.New(sha256.New, hashKey) + b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...) + err = VerifyMAC(h, b, parts[2]) + if err != nil { + return err + } + + // Verify date ranges + timestamp, err := strconv.ParseInt(string(parts[0]), 10, 64) + if err != nil { + return errors.New("auth: timestamp invalid") + } + now := time.Now().UTC().Unix() + if MaxAge != 0 && timestamp < now-int64(MaxAge) { + return errors.New("auth: timestamp expired") + } + + // Decode from base64 + b, err = decodeBase64(parts[1]) + if err != nil { + return fmt.Errorf("auth: error decoding value: %s", err) + } + + // Derypt with AES + b, err = Decrypt(b, secretKey) + if err != nil { + return fmt.Errorf("auth: error decrypting value: %s", err) + } + + // Deserialize + err = deserialize(b, dst) + if err != nil { + return fmt.Errorf("auth: error deserializing value: %s", err) + } + + // Done. 
+ return nil +} diff --git a/vendor/github.com/fragmenta/mux/LICENSE b/vendor/github.com/fragmenta/mux/LICENSE new file mode 100644 index 0000000..2c12fd6 --- /dev/null +++ b/vendor/github.com/fragmenta/mux/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Mechanism Design Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fragmenta/mux/README.md b/vendor/github.com/fragmenta/mux/README.md new file mode 100644 index 0000000..ecd00fa --- /dev/null +++ b/vendor/github.com/fragmenta/mux/README.md @@ -0,0 +1,121 @@ +# Fragmenta Multiplexer (mux) + +Fragmenta mux is a replacement the standard http.ServeMux which offers a few additional features and improved efficiency. Features are very similar to gorilla/mux but with a few additions, and it is compatible with the standard http.Handler interface or handlers returning error. + +It offers the following features: + +* Named paramaters including regexp matches for params (e.g. {id:\d+} to match id only to one or more numerals) +* Delayed param parsing (url,query,form) with utility functions for extracting Int, Bool, Float params. +* Routes are evaluated strictly in order - add important routes first and catch-alls at the end +* Zero allocations when matching means low-memory use and responses as fast as httprouter for static routes +* A cache in front of route matching speeds up responses (under 100ns/op in a simple static case) +* Low memory usage (even with cache) +* Accepts either the standard http.Handler interface or mux.Handler (same but with error return) +* Add middleware http.HandlerFunc for chaining standard Go middleware for auth, logging etc. + +It does not offer: + +* Nested routes or groups + + +## Install + +Perform the usual incantation: + +```sh + go get -u github.com/fragmenta/mux +``` + +## Usage + +Usage is as you'd expect if you've used the stdlib mux or gorilla mux. You can use the mux.Add/Get/Post to add handlers which return an error, or mux.AddHandler to add a stdlib http.HandlerFunc. + +```go + +func main() { + m := mux.New() + m.Get(`/`,homeHandler) + m.Get(`/users`,users.HandleIndex) + m.Post(`/users`,users.HandleCreate) + m.Post(`/users/{id:\d+}/update`,users.HandleUpdate) + http.Handle("/", r) +} + + +``` + +## Errors + +Because of the handler signature returning errors, you can set an ErrorHandler which is called if an error occurs inside one of your handlers, and a FileHandler which is called for serving files if no route is found. 
This makes handling errors more elegant, instead of this: + + +```go + +if err != nil { + log.Printf("error occured:%s",err) + // .. do something to handle and display to user + return +} + +``` + +you can do this in your handlers: + +```go + +if err != nil { + return err +} + +``` + +and display errors in a consistent way using your ErrorHandler function (you can also return a custom error type from handlers as fragmenta does to send more information than just error). + + +## Params + +Parsing of params is delayed until you require them in your handler - no parsing is done until that point. When you do require them, just parse params as follows, and a full params object will be available with a map of all params from urls, and form bodies. Multipart file forms are parsed automatically and the files made available for use. + +```go + +// Parse params (any url, query and form params) +params,err := mux.Params(request) +if err != nil { + return err +} + +params.Values["key"][4] +params.Get("my_query_key") +params.GetInt("user_id") +params.GetFloat("float") +params.GetBool("bool") +params.GetDate("published_at","2017-01-02") + +for _,fh := range params.Files { + +} + +``` + +## Benchmarks + +Speed isn't everything (see the list of features above), but it is important the router doesn't slow down request times, particularly if you have a lot of urls to match. For benchmarks against a few popular routers, see https://github.com/kennygrant/routebench + +Performance is adequate: + +``` + +BenchmarkStatic/stdlib_mux-4 1000 1946545 ns/op 20619 B/op 537 allocs/op +BenchmarkStatic/gorilla_mux-4 1000 1846382 ns/op 115648 B/op 1578 allocs/op +BenchmarkStatic/fragmenta_mux-4 100000 13969 ns/op 0 B/op 0 allocs/op +BenchmarkStatic/httprouter_mux-4 100000 16240 ns/op 0 B/op 0 allocs/op + +BenchmarkGithubFuzz/stdlib_mux-4 300 4592686 ns/op 35767 B/op 902 allocs/op +BenchmarkGithubFuzz/gorilla_mux-4 100 12931693 ns/op 246784 B/op 2590 allocs/op +BenchmarkGithubFuzz/fragmenta_mux-4 5000 324911 ns/op 7617 B/op 136 allocs/op +BenchmarkGithubFuzz/httprouter_mux-4 10000 101702 ns/op 23791 B/op 296 allocs/op + + +``` + + diff --git a/vendor/github.com/fragmenta/mux/handlers.go b/vendor/github.com/fragmenta/mux/handlers.go new file mode 100644 index 0000000..c756da4 --- /dev/null +++ b/vendor/github.com/fragmenta/mux/handlers.go @@ -0,0 +1,33 @@ +package mux + +import ( + "fmt" + "io" + "net/http" +) + +// fileHandler is the default static file handler called if there is no route. +func fileHandler(w http.ResponseWriter, r *http.Request) error { + // Just return a not found error + // Set the headers + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusNotFound) + + // Write a simple error message page - omit error details for security reasons + html := fmt.Sprintf("
404 Not Found Error
") + io.WriteString(w, html) + return nil +} + +// errHandler is a simple built-in error handler which writes the error string to context.Writer +// users of the mux should override this with their own handler. +func errHandler(w http.ResponseWriter, r *http.Request, err error) { + + // Set the headers + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusInternalServerError) + + // Write a simple error message page - omit error details for security reasons + html := fmt.Sprintf("
500 Internal Error
") + io.WriteString(w, html) +} diff --git a/vendor/github.com/fragmenta/mux/mux.go b/vendor/github.com/fragmenta/mux/mux.go new file mode 100644 index 0000000..b909920 --- /dev/null +++ b/vendor/github.com/fragmenta/mux/mux.go @@ -0,0 +1,226 @@ +package mux + +import ( + "net/http" + "strings" + "sync" +) + +// HandlerFunc defines a std net/http HandlerFunc, but which returns an error. +type HandlerFunc func(w http.ResponseWriter, r *http.Request) error + +// ErrorHandlerFunc defines a HandlerFunc which accepts an error and displays it. +type ErrorHandlerFunc func(w http.ResponseWriter, r *http.Request, err error) + +// Middleware is a handler that wraps another handler +type Middleware func(http.HandlerFunc) http.HandlerFunc + +// Route defines the interface routes are expected to conform to. +type Route interface { + // Match against URL + MatchMethod(string) bool + MatchMaybe(string) bool + Match(string) bool + + // Handler returns the handler to execute + Handler() HandlerFunc + + // Parse the URL for params according to pattern + Parse(string) map[string]string + + // Set accepted methods + Get() Route + Post() Route + Put() Route + Delete() Route + Methods(...string) Route +} + +// MaxCacheEntries defines the maximum number of entries in the request->route cache +// 0 means caching is turned off +var MaxCacheEntries = 500 + +// mux is a private variable which is set only once on startup. +var mux *Mux + +// SetDefault sets the default mux on the package for use in parsing params +// we could instead decorate each request with a reference to the Route +// but this means extra allocations for each request, +// when almost all apps require only one mux. +func SetDefault(m *Mux) { + if mux == nil { + mux = m + + // Set our router to handle all routes + http.Handle("/", mux) + } +} + +// Mux handles http requests by selecting a handler +// and passing the request to it. +// Routes are evaluated in the order they were added. +// Before the request reaches the handler +// it is passed through the middleware chain. +type Mux struct { + cache map[string]Route + cacheMu sync.RWMutex + + routes []Route + handlerFuncs []Middleware + + // See httptrace for best way to instrument + ErrorHandler ErrorHandlerFunc + FileHandler HandlerFunc + RedirectWWW bool +} + +// New returns a new mux +func New() *Mux { + m := &Mux{ + RedirectWWW: false, + FileHandler: fileHandler, + ErrorHandler: errHandler, + cache: make(map[string]Route, MaxCacheEntries), + } + + return m +} + +// ServeHTTP implements net/http.Handler. 
+func (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // If redirect www is set, test host + if m.RedirectWWW && strings.HasPrefix(r.Host, "www.") { + redirect := strings.Replace("https://"+r.Host+r.URL.String(), "www.", "", 1) + http.Redirect(w, r, redirect, http.StatusMovedPermanently) + } + + // Avoid iteration if possible + if len(m.handlerFuncs) == 0 { + m.RouteRequest(w, r) + return + } + h := m.RouteRequest + for _, mh := range m.handlerFuncs { + h = mh(h) + } + h(w, r) +} + +// RouteRequest is the final endpoint of all requests +func (m *Mux) RouteRequest(w http.ResponseWriter, r *http.Request) { + // Match a route + route := m.Match(r) + if route == nil { + err := m.FileHandler(w, r) + if err != nil { + m.ErrorHandler(w, r, err) + } + return + } + + // Execute the route + err := route.Handler()(w, r) + if err != nil { + m.ErrorHandler(w, r, err) + } + +} + +// Match finds the route (if any) which matches this request +func (m *Mux) Match(r *http.Request) Route { + // Handle nil request + if r == nil { + return nil + } + + // Check if we have a cached result for this same path + if MaxCacheEntries > 0 { + m.cacheMu.RLock() + route, ok := m.cache[requestCacheKey(r)] + m.cacheMu.RUnlock() + // This check is necessary as we only use the request url for the cache key + // this means we get a cache miss on some identical routes with diff methods. + if ok && route.MatchMethod(r.Method) { + return route + } + } + + // Routes are checked in order against the request path + for _, route := range m.routes { + // Test with probabalistic match + if route.MatchMaybe(r.URL.Path) { + // Test on method + if route.MatchMethod(r.Method) { + // Test exact match (may be expensive regexp) + if route.Match(r.URL.Path) { + m.cacheRoute(requestCacheKey(r), route) + return route + } + } + + } + } + + return nil +} + +// Return a key suitable for storing this request in our cache. +// NB: To avoid allocations we do not include every permutation in the cache +// so routes returned must be checked against request. +func requestCacheKey(r *http.Request) string { + return r.URL.Path +} + +// cacheRoute saves the route with key provided +func (m *Mux) cacheRoute(key string, r Route) { + if MaxCacheEntries == 0 { + return // MaxCacheEntries is 0 so cache is off + } + m.cacheMu.Lock() + // If cache is too big, evict + if len(m.cache) > MaxCacheEntries { + m.cache = make(map[string]Route, MaxCacheEntries) + } + // Fill the cache for this key -> route pair + m.cache[key] = r + m.cacheMu.Unlock() +} + +// AddMiddleware adds a middleware function, this should be done before +// starting the server as it remakes our chain of middleware. +// This prepends to our chain of middleware +func (m *Mux) AddMiddleware(middleware Middleware) { + m.handlerFuncs = append([]Middleware{middleware}, m.handlerFuncs...) +} + +// AddHandler adds a route for this pattern using a +// stdlib http.HandlerFunc which does not return an error. 
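Middleware here is plain net/http-style wrapping; a minimal server sketch (the handler, logger and port are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/fragmenta/mux"
)

// logRequests wraps the next handler and runs before it on every request.
func logRequests(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s %s", r.Method, r.URL.Path)
		next(w, r)
	}
}

func main() {
	m := mux.New()
	m.Get("/", func(w http.ResponseWriter, r *http.Request) error {
		_, err := w.Write([]byte("home"))
		return err
	})

	// Middleware added earlier ends up outermost in the chain, so it runs first.
	m.AddMiddleware(logRequests)

	// SetDefault registers the mux on http.DefaultServeMux and enables mux.Params.
	mux.SetDefault(m)
	log.Fatal(http.ListenAndServe(":3000", nil))
}
```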
+func (m *Mux) AddHandler(pattern string, handler http.HandlerFunc) Route { + return m.Add(pattern, func(w http.ResponseWriter, r *http.Request) error { + handler(w, r) + return nil + }) +} + +// Add adds a route for this request with the default methods (GET/HEAD) +// Route is returned so that method functions can be chained +func (m *Mux) Add(pattern string, handler HandlerFunc) Route { + route, err := NewRoute(pattern, handler) + if err != nil { + // errors should be rare, but log them to stdout for debug + println("mux: error parsing route:%s", pattern) + } + + m.routes = append(m.routes, route) + return route +} + +// Get adds a route for this pattern/hanlder with the default methods (GET/HEAD) +func (m *Mux) Get(pattern string, handler HandlerFunc) Route { + return m.Add(pattern, handler) +} + +// Post adds a route for this pattern/hanlder with method http.PostMethod +func (m *Mux) Post(pattern string, handler HandlerFunc) Route { + return m.Add(pattern, handler).Post() +} diff --git a/vendor/github.com/fragmenta/mux/params.go b/vendor/github.com/fragmenta/mux/params.go new file mode 100644 index 0000000..dc64edc --- /dev/null +++ b/vendor/github.com/fragmenta/mux/params.go @@ -0,0 +1,386 @@ +package mux + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math" + "mime/multipart" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// Params returns a new set of params parsed from the request. +func Params(r *http.Request) (*RequestParams, error) { + return ParamsWithMux(mux, r) +} + +// ParamsWithMux returns params for a given mux and request +func ParamsWithMux(m *Mux, r *http.Request) (*RequestParams, error) { + params := &RequestParams{ + Values: make(url.Values, 0), + Files: make(map[string][]*multipart.FileHeader, 0), + } + + // Find the route for request + route := mux.Match(r) + if route == nil { + return nil, errors.New("mux: could not find route for request") + } + + // Parse the request path params first + urlParams := route.Parse(r.URL.Path) + for k, v := range urlParams { + params.Set(k, []string{v}) + } + + // Add query string params from request + queryParams := r.URL.Query() + for k, v := range queryParams { + params.Add(k, v) + } + + // If the body is empty, return now without error + if r.Body == nil { + return params, nil + } + + // Parse based on content type + contentType := r.Header.Get("Content-Type") + + if strings.HasPrefix(contentType, "application/x-www-form-urlencoded") { + err := r.ParseForm() + if err != nil { + return nil, err + } + for k, v := range r.Form { + params.Add(k, v) + } + + } else if strings.HasPrefix(contentType, "multipart/form-data") { + err := r.ParseMultipartForm(20 << 20) // 20MB + if err != nil { + return nil, err + } + + // Add the form values + for k, v := range r.MultipartForm.Value { + params.Add(k, v) + } + + // Add the form files + for k, v := range r.MultipartForm.File { + params.Files[k] = v + } + } + + return params, nil +} + +// ParamsJSON returns a new set of params parsed from the request (json included, for testing). 
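Inside a handler, params are only parsed when requested; a sketch of reading a typed path param (the route pattern and handler name are assumptions, and mux.SetDefault must have been called because Params re-matches the route against the default mux):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/fragmenta/mux"
)

// handleShowUser reads the {id:[0-9]+} path param lazily via mux.Params.
func handleShowUser(w http.ResponseWriter, r *http.Request) error {
	params, err := mux.Params(r)
	if err != nil {
		return err
	}
	id := params.GetInt("id") // 0 if missing or unparseable
	fmt.Fprintf(w, "user %d", id)
	return nil
}

func main() {
	m := mux.New()
	m.Get(`/users/{id:[0-9]+}`, handleShowUser)
	mux.SetDefault(m) // Params relies on the default mux to find the route
	http.ListenAndServe(":3000", nil)
}
```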
+// This is a temporary method for testing json parsing, we should add this capability to Params() +func ParamsJSON(r *http.Request) (*RequestParams, error) { + + params := &RequestParams{ + Values: make(url.Values, 0), + Files: make(map[string][]*multipart.FileHeader, 0), + } + + // Find the route for request + route := mux.Match(r) + if route == nil { + return nil, errors.New("mux: could not find route for request") + } + + // Parse the request path params first + urlParams := route.Parse(r.URL.Path) + for k, v := range urlParams { + params.Set(k, []string{v}) + } + + // Add query string params from request + queryParams := r.URL.Query() + for k, v := range queryParams { + params.Add(k, v) + } + + // If the body is empty, return now without error + if r.Body == nil { + return params, nil + } + + // Parse based on content type + contentType := r.Header.Get("Content-Type") + + if strings.HasPrefix(contentType, "application/x-www-form-urlencoded") { + err := r.ParseForm() + if err != nil { + return nil, err + } + for k, v := range r.Form { + params.Add(k, v) + } + + } else if strings.HasPrefix(contentType, "multipart/form-data") { + err := r.ParseMultipartForm(20 << 20) // 20MB + if err != nil { + return nil, err + } + + // Add the form values + for k, v := range r.MultipartForm.Value { + params.Add(k, v) + } + + // Add the form files + for k, v := range r.MultipartForm.File { + params.Files[k] = v + } + } else if strings.HasPrefix(contentType, "application/json") { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return params, err + } + + // If no body provided, return straight away + if len(body) == 0 { + return params, err + } + + rawData := map[string]interface{}{} + if err := json.Unmarshal(body, &rawData); err != nil { + return params, err + } + + // Convert the underlying type of the JSON values + for k, v := range rawData { + setParam(params, k, v) + } + } + + return params, nil +} + +// setParam converts the underlying type of JSON values to strings that we can add +// to the given params. Also handles arrays of strings, floats, ints, and booleans. +func setParam(params *RequestParams, k string, v interface{}) { + switch v.(type) { + case int64: + params.Values.Add(k, fmt.Sprint(v)) + case float64: + // JSON doesn't handle integers so we get floats + // interpret as integer if we don't lose information + f := v.(float64) + if f == math.Trunc(f) { + // Add as integer + params.Values.Add(k, fmt.Sprint(int64(f))) + } else { + // Add as float + params.Values.Add(k, fmt.Sprint(f)) + } + case string, bool: + params.Values.Add(k, fmt.Sprint(v)) + case []interface{}: + + // If this is an array, iterate and call setParam recursively to find the underlying + // type of each element in the array + for _, i := range v.([]interface{}) { + setParam(params, k, i) + } + default: + params.Values.Add(k, "") + } +} + +// RequestParams parses all params in a request and stores them in Values +// this includes: +// path params (from route) +// query params (from request) +// body params (from form request bodies) +type RequestParams struct { + Values url.Values + Files map[string][]*multipart.FileHeader +} + +// Map returns a flattened map of params with only one entry for each key, +// rather than the array of values Request params allow. +func (p *RequestParams) Map() map[string]string { + flat := make(map[string]string) + + for k, v := range p.Values { + flat[k] = v[0] + } + + return flat +} + +// Set sets this key to these values, removing any other entries. 
+func (p *RequestParams) Set(key string, values []string) { + p.Values[key] = values +} + +// SetString sets this key to this single string value, removing any other entries. +func (p *RequestParams) SetString(key string, v string) { + p.Set(key, []string{v}) +} + +// SetInt sets this key to this single string value, removing any other entries. +func (p *RequestParams) SetInt(key string, v int64) { + p.Set(key, []string{fmt.Sprintf("%d", v)}) +} + +// Add appends these values to this key, without removing any other entries. +func (p *RequestParams) Add(key string, values []string) { + p.Values[key] = append(p.Values[key], values...) +} + +// Delete all values associated with the key. +func (p *RequestParams) Delete(key string) { + delete(p.Values, key) +} + +// Exists returns true if this key exists in Values +func (p *RequestParams) Exists(key string) bool { + _, ok := p.Values[key] + return ok +} + +// Get returns the first value for this key or a blank string if no entry. +func (p *RequestParams) Get(key string) string { + v, ok := p.Values[key] + if !ok { + return "" + } + return v[0] +} + +// GetStrings returns all string values associated with this key +// if there are no values associated an empty array is returned +func (p *RequestParams) GetStrings(key string) []string { + return p.Values[key] +} + +// GetDate returns the first value associated with a given key as a time, +// using the given time format. +func (p *RequestParams) GetDate(key string, format string) (time.Time, error) { + v := p.Get(key) + return time.Parse(format, v) +} + +// GetInt returns the first value associated with the given key as an integer. +// If there is no value or a parse error, it returns 0 +// If the string contains non-numeric characters, it is truncated from +// the first non-numeric character. +func (p *RequestParams) GetInt(key string) int64 { + var i int64 + v := p.Get(key) + // We truncate the string at the first non-numeric character + v = v[0 : strings.LastIndexAny(v, "0123456789")+1] + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0 + } + return i +} + +// GetInts returns all values associated with the key as an array of integers. +func (p *RequestParams) GetInts(key string) []int64 { + ints := []int64{} + + for _, v := range p.Values[key] { + vi, err := strconv.ParseInt(v, 10, 64) + if err != nil { + vi = 0 + } + ints = append(ints, vi) + } + + return ints +} + +// GetUniqueInts returns all unique non-zero int values +// associated with the given key as an array of integers +func (p *RequestParams) GetUniqueInts(key string) []int64 { + ints := []int64{} + + for _, v := range p.Values[key] { + if string(v) == "" { + continue // ignore blank ints + } + vi, err := strconv.ParseInt(v, 10, 64) + if err != nil { + vi = 0 + } + + // Do not insert 0, or duplicate entries + if vi > 0 && !contains(ints, vi) { + ints = append(ints, vi) + } + } + + return ints +} + +// GetIntsString returns all values associated with the key +// as a comma separated string. +func (p *RequestParams) GetIntsString(key string) string { + ints := "" + + for _, v := range p.Values[key] { + if "" == string(v) { + continue // ignore blank ints + } + + if len(ints) > 0 { + ints += "," + string(v) + } else { + ints += string(v) + } + + } + + return ints +} + +// GetFloat returns the first value associated with the key as an integer. 
+// If there is no value or a parse error, it returns 0.0 +func (p *RequestParams) GetFloat(key string) float64 { + var value float64 + v := p.Get(key) + // Remove percent signs from float values + v = strings.Replace(v, "%", "", -1) + value, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0.0 + } + return value +} + +// GetFloats returns all values associated with the key as an array of floats. +func (p *RequestParams) GetFloats(key string) []float64 { + var values []float64 + for _, v := range p.Values[key] { + // Remove percent signs from float values + v = strings.Replace(v, "%", "", -1) + value, err := strconv.ParseFloat(v, 64) + if err != nil { + value = 0.0 + } + values = append(values, value) + } + return values +} + +// contains returns true if this array of ints contains the given int +func contains(list []int64, item int64) bool { + for _, b := range list { + if b == item { + return true + } + } + return false +} diff --git a/vendor/github.com/fragmenta/mux/route.go b/vendor/github.com/fragmenta/mux/route.go new file mode 100644 index 0000000..0a5905f --- /dev/null +++ b/vendor/github.com/fragmenta/mux/route.go @@ -0,0 +1,272 @@ +package mux + +import ( + "bytes" + "fmt" + "net/http" + "regexp" + "strings" +) + +// NewRoute returns a new Route of our default type. +func NewRoute(pattern string, handler HandlerFunc) (Route, error) { + return NewPrefixRoute(pattern, handler) +} + +// NewNaiveRoute creates a new Route, given a pattern to match and a handler for the route. +func NewNaiveRoute(pattern string, handler HandlerFunc) (Route, error) { + r := &NaiveRoute{} + err := r.Setup(pattern, handler) + return r, err +} + +// NewPrefixRoute creates a new PrefixRoute, given a pattern to match and a handler for the route. +func NewPrefixRoute(pattern string, handler HandlerFunc) (Route, error) { + r := &PrefixRoute{} + err := r.Setup(pattern, handler) + return r, err +} + +// NaiveRoute holds a pattern which matches a route and params within it, +// and an associated handler which will be called when the route matches. +type NaiveRoute struct { + pattern string + handler HandlerFunc + methods []string + paramNames []string + regexp *regexp.Regexp +} + +// Handler returns our handlerfunc. +func (r *NaiveRoute) Handler() HandlerFunc { + return r.handler +} + +// Setup sets up the route from a pattern +func (r *NaiveRoute) Setup(p string, h HandlerFunc) error { + // Allow GET and HEAD by default + r.methods = []string{http.MethodGet, http.MethodHead} + r.handler = h + r.pattern = p + + // Parse regexp once on startup + return r.compileRegexp() +} + +// Handle calls the handler with the writer and request. +func (r *NaiveRoute) Handle(w http.ResponseWriter, req *http.Request) error { + return r.handler(w, req) +} + +// MatchMethod returns true if our list of methods contains method +func (r *NaiveRoute) MatchMethod(method string) bool { + + for _, v := range r.methods { + if v == method { + return true + } + // Treat "" as GET + if method == "" && v == http.MethodGet { + return true + } + } + + return false +} + +// MatchMaybe returns false if the path definitely is not MatchMethod +// or true/maybe if it *may* match. +func (r *NaiveRoute) MatchMaybe(path string) bool { + return r.Match(path) // Just cheat and do a full match on base class +} + +// Match returns true if this route matches the path given. 
+func (r *NaiveRoute) Match(path string) bool { + + // If we have a short pattern match, and we have a regexp, check against that + if r.regexp != nil { + return r.regexp.MatchString(path) + } + + // If no regexp, check for exact string match against pattern + return (r.pattern == path) +} + +// Get sets the method exclusively to GET +func (r *NaiveRoute) Get() Route { + return r.Method(http.MethodGet) +} + +// Post sets the method exclusively to POST +func (r *NaiveRoute) Post() Route { + return r.Method(http.MethodPost) +} + +// Put sets the method exclusively to PUT +func (r *NaiveRoute) Put() Route { + return r.Method(http.MethodPut) +} + +// Delete sets the method exclusively to DELETE +func (r *NaiveRoute) Delete() Route { + return r.Method(http.MethodDelete) +} + +// Method sets the method exclusively to method +func (r *NaiveRoute) Method(method string) Route { + r.methods = []string{method} + return r +} + +// Methods sets the methods allowed as an array +func (r *NaiveRoute) Methods(permitted ...string) Route { + r.methods = permitted + return r +} + +// Pattern returns the string pattern for the route +func (r *NaiveRoute) Pattern() string { + return r.pattern +} + +// String returns the route formatted as a string +func (r *NaiveRoute) String() string { + return fmt.Sprintf("%s %s", r.methods[0], r.pattern) +} + +// Parse parses this path given our regexp and returns a map of URL params. +func (r *NaiveRoute) Parse(path string) map[string]string { + + // Set up our params map + params := make(map[string]string, 0) + + // If called on a nil route, return empty params + if r == nil || r.regexp == nil || len(r.paramNames) == 0 { + return params + } + + // Find a set of matches, and for each match set the entry in our map. + matches := r.regexp.FindStringSubmatch(path) + + if matches != nil { + for i, key := range r.paramNames { + index := i + 1 + if len(matches) > index { + value := matches[index] + params[key] = value + } + } + } + + return params +} + +// compileRegexp compiles our route format to a true regexp +// Both name and regexp are required - routes should be well structured and restrictive by default +// Convert the pattern from the form /pages/{id:[0-9]*}/edit +// to one suitable for regexp - /pages/([0-9]*)/edit +// We want to match things like this: +// /pages/{id:[0-9]*}/edit +// /pages/{id:[0-9]*}/edit?param=test +func (r *NaiveRoute) compileRegexp() (err error) { + + // First return if no regexp + if !strings.Contains(r.pattern, "{") { + return nil + } + + // Check if it is well-formed. + idxs, errBraces := r.findBraces(r.pattern) + if errBraces != nil { + return errBraces + } + + pattern := bytes.NewBufferString("^") + end := 0 + + // Walk through indexes two at a time + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := r.pattern[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(r.pattern[idxs[i]+1:end-1], ":", 2) + if len(parts) != 2 { + return fmt.Errorf("Missing name or pattern in %s", raw) + } + + // Add the name to params in order of finding + r.paramNames = append(r.paramNames, parts[0]) + + // Add the real regexp + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), parts[1]) + + } + // Add the remaining pattern + pattern.WriteString(regexp.QuoteMeta(r.pattern[end:])) + r.regexp, err = regexp.Compile(pattern.String()) + + return err +} + +// findBraces returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. 
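+// For example, for the pattern /pages/{id:[0-9]*}/edit it returns a single
+// start/end pair of indices covering the {id:[0-9]*} section.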
+// This method of parsing regexp is based on gorilla mux. +func (r *NaiveRoute) findBraces(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("Route error: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("Route error: unbalanced braces in %q", s) + } + return idxs, nil +} + +// PrefixRoute uses a static prefix to reject route matches quickly. +type PrefixRoute struct { + NaiveRoute + index int +} + +// Setup sets up the pattern prefix for the Prefix route. +func (r *PrefixRoute) Setup(p string, h HandlerFunc) error { + + // Record the prefix len up to the first regexp (if any) + r.index = strings.Index(p, "{") + + // Finish setup with NaiveRoute + return r.NaiveRoute.Setup(p, h) +} + +// MatchMaybe returns false if the path definitely is not MatchMethod +// or true/maybe if it *may* match. +func (r *PrefixRoute) MatchMaybe(path string) bool { + + // If no prefix we are static, so can safely match absolutely + if r.index < 0 { + return path == r.pattern + } + + // Reject with a string comparison of static prefix with path + // HasPrefix checks on length first so it is fast. + // If this returns yes, we are really saying maybe + // and require a further check with Match(). + return strings.HasPrefix(path, r.pattern[:r.index]) +} + +// String returns the route formatted as a string. +func (r *PrefixRoute) String() string { + return fmt.Sprintf("%s %s (prefix:%s)", r.methods[0], r.pattern, r.pattern[:r.index]) +} diff --git a/vendor/github.com/fragmenta/query/LICENSE b/vendor/github.com/fragmenta/query/LICENSE new file mode 100644 index 0000000..c6efb8b --- /dev/null +++ b/vendor/github.com/fragmenta/query/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Mechanism Design Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/fragmenta/query/README.md b/vendor/github.com/fragmenta/query/README.md new file mode 100644 index 0000000..ed78a2c --- /dev/null +++ b/vendor/github.com/fragmenta/query/README.md @@ -0,0 +1,116 @@ +Query [![GoDoc](https://godoc.org/github.com/fragmenta/query?status.svg)](https://godoc.org/github.com/fragmenta/query) [![Go Report Card](https://goreportcard.com/badge/github.com/fragmenta/query)](https://goreportcard.com/report/github.com/fragmenta/query) +===== + + + +Query lets you build SQL queries with chainable methods, and defer execution of SQL until you wish to extract a count or array of models. It will probably remain limited in scope - it is not intended to be a full ORM with strict mapping between db tables and structs, but a tool for querying the database with minimum friction, and performing CRUD operations linked to models; simplifying your use of SQL to store model data without getting in the way. Full or partial SQL queries are of course also available, and full control over sql. Model creation and column are delegated to the model, to avoid dictating any particular model structure or interface, however a suggested interface is given (see below and in tests), which makes usage painless in your handlers without any boilerplate. + +Supported databases: PostgreSQL, SQLite, MySQL. Bug fixes, suggestions and contributions welcome. + +Usage +===== + + +```go + +// In your app - open a database with options +options := map[string]string{"adapter":"postgres","db":"query_test"} +err := query.OpenDatabase(options) +defer query.CloseDatabase() + +... + +// In your model +type Page struct { + ID int64 + CreatedAt time.Time + UpdatedAt time.Time + MyField myStruct + ... + // Models can have any structure, any PK, here an int is used +} + +// Normally you'd define helpers on your model class to load rows from the database +// Query does not attempt to read data into columns with reflection or tags - +// that is left to your model so you can read as little or as much as you want from queries + +func Find(ID int64) (*Page, error) { + result, err := PagesQuery().Where("id=?", ID).FirstResult() + if err != nil { + return nil, err + } + return NewWithColumns(result), nil +} + +func FindAll(q *Query) ([]*Page, error) { + results, err := q.Results() + if err != nil { + return nil, err + } + + var models []*Page + for _, r := range results { + m := NewWithColumns(r) + models = append(models, m) + } + + return models, nil +} + +... 
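+
+// NewWithColumns is not part of the query package - it is a hypothetical
+// example of the constructor referenced above, sketching one way a model
+// might read values out of a query.Result (a map[string]interface{}).
+func NewWithColumns(cols map[string]interface{}) *Page {
+	page := &Page{}
+	if id, ok := cols["id"].(int64); ok {
+		page.ID = id
+	}
+	if created, ok := cols["created_at"].(time.Time); ok {
+		page.CreatedAt = created
+	}
+	return page
+}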
+ +// In your handlers, construct queries and ask your models for the data + +// Find a simple model by id +page, err := pages.Find(1) + +// Start querying the database using chained finders +q := page.Query().Where("id IN (?,?)",4,5).Order("id desc").Limit(30) + +// Build up chains depending on other app logic, still no db requests +if shouldRestrict { + q.Where("id > ?",3).OrWhere("keywords ~* ?","Page") +} + +// Pass the relation around, until you are ready to retrieve models from the db +results, err := pages.FindAll(q) +``` + +What it does +============ + +* Builds chainable queries including where, orwhere,group,having,order,limit,offset or plain sql +* Allows any Primary Key/Table name or model fields (query.New lets you define this) +* Allows Delete and Update operations on queried records, without creating objects +* Defers SQL requests until full query is built and results requested +* Provide helpers and return results for join ids, counts, single rows, or multiple rows + + +What it doesn't do +================== + +* Attempt to read your models with reflection or struct tags +* Require changes to your structs like tagging fields or specific fields +* Cause problems with untagged fields, embedding, and fields not in the database +* Provide hooks after/before update etc - your models are fully in charge of queries and their lifecycle + + + +Tests +================== + +All 3 databases supported have a test suite - to run the tests, create a database called query_test in mysql and psql then run go test at the root of the package. The sqlite tests are disabled by default because enabling them prohibits cross compilation, which is useful if you don't want to install go on your server but just upload a binary compiled locally. + +```bash +go test +``` + + + +Versions +================== + +- 1.0 - First version with interfaces and chainable finders +- 1.0.1 - Updated to quote table names and fields, for use of reserved words, bug fix for mysql concurrency +- 1.3 - updated API, now shifted instantiation to models instead, to avoid use of reflection +- 1.3.1 - Fixed bugs in Mysql import, updated tests diff --git a/vendor/github.com/fragmenta/query/adapters/database.go b/vendor/github.com/fragmenta/query/adapters/database.go new file mode 100644 index 0000000..6ce457d --- /dev/null +++ b/vendor/github.com/fragmenta/query/adapters/database.go @@ -0,0 +1,152 @@ +// Package adapters offers adapters for pouplar databases +package adapters + +import ( + "database/sql" + "fmt" + "time" +) + +// Database provides an interface for database adapters to conform to +type Database interface { + + // Open and close + Open(opts map[string]string) error + Close() error + SQLDB() *sql.DB + + // Execute queries with or without returned rows + Exec(query string, args ...interface{}) (sql.Result, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + + // Insert a record, returning id + Insert(sql string, args ...interface{}) (id int64, err error) + + // Return extra SQL for insert statement (see psql) + InsertSQL(pk string) string + + // A format string for the arg placeholder + Placeholder(i int) string + + // Quote Table and Column names + QuoteField(name string) string + + // Convert a time to a string + TimeString(t time.Time) string + + // Convert a string to a time + ParseTime(s string) (time.Time, error) +} + +// Adapter is a struct defining a few functions used by all adapters +type Adapter struct { + queries map[string]interface{} +} + +// ReplaceArgPlaceholder does no 
replacements by default, and use default ? placeholder for args +// psql requires a different placeholder numericall labelled +func (db *Adapter) ReplaceArgPlaceholder(sql string, args []interface{}) string { + return sql +} + +// Placeholder is the argument placeholder for this adapter +func (db *Adapter) Placeholder(i int) string { + return "?" +} + +// TimeString - given a time, return the standard string representation +func (db *Adapter) TimeString(t time.Time) string { + return t.Format("2006-01-02 15:04:05.000 -0700") +} + +// ParseTime - given a string, return a time object built from it +func (db *Adapter) ParseTime(s string) (time.Time, error) { + + // Deal with broken mysql dates - deal better with this? + if s == "0000-00-00 00:00:00" { + return time.Now(), nil + } + + // Try to choose the right format for date string + format := "2006-01-02 15:04:05" + if len(s) > len(format) { + format = "2006-01-02 15:04:05.000" + } + if len(s) > len(format) { + format = "2006-01-02 15:04:05.000 -0700" + } + + t, err := time.Parse(format, s) + if err != nil { + fmt.Println("Unhandled field type:", s, "\n", err) + } + + return t, err +} + +// QuoteField quotes a table name or column name +func (db *Adapter) QuoteField(name string) string { + return fmt.Sprintf(`"%s"`, name) +} + +// InsertSQL provides extra SQL for end of insert statement (RETURNING for psql) +func (db *Adapter) InsertSQL(pk string) string { + return "" +} + +// performQuery executes Query SQL on the given sqlDB and return the rows. +// NB caller must call use defer rows.Close() with rows returned +func (db *Adapter) performQuery(sqlDB *sql.DB, debug bool, query string, args ...interface{}) (*sql.Rows, error) { + + if sqlDB == nil { + return nil, fmt.Errorf("No database available.") + } + + if debug { + fmt.Println("QUERY:", query, "ARGS", args) + } + + // This should be cached, perhaps hold a map in memory of queries strings and compiled queries? + // use queries map to store this + stmt, err := sqlDB.Prepare(query) + if err != nil { + return nil, err + } + defer stmt.Close() + + rows, err := stmt.Query(args...) + + if err != nil { + return nil, err + } + + // Caller is responsible for closing rows with defer rows.Close() + return rows, err +} + +// performExec executes Query SQL on the given sqlDB with no rows returned, just result +func (db *Adapter) performExec(sqlDB *sql.DB, debug bool, query string, args ...interface{}) (sql.Result, error) { + + if sqlDB == nil { + return nil, fmt.Errorf("No database available.") + } + + if debug { + fmt.Println("QUERY:", query, "ARGS", args) + } + + stmt, err := sqlDB.Prepare(query) + if err != nil { + return nil, err + } + defer stmt.Close() + + result, err := stmt.Exec(args...) 
+ + if err != nil { + return result, err + } + + // Caller is responsible for closing rows with defer rows.Close() + return result, err +} diff --git a/vendor/github.com/fragmenta/query/adapters/database_mysql.go b/vendor/github.com/fragmenta/query/adapters/database_mysql.go new file mode 100644 index 0000000..f8cd29b --- /dev/null +++ b/vendor/github.com/fragmenta/query/adapters/database_mysql.go @@ -0,0 +1,131 @@ +package adapters + +import ( + "database/sql" + "fmt" + + // Mysql driver + _ "github.com/go-sql-driver/mysql" +) + +// MysqlAdapter conforms to the query.Database interface +type MysqlAdapter struct { + *Adapter + options map[string]string + sqlDB *sql.DB + debug bool +} + +// Open this database +func (db *MysqlAdapter) Open(opts map[string]string) error { + + db.debug = false + db.options = map[string]string{ + "adapter": "mysql", + "user": "root", // sub your user + "password": "", + "db": "query_test", + "protocol": "tcp", + "host": "localhost", + "port": "3306", + "params": "charset=utf8&parseTime=true", + } + + if opts["debug"] == "true" { + db.debug = true + } + + // Merge options + for k, v := range opts { + db.options[k] = v + } + + // A typical connection string is of the form: + //"user:password@tcp(localhost:3306)/dbname?charset=utf8&parseTime=true") + options := fmt.Sprintf("%s:%s@%s(%s:%s)/%s?%s", + db.options["user"], + db.options["password"], + db.options["protocol"], + db.options["host"], + db.options["port"], + db.options["db"], + db.options["params"]) + + var err error + db.sqlDB, err = sql.Open(db.options["adapter"], options) + if err != nil { + return err + } + + if db.sqlDB == nil { + fmt.Printf("Mysql options:%s", options) + return fmt.Errorf("\nError creating database with options: %v", db.options) + } + + // Call ping on the db to check it does actually exist! + err = db.sqlDB.Ping() + if err != nil { + return err + } + + return err + +} + +// Close the database +func (db *MysqlAdapter) Close() error { + if db.sqlDB != nil { + return db.sqlDB.Close() + } + return nil +} + +// SQLDB returns the internal db.sqlDB pointer +func (db *MysqlAdapter) SQLDB() *sql.DB { + return db.sqlDB +} + +// Query SQL execute - NB caller must call use defer rows.Close() with rows returned +func (db *MysqlAdapter) Query(query string, args ...interface{}) (*sql.Rows, error) { + return db.performQuery(db.sqlDB, db.debug, query, args...) +} + +// Exec - use this for non-select statements +func (db *MysqlAdapter) Exec(query string, args ...interface{}) (sql.Result, error) { + return db.performExec(db.sqlDB, db.debug, query, args...) +} + +// QuoteField quotes a table name or column name +func (db *MysqlAdapter) QuoteField(name string) string { + return fmt.Sprintf("`%s`", name) +} + +// Insert a record with params and return the id - psql behaves differently +func (db *MysqlAdapter) Insert(query string, args ...interface{}) (id int64, err error) { + + tx, err := db.sqlDB.Begin() + if err != nil { + return 0, err + } + + // Execute the sql using db + result, err := db.Exec(query, args...) 
+ if err != nil { + return 0, err + } + + // TODO - check this works on mysql under load with concurrent connections + // fine if connection not shared + id, err = result.LastInsertId() + if err != nil { + return 0, err + } + + err = tx.Commit() + if err != nil { + return 0, err + } + + return id, nil + +} diff --git a/vendor/github.com/fragmenta/query/adapters/database_psql.go b/vendor/github.com/fragmenta/query/adapters/database_psql.go new file mode 100644 index 0000000..c6d6921 --- /dev/null +++ b/vendor/github.com/fragmenta/query/adapters/database_psql.go @@ -0,0 +1,124 @@ +package adapters + +import ( + "database/sql" + "fmt" + + // psql driver + _ "github.com/lib/pq" +) + +// PostgresqlAdapter conforms to the query.Database interface +type PostgresqlAdapter struct { + *Adapter + options map[string]string + sqlDB *sql.DB + debug bool +} + +// Open this database with the given options +// opts map keys:adapter, user, password, db, host, port, params (give extra parameters in the params option) +// Additional options available are detailed in the pq driver docs at +// https://godoc.org/github.com/lib/pq +func (db *PostgresqlAdapter) Open(opts map[string]string) error { + + db.debug = false + db.options = map[string]string{ + "adapter": "postgres", + "user": "", + "password": "", + "db": "", + "host": "localhost", // for unix instead of tcp use path - see driver + "port": "5432", // default PSQL port + "params": "sslmode=disable connect_timeout=60", // disable sslmode for localhost, set timeout + } + + if opts["debug"] == "true" { + db.debug = true + } + + // Merge options + for k, v := range opts { + db.options[k] = v + } + + // Default to psql database on localhost on port 5432, typical connection string: + // user=server password=p host=localhost port=5432 dbname=db sslmode=disable + // See https://godoc.org/github.com/lib/pq for options, use params to override defaults if required + optionString := fmt.Sprintf("user=%s %s host=%s port=%s dbname=%s %s", + db.options["user"], + paramOrBlank("password", db.options["password"]), + db.options["host"], + db.options["port"], + db.options["db"], + db.options["params"]) + + var err error + db.sqlDB, err = sql.Open(db.options["adapter"], optionString) + if err != nil { + return err + } + + // Call ping on the db to check it does actually exist! + err = db.sqlDB.Ping() + if err != nil { + return err + } + + if db.sqlDB != nil && db.debug { + fmt.Printf("Database %s opened using %s\n", db.options["db"], db.options["adapter"]) + } + + return nil + +} + +func paramOrBlank(k, v string) string { + if len(v) > 0 { + return fmt.Sprintf("%s=%s", k, v) + } + return "" +} + +// Close the database +func (db *PostgresqlAdapter) Close() error { + if db.sqlDB != nil { + return db.sqlDB.Close() + } + return nil +} + +// SQLDB returns the internal db.sqlDB pointer +func (db *PostgresqlAdapter) SQLDB() *sql.DB { + return db.sqlDB +} + +// Query executes query SQL - NB caller must call use defer rows.Close() with rows returned +func (db *PostgresqlAdapter) Query(query string, args ...interface{}) (*sql.Rows, error) { + return db.performQuery(db.sqlDB, db.debug, query, args...) +} + +// Exec - use this for non-select statements +func (db *PostgresqlAdapter) Exec(query string, args ...interface{}) (sql.Result, error) { + return db.performExec(db.sqlDB, db.debug, query, args...) 
+} + +// Placeholder returns the db placeholder +func (db *PostgresqlAdapter) Placeholder(i int) string { + return fmt.Sprintf("$%d", i) +} + +// InsertSQL is extra SQL for end of insert statement (RETURNING for psql) +func (db *PostgresqlAdapter) InsertSQL(pk string) string { + return fmt.Sprintf("RETURNING %s", pk) +} + +// Insert a record with params and return the id +func (db *PostgresqlAdapter) Insert(sql string, args ...interface{}) (id int64, err error) { + + // TODO - handle different types of id, not just int + // Execute the sql using db and retrieve new row id + row := db.sqlDB.QueryRow(sql, args...) + err = row.Scan(&id) + return id, err +} diff --git a/vendor/github.com/fragmenta/query/adapters/database_sqlite.go b/vendor/github.com/fragmenta/query/adapters/database_sqlite.go new file mode 100644 index 0000000..35e90db --- /dev/null +++ b/vendor/github.com/fragmenta/query/adapters/database_sqlite.go @@ -0,0 +1,101 @@ +package adapters + +// FIXME: Sqlite drivers are broken compiling with cgo at present +// therefore we don't use this adapter + +import ( + "database/sql" + "fmt" + // Unfortunately can't cross compile with sqlite support enabled - + // see https://github.com/mattn/go-sqlite3/issues/106 + // For now for we just turn off sqlite as we don't use it in production... + // pure go version of sqlite, or ditch sqlite and find some other pure go simple db + // would be nice not to require a db at all for very simple usage + //_ "github.com/mattn/go-sqlite3" +) + +// SqliteAdapter conforms to the query.Database interface +type SqliteAdapter struct { + *Adapter + options map[string]string + sqlDB *sql.DB + debug bool +} + +// Open this database +func (db *SqliteAdapter) Open(opts map[string]string) error { + + db.debug = false + db.options = map[string]string{ + "adapter": "sqlite3", + "db": "./tests/query_test.sqlite", + } + + if opts["debug"] == "true" { + db.debug = true + } + + for k, v := range opts { + db.options[k] = v + } + + var err error + db.sqlDB, err = sql.Open(db.options["adapter"], db.options["db"]) + if err != nil { + return err + } + + if db.sqlDB != nil && db.debug { + fmt.Printf("Database %s opened using %s\n", db.options["db"], db.options["adapter"]) + } + + // Call ping on the db to check it does actually exist! + err = db.sqlDB.Ping() + if err != nil { + return err + } + + return err + +} + +// Close the database +func (db *SqliteAdapter) Close() error { + if db.sqlDB != nil { + return db.sqlDB.Close() + } + return nil +} + +// SQLDB returns the internal db.sqlDB pointer +func (db *SqliteAdapter) SQLDB() *sql.DB { + return db.sqlDB +} + +// Query execute Query SQL - NB caller must call use defer rows.Close() with rows returned +func (db *SqliteAdapter) Query(query string, args ...interface{}) (*sql.Rows, error) { + return db.performQuery(db.sqlDB, db.debug, query, args...) +} + +// Exec - use this for non-select statements +func (db *SqliteAdapter) Exec(query string, args ...interface{}) (sql.Result, error) { + return db.performExec(db.sqlDB, db.debug, query, args...) +} + +// Insert a record with params and return the id - psql behaves differently +func (db *SqliteAdapter) Insert(query string, args ...interface{}) (id int64, err error) { + + // Execute the sql using db + result, err := db.Exec(query, args...) 
+ if err != nil { + return 0, err + } + + id, err = result.LastInsertId() + if err != nil { + return 0, err + } + + return id, nil + +} diff --git a/vendor/github.com/fragmenta/query/database.go b/vendor/github.com/fragmenta/query/database.go new file mode 100644 index 0000000..0cbf4fe --- /dev/null +++ b/vendor/github.com/fragmenta/query/database.go @@ -0,0 +1,84 @@ +package query + +import ( + "database/sql" + "fmt" + "time" + + "github.com/fragmenta/query/adapters" +) + +// database is the package global db - this reference is not exported outside the package. +var database adapters.Database + +// OpenDatabase opens the database with the given options +func OpenDatabase(opts map[string]string) error { + + // If we already have a db, return it + if database != nil { + return fmt.Errorf("query: database already open - %s", database) + } + + // Assign the db global in query package + switch opts["adapter"] { + case "sqlite3": + database = &adapters.SqliteAdapter{} + case "mysql": + database = &adapters.MysqlAdapter{} + case "postgres": + database = &adapters.PostgresqlAdapter{} + default: + database = nil // fail + } + + if database == nil { + return fmt.Errorf("query: database adapter not recognised - %s", opts) + } + + // Ask the db adapter to open + return database.Open(opts) +} + +// CloseDatabase closes the database opened by OpenDatabase +func CloseDatabase() error { + var err error + if database != nil { + err = database.Close() + database = nil + } + + return err +} + +// SetMaxOpenConns sets the maximum number of open connections +func SetMaxOpenConns(max int) { + database.SQLDB().SetMaxOpenConns(max) +} + +// QuerySQL executes the given sql Query against our database, with arbitrary args +func QuerySQL(query string, args ...interface{}) (*sql.Rows, error) { + if database == nil { + return nil, fmt.Errorf("query: QuerySQL called with nil database") + } + results, err := database.Query(query, args...) + return results, err +} + +// ExecSQL executes the given sql against our database with arbitrary args +// NB returns sql.Result - not to be used when rows expected +func ExecSQL(query string, args ...interface{}) (sql.Result, error) { + if database == nil { + return nil, fmt.Errorf("query: ExecSQL called with nil database") + } + results, err := database.Exec(query, args...) + return results, err +} + +// TimeString returns a string formatted as a time for this db +// if the database is nil, an empty string is returned. +func TimeString(t time.Time) string { + if database != nil { + return database.TimeString(t) + } + return "" +} diff --git a/vendor/github.com/fragmenta/query/query.go b/vendor/github.com/fragmenta/query/query.go new file mode 100644 index 0000000..37437f1 --- /dev/null +++ b/vendor/github.com/fragmenta/query/query.go @@ -0,0 +1,798 @@ +// Package query lets you build and execute SQL chainable queries against a database of your choice, and defer execution of SQL until you wish to extract a count or array of models. 
+ +// NB in order to allow cross-compilation, we exlude sqlite drivers by default +// uncomment them to allow use of sqlite + +package query + +import ( + "database/sql" + "fmt" + "sort" + "strconv" + "strings" +) + +// FIXME - this package global should in theory be protected by a mutex, even if it is only for debugging + +// Debug sets whether we output debug statements for SQL +var Debug bool + +func init() { + Debug = false // default to false +} + +// Result holds the results of a query as map[string]interface{} +type Result map[string]interface{} + +// Func is a function which applies effects to queries +type Func func(q *Query) *Query + +// Query provides all the chainable relational query builder methods +type Query struct { + + // Database - database name and primary key, set with New() + tablename string + primarykey string + + // SQL - Private fields used to store sql before building sql query + sql string + sel string + join string + where string + group string + having string + order string + offset string + limit string + + // Extra args to be substituted in the *where* clause + args []interface{} +} + +// New builds a new Query, given the table and primary key +func New(t string, pk string) *Query { + + // If we have no db, return nil + if database == nil { + return nil + } + + q := &Query{ + tablename: t, + primarykey: pk, + } + + return q +} + +// Exec the given sql and args against the database directly +// Returning sql.Result (NB not rows) +func Exec(sql string, args ...interface{}) (sql.Result, error) { + results, err := database.Exec(sql, args...) + return results, err +} + +// Rows executes the given sql and args against the database directly +// Returning sql.Rows +func Rows(sql string, args ...interface{}) (*sql.Rows, error) { + results, err := database.Query(sql, args...) + return results, err +} + +// Copy returns a new copy of this query which can be mutated without affecting the original +func (q *Query) Copy() *Query { + return &Query{ + tablename: q.tablename, + primarykey: q.primarykey, + sql: q.sql, + sel: q.sel, + join: q.join, + where: q.where, + group: q.group, + having: q.having, + order: q.order, + offset: q.offset, + limit: q.limit, + args: q.args, + } +} + +// TODO: These should instead be something like query.New("table_name").Join(a,b).Insert() and just have one multiple function? + +// InsertJoin inserts a join clause on the query +func (q *Query) InsertJoin(a int64, b int64) error { + return q.InsertJoins([]int64{a}, []int64{b}) +} + +// InsertJoins using an array of ids (more general version of above) +// This inserts joins for every possible relation between the ids +func (q *Query) InsertJoins(a []int64, b []int64) error { + + // Make sure we have some data + if len(a) == 0 || len(b) == 0 { + return fmt.Errorf("Null data for joins insert %s", q.table()) + } + + // Check for null entries in start of data - this is not a good idea. 
+ // if a[0] == 0 || b[0] == 0 { + // return fmt.Errorf("Zero data for joins insert %s", q.table()) + // } + + values := "" + for _, av := range a { + for _, bv := range b { + // NB no zero values allowed, we simply ignore zero values + if av != 0 && bv != 0 { + values += fmt.Sprintf("(%d,%d),", av, bv) + } + + } + } + + values = strings.TrimRight(values, ",") + + sql := fmt.Sprintf("INSERT into %s VALUES %s;", q.table(), values) + + if Debug { + fmt.Printf("JOINS SQL:%s\n", sql) + } + + _, err := database.Exec(sql) + return err +} + +// UpdateJoins updates the given joins, using the given id to clear joins first +func (q *Query) UpdateJoins(id int64, a []int64, b []int64) error { + + if Debug { + fmt.Printf("SetJoins %s %s=%d: %v %v \n", q.table(), q.pk(), id, a, b) + } + + // First delete any existing joins + err := q.Where(fmt.Sprintf("%s=?", q.pk()), id).Delete() + if err != nil { + return err + } + + // Now join all a's with all b's by generating joins for each possible combination + + // Make sure we have data in both cases, otherwise do not attempt insert any joins + if len(a) > 0 && len(b) > 0 { + // Now insert all new ids - NB the order of arguments here MUST match the order in the table + err = q.InsertJoins(a, b) + if err != nil { + return err + } + } + + return nil +} + +// Insert inserts a record in the database +func (q *Query) Insert(params map[string]string) (int64, error) { + + // Insert and retrieve ID in one step from db + sql := q.insertSQL(params) + + if Debug { + fmt.Printf("INSERT SQL:%s %v\n", sql, valuesFromParams(params)) + } + + id, err := database.Insert(sql, valuesFromParams(params)...) + if err != nil { + return 0, err + } + + return id, nil +} + +// insertSQL sets the insert sql for update statements, turn params into sql i.e. "col"=? +// NB we always use parameterized queries, never string values. +func (q *Query) insertSQL(params map[string]string) string { + var cols, vals []string + + for i, k := range sortedParamKeys(params) { + cols = append(cols, database.QuoteField(k)) + vals = append(vals, database.Placeholder(i+1)) + } + query := fmt.Sprintf("INSERT INTO %s (%s) VALUES(%s) %s;", q.table(), strings.Join(cols, ","), strings.Join(vals, ","), database.InsertSQL(q.pk())) + + return query +} + +// Update one model specified in this query - the column names MUST be verified in the model +func (q *Query) Update(params map[string]string) error { + // We should check the query has a where limitation to avoid updating all? + // pq unfortunately does not accept limit(1) here + return q.UpdateAll(params) +} + +// Delete one model specified in this relation +func (q *Query) Delete() error { + // We should check the query has a where limitation? + return q.DeleteAll() +} + +// UpdateAll updates all models specified in this relation +func (q *Query) UpdateAll(params map[string]string) error { + // Create sql for update from ALL params + q.Select(fmt.Sprintf("UPDATE %s SET %s", q.table(), querySQL(params))) + + // Execute, after PREpending params to args + // in an update statement, the where comes at the end + q.args = append(valuesFromParams(params), q.args...) 
+ + if Debug { + fmt.Printf("UPDATE SQL:%s\n%v\n", q.QueryString(), valuesFromParams(params)) + } + + _, err := q.Result() + + return err +} + +// DeleteAll delets *all* models specified in this relation +func (q *Query) DeleteAll() error { + + q.Select(fmt.Sprintf("DELETE FROM %s", q.table())) + + if Debug { + fmt.Printf("DELETE SQL:%s <= %v\n", q.QueryString(), q.args) + } + + // Execute + _, err := q.Result() + + return err +} + +// Count fetches a count of model objects (executes SQL). +func (q *Query) Count() (int64, error) { + + // In order to get consistent results, we use the same query builder + // but reset select to simple count select + + // Store the previous select and set + s := q.sel + countSelect := fmt.Sprintf("SELECT COUNT(%s) FROM %s", q.pk(), q.table()) + q.Select(countSelect) + + // Store the previous order (minus order by) and set to empty + // Order must be blank on count because of limited select + o := strings.Replace(q.order, "ORDER BY ", "", 1) + q.order = "" + + // Fetch count from db for our sql with count select and no order set + var count int64 + rows, err := q.Rows() + if err != nil { + return 0, fmt.Errorf("Error querying database for count: %s\nQuery:%s", err, q.QueryString()) + } + + // We expect just one row, with one column (count) + defer rows.Close() + for rows.Next() { + err = rows.Scan(&count) + if err != nil { + return 0, err + } + } + + // Reset select after getting count query + q.Select(s) + q.Order(o) + q.reset() + + return count, err +} + +// Result executes the query against the database, returning sql.Result, and error (no rows) +// (Executes SQL) +func (q *Query) Result() (sql.Result, error) { + results, err := database.Exec(q.QueryString(), q.args...) + return results, err +} + +// Rows executes the query against the database, and return the sql rows result for this query +// (Executes SQL) +func (q *Query) Rows() (*sql.Rows, error) { + results, err := database.Query(q.QueryString(), q.args...) + return results, err +} + +// FirstResult executes the SQL and returrns the first result +func (q *Query) FirstResult() (Result, error) { + + // Set a limit on the query + q.Limit(1) + + // Fetch all results (1) + results, err := q.Results() + if err != nil { + return nil, err + } + + if len(results) == 0 { + return nil, fmt.Errorf("No results found for Query:%s", q.QueryString()) + } + + // Return the first result + return results[0], nil +} + +// ResultInt64 returns the first result from a query stored in the column named col as an int64. +func (q *Query) ResultInt64(c string) (int64, error) { + result, err := q.FirstResult() + if err != nil || result[c] == nil { + return 0, err + } + var i int64 + switch result[c].(type) { + case int64: + i = result[c].(int64) + case int: + i = int64(result[c].(int)) + case float64: + i = int64(result[c].(float64)) + case string: + f, err := strconv.ParseFloat(result[c].(string), 64) + if err != nil { + return i, err + } + i = int64(f) + } + + return i, nil +} + +// ResultFloat64 returns the first result from a query stored in the column named col as a float64. 
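+// For example, a query whose select includes an aggregate such as
+// AVG(price) AS average can return that value with ResultFloat64("average").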
+func (q *Query) ResultFloat64(c string) (float64, error) { + result, err := q.FirstResult() + if err != nil || result[c] == nil { + return 0, err + } + var f float64 + switch result[c].(type) { + case float64: + f = result[c].(float64) + case int: + f = float64(result[c].(int)) + case int64: + f = float64(result[c].(int)) + case string: + f, err = strconv.ParseFloat(result[c].(string), 64) + if err != nil { + return f, err + } + } + + return f, nil +} + +// Results returns an array of results +func (q *Query) Results() ([]Result, error) { + + // Make an empty result set map + var results []Result + + // Fetch rows from db for our sql + rows, err := q.Rows() + + if err != nil { + return results, fmt.Errorf("Error querying database for rows: %s\nQUERY:%s", err, q) + } + + // Close rows before returning + defer rows.Close() + + // Fetch the columns from the database + cols, err := rows.Columns() + if err != nil { + return results, fmt.Errorf("Error fetching columns: %s\nQUERY:%s\nCOLS:%s", err, q, cols) + } + + // For each row, construct an entry in results with a map of column string keys to values + for rows.Next() { + result, err := scanRow(cols, rows) + if err != nil { + return results, fmt.Errorf("Error fetching row: %s\nQUERY:%s\nCOLS:%s", err, q, cols) + } + results = append(results, result) + } + + return results, nil +} + +// ResultIDs returns an array of ids as the result of a query +// FIXME - this should really use the query primary key, not "id" hardcoded +func (q *Query) ResultIDs() []int64 { + var ids []int64 + if Debug { + fmt.Printf("#info ResultIDs:%s\n", q.DebugString()) + } + results, err := q.Results() + if err != nil { + return ids + } + + for _, r := range results { + if r["id"] != nil { + ids = append(ids, r["id"].(int64)) + } + } + + return ids +} + +// ResultIDSets returns a map from a values to arrays of b values, the order of a,b is respected not the table key order +func (q *Query) ResultIDSets(a, b string) map[int64][]int64 { + idSets := make(map[int64][]int64, 0) + + results, err := q.Results() + if err != nil { + return idSets + } + + for _, r := range results { + if r[a] != nil && r[b] != nil { + av := r[a].(int64) + bv := r[b].(int64) + idSets[av] = append(idSets[av], bv) + } + } + if Debug { + fmt.Printf("#info ResultIDSets:%s\n", q.DebugString()) + } + return idSets +} + +// QueryString builds a query string to use for results +func (q *Query) QueryString() string { + + if q.sql == "" { + + // if we have arguments override the selector + if q.sel == "" { + // Note q.table() etc perform quoting on field names + q.sel = fmt.Sprintf("SELECT %s.* FROM %s", q.table(), q.table()) + } + + q.sql = fmt.Sprintf("%s %s %s %s %s %s %s %s", q.sel, q.join, q.where, q.group, q.having, q.order, q.offset, q.limit) + q.sql = strings.TrimRight(q.sql, " ") + q.sql = strings.Replace(q.sql, " ", " ", -1) + q.sql = strings.Replace(q.sql, " ", " ", -1) + + // Replace ? with whatever placeholder db prefers + q.replaceArgPlaceholders() + + q.sql = q.sql + ";" + } + + return q.sql +} + +// CHAINABLE FINDERS + +// Apply the Func to this query, and return the modified Query +// This allows chainable finders from other packages +// e.g. 
q.Apply(status.Published) where status.Published is a Func +func (q *Query) Apply(f Func) *Query { + return f(q) +} + +// Conditions applies a series of query funcs to a query +func (q *Query) Conditions(funcs ...Func) *Query { + for _, f := range funcs { + q = f(q) + } + return q +} + +// SQL defines sql manually and overrides all other setters +// Completely replaces all stored sql +func (q *Query) SQL(sql string) *Query { + q.sql = sql + q.reset() + return q +} + +// Limit sets the sql LIMIT with an int +func (q *Query) Limit(limit int) *Query { + q.limit = fmt.Sprintf("LIMIT %d", limit) + q.reset() + return q +} + +// Offset sets the sql OFFSET with an int +func (q *Query) Offset(offset int) *Query { + q.offset = fmt.Sprintf("OFFSET %d", offset) + q.reset() + return q +} + +// Where defines a WHERE clause on SQL - Additional calls add WHERE () AND () clauses +func (q *Query) Where(sql string, args ...interface{}) *Query { + + if len(q.where) > 0 { + q.where = fmt.Sprintf("%s AND (%s)", q.where, sql) + } else { + q.where = fmt.Sprintf("WHERE (%s)", sql) + } + + // NB this assumes that args are only supplied for where clauses + // this may be an incorrect assumption! + if args != nil { + if q.args == nil { + q.args = args + } else { + q.args = append(q.args, args...) + } + } + + q.reset() + return q +} + +// OrWhere defines a where clause on SQL - Additional calls add WHERE () OR () clauses +func (q *Query) OrWhere(sql string, args ...interface{}) *Query { + + if len(q.where) > 0 { + q.where = fmt.Sprintf("%s OR (%s)", q.where, sql) + } else { + q.where = fmt.Sprintf("WHERE (%s)", sql) + } + + if args != nil { + if q.args == nil { + q.args = args + } else { + q.args = append(q.args, args...) + } + } + + q.reset() + return q +} + +// WhereIn adds a Where clause which selects records IN() the given array +// If IDs is an empty array, the query limit is set to 0 +func (q *Query) WhereIn(col string, IDs []int64) *Query { + // Return no results, so that when chaining callers + // don't have to check for empty arrays + if len(IDs) == 0 { + q.Limit(0) + q.reset() + return q + } + + in := "" + for _, ID := range IDs { + in = fmt.Sprintf("%s%d,", in, ID) + } + in = strings.TrimRight(in, ",") + sql := fmt.Sprintf("%s IN (%s)", col, in) + + if len(q.where) > 0 { + q.where = fmt.Sprintf("%s AND (%s)", q.where, sql) + } else { + q.where = fmt.Sprintf("WHERE (%s)", sql) + } + + q.reset() + return q +} + +// Define a join clause on SQL - we create an inner join like this: +// INNER JOIN extras_seasons ON extras.id = extra_id +// q.Select("SELECT units.* FROM units INNER JOIN sites ON units.site_id = sites.id") + +// rails join example +// INNER JOIN "posts_tags" ON "posts_tags"."tag_id" = "tags"."id" WHERE "posts_tags"."post_id" = 111 + +// Join adds an inner join to the query +func (q *Query) Join(otherModel string) *Query { + modelTable := q.tablename + + tables := []string{ + modelTable, + ToPlural(otherModel), + } + sort.Strings(tables) + joinTable := fmt.Sprintf("%s_%s", tables[0], tables[1]) + + sql := fmt.Sprintf("INNER JOIN %s ON %s.id = %s.%s_id", database.QuoteField(joinTable), database.QuoteField(modelTable), database.QuoteField(joinTable), ToSingular(modelTable)) + + if len(q.join) > 0 { + q.join = fmt.Sprintf("%s %s", q.join, sql) + } else { + q.join = fmt.Sprintf("%s", sql) + } + + q.reset() + return q +} + +// Order defines ORDER BY sql +func (q *Query) Order(sql string) *Query { + if sql == "" { + q.order = "" + } else { + q.order = fmt.Sprintf("ORDER BY %s", sql) + } + q.reset() + + 
return q +} + +// Group defines GROUP BY sql +func (q *Query) Group(sql string) *Query { + if sql == "" { + q.group = "" + } else { + q.group = fmt.Sprintf("GROUP BY %s", sql) + } + q.reset() + return q +} + +// Having defines HAVING sql +func (q *Query) Having(sql string) *Query { + if sql == "" { + q.having = "" + } else { + q.having = fmt.Sprintf("HAVING %s", sql) + } + q.reset() + return q +} + +// Select defines SELECT sql +func (q *Query) Select(sql string) *Query { + q.sel = sql + q.reset() + return q +} + +// DebugString returns a query representation string useful for debugging +func (q *Query) DebugString() string { + return fmt.Sprintf("--\nQuery-SQL:%s\nARGS:%s\n--", q.QueryString(), q.argString()) +} + +// Clear sql/query caches +func (q *Query) reset() { + // Perhaps later clear cached compiled representation of query too + + // clear stored sql + q.sql = "" +} + +// Return an arg string (for debugging) +func (q *Query) argString() string { + output := "-" + + for _, a := range q.args { + output = output + fmt.Sprintf("'%s',", q.argToString(a)) + } + output = strings.TrimRight(output, ",") + output = output + "" + + return output +} + +// Convert arguments to string - used only for debug argument strings +// Not to be exported or used to try to escape strings... +func (q *Query) argToString(arg interface{}) string { + switch arg.(type) { + case string: + return arg.(string) + case []byte: + return string(arg.([]byte)) + case int, int8, int16, int32, uint, uint8, uint16, uint32: + return fmt.Sprintf("%d", arg) + case int64, uint64: + return fmt.Sprintf("%d", arg) + case float32, float64: + return fmt.Sprintf("%f", arg) + case bool: + return fmt.Sprintf("%d", arg) + default: + return fmt.Sprintf("%v", arg) + + } + +} + +// Ask model for primary key name to use +func (q *Query) pk() string { + return database.QuoteField(q.primarykey) +} + +// Ask model for table name to use +func (q *Query) table() string { + return database.QuoteField(q.tablename) +} + +// Replace ? with whatever database prefers (psql uses numbered args) +func (q *Query) replaceArgPlaceholders() { + // Match ? and replace with argument placeholder from database + for i := range q.args { + q.sql = strings.Replace(q.sql, "?", database.Placeholder(i+1), 1) + } +} + +// Sorts the param names given - map iteration order is explicitly random in Go +// but we need params in a defined order to avoid unexpected results. +func sortedParamKeys(params map[string]string) []string { + sortedKeys := make([]string, len(params)) + i := 0 + for k := range params { + sortedKeys[i] = k + i++ + } + sort.Strings(sortedKeys) + + return sortedKeys +} + +// Generate a set of values for the params in order +func valuesFromParams(params map[string]string) []interface{} { + + // NB DO NOT DEPEND ON PARAMS ORDER - see note on SortedParamKeys + var values []interface{} + for _, key := range sortedParamKeys(params) { + values = append(values, params[key]) + } + return values +} + +// Used for update statements, turn params into sql i.e. "col"=? 
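+// For example, params of {"name": "a", "title": "b"} produce "name"=?,"title"=?
+// with keys sorted alphabetically and quoted by the current database adapter.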
+func querySQL(params map[string]string) string { + var output []string + for _, k := range sortedParamKeys(params) { + output = append(output, fmt.Sprintf("%s=?", database.QuoteField(k))) + } + return strings.Join(output, ",") +} + +func scanRow(cols []string, rows *sql.Rows) (Result, error) { + + // We return a map[string]interface{} for each row scanned + result := Result{} + + values := make([]interface{}, len(cols)) + for i := 0; i < len(cols); i++ { + var col interface{} + values[i] = &col + } + + // Scan results into these interfaces + err := rows.Scan(values...) + if err != nil { + return nil, fmt.Errorf("Error scanning row: %s", err) + } + + // Make a string => interface map and hand off to caller + // We fix up a few types which the pq driver returns as less handy equivalents + // We enforce usage of int64 at all times as all our records use int64 + for i := 0; i < len(cols); i++ { + v := *values[i].(*interface{}) + if values[i] != nil { + switch v.(type) { + default: + result[cols[i]] = v + case bool: + result[cols[i]] = v.(bool) + case int: + result[cols[i]] = int64(v.(int)) + case []byte: // text cols are given as bytes + result[cols[i]] = string(v.([]byte)) + case int64: + result[cols[i]] = v.(int64) + } + } + + } + + return result, nil +} diff --git a/vendor/github.com/fragmenta/query/textual.go b/vendor/github.com/fragmenta/query/textual.go new file mode 100644 index 0000000..16c7d8b --- /dev/null +++ b/vendor/github.com/fragmenta/query/textual.go @@ -0,0 +1,134 @@ +package query + +import ( + "bytes" + "strings" +) + +// Truncate the given string to length using … as ellipsis. +func Truncate(s string, length int) string { + return TruncateWithEllipsis(s, length, "…") +} + +// TruncateWithEllipsis truncates the given string to length using provided ellipsis. +func TruncateWithEllipsis(s string, length int, ellipsis string) string { + + l := len(s) + el := len(ellipsis) + if l+el > length { + s = string(s[0:length-el]) + ellipsis + } + return s +} + +// ToPlural returns the plural version of an English word +// using some simple rules and a table of exceptions. 
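+// For example, ToPlural("page") returns "pages", ToPlural("category") returns
+// "categories", and ToPlural("person") returns "people" via the table below.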
+func ToPlural(text string) (plural string) { + + // We only deal with lowercase + word := strings.ToLower(text) + + // Check translations first, and return a direct translation if there is one + if translations[word] != "" { + return translations[word] + } + + // If we have no translation, just follow some basic rules - avoid new rules if possible + if strings.HasSuffix(word, "s") || strings.HasSuffix(word, "z") || strings.HasSuffix(word, "h") { + plural = word + "es" + } else if strings.HasSuffix(word, "y") { + plural = strings.TrimRight(word, "y") + "ies" + } else if strings.HasSuffix(word, "um") { + plural = strings.TrimRight(word, "um") + "a" + } else { + plural = word + "s" + } + + return plural +} + +// common transformations from singular to plural +// Which irregulars are important or correct depends on your usage of English +// Some of those below are now considered old-fashioned and many more could be added +// As this is used for database models, it only needs a limited subset of all irregulars +// NB you should not attempt to reverse and singularize, but just use the singular provided +var translations = map[string]string{ + "hero": "heroes", + "supernova": "supernovae", + "day": "days", + "monkey": "monkeys", + "money": "monies", + "chassis": "chassis", + "sheep": "sheep", + "aircraft": "aircraft", + "fish": "fish", + "nucleus": "nuclei", + "mouse": "mice", + "buffalo": "buffalo", + "species": "species", + "information": "information", + "wife": "wives", + "shelf": "shelves", + "index": "indices", + "matrix": "matrices", + "formula": "formulae", + "millennium": "millennia", + "ganglion": "ganglia", + "octopus": "octopodes", + "man": "men", + "woman": "women", + "person": "people", + "axis": "axes", + "die": "dice", + // ..etc +} + +// ToSingular converts a word to singular. +// NB reversal from plurals may fail +func ToSingular(word string) (singular string) { + + if strings.HasSuffix(word, "ses") || strings.HasSuffix(word, "zes") || strings.HasSuffix(word, "hes") { + singular = strings.TrimRight(word, "es") + } else if strings.HasSuffix(word, "ies") { + singular = strings.TrimRight(word, "ies") + "y" + } else if strings.HasSuffix(word, "a") { + singular = strings.TrimRight(word, "a") + "um" + } else { + singular = strings.TrimRight(word, "s") + } + + return singular +} + +// ToSnake converts a string from struct field names to corresponding database column names (e.g. FieldName to field_name). +func ToSnake(text string) string { + b := bytes.NewBufferString("") + for i, c := range text { + if i > 0 && c >= 'A' && c <= 'Z' { + b.WriteRune('_') + } + b.WriteRune(c) + } + return strings.ToLower(b.String()) +} + +// ToCamel converts a string from database column names to corresponding struct field names (e.g. field_name to FieldName). 
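+// Passing true as the optional private argument returns lower camel case
+// instead, e.g. field_name to fieldName.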
+func ToCamel(text string, private ...bool) string { + lowerCamel := false + if private != nil { + lowerCamel = private[0] + } + b := bytes.NewBufferString("") + s := strings.Split(text, "_") + for i, v := range s { + if len(v) > 0 { + s := v[:1] + if i > 0 || lowerCamel == false { + s = strings.ToUpper(s) + } + b.WriteString(s) + b.WriteString(v[1:]) + } + } + return b.String() +} diff --git a/vendor/github.com/fragmenta/server/LICENSE b/vendor/github.com/fragmenta/server/LICENSE new file mode 100644 index 0000000..c6efb8b --- /dev/null +++ b/vendor/github.com/fragmenta/server/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Mechanism Design Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/fragmenta/server/README.md b/vendor/github.com/fragmenta/server/README.md new file mode 100644 index 0000000..7275400 --- /dev/null +++ b/vendor/github.com/fragmenta/server/README.md @@ -0,0 +1,84 @@ +# server +A wrapper for the net/http server offering a few other features: + +* Config loading from a json config file (for use in setup/handlers) +* Levelled, Structured logging using a flexible set of loggers - easy to add your own custom loggers +* Optional logging middleware for requests +* Scheduling of tasks at specific times of day and intervals +* Uses the new request context in the Go stdlib +* Add tracing to requests so that you can follow a request id through handlers +* Requires Go 1.8 + +Example usage: + +```go + + // Redirect all :80 traffic to our canonical url on :443 + server.StartRedirectAll(80, config.Get("root_url")) + + // If in production, serve over tls with autocerts from let's encrypt + err = server.StartTLSAutocert(config.Get("autocert_email"), config.Get("autocert_domains")) + if err != nil { + server.Fatalf("Error starting server %s", err) + } + +``` + +## Config + +The config package offers access to json config files containing dev/production/test configs. + +Example usage: + +```go + + // Load config + config.Load(path) + + // Get a key from our current config (dev/prod/test) + config.Get("mykey") + +``` + +## Logging + +The logging package offers structured, levelled logging which can be configured to send to a file, stdout, and/or other services like an influxdb server with additional plugin loggers. You can add as many loggers which log events as you want, and because logging is structured, each logger can decide which information to act on. 
Example log output to sdtout is below (real colouring is nicer): + +Example usage: + +```go + + // Set up a stderr logger with time prefix + logger, err := log.NewStdErr(log.PrefixDateTime) + if err != nil { + return err + } + + // Add to the list of loggers receiving events + log.Add(logger) + +``` + +```bash +2017-01-16:00:37:05 Starting server port:3000 #info +2017-01-16:00:37:05 Finished loading assets in 109.483µs #info +2017-01-16:00:37:05 Finished loading templates in 3.184977ms #info +2017-01-16:00:37:05 Finished opening database in 6.387409ms db:mydb user:myuser #info +2017-01-16:00:37:05 Finished loading server in 9.99619ms #info +2017-01-16:00:37:06 <- Request ip:[::1]:64913 len:0 method:GET trace:07466847-28899DB4 url:/ #info +2017-01-16:00:37:06 in handler using request context trace:07466847-28899DB4 #info +2017-01-16:00:37:06 -> Response in 3.005292ms trace:07466847-28899DB4 url:/ #info +2017-01-16:00:37:07 <- Request ip:[::1]:64913 len:0 method:GET trace:A0E55A1B-012DA648 url:/ #info +2017-01-16:00:37:07 in handler using request context trace:A0E55A1B-012DA648 #info +2017-01-16:00:37:07 -> Response in 3.32221ms trace:A0E55A1B-012DA648 url:/ #info +``` + +## Scheduling + +A scheduling facility so that you can schedule actions (like sending a tweet) on app startup. + +```go + + schedule.At(func(){}, context, time, repeatDuration) + +``` diff --git a/vendor/github.com/fragmenta/server/config/config.go b/vendor/github.com/fragmenta/server/config/config.go new file mode 100644 index 0000000..1707a13 --- /dev/null +++ b/vendor/github.com/fragmenta/server/config/config.go @@ -0,0 +1,140 @@ +// Package config offers utilities for parsing a json config file. +// Values are read as strings, and can be fetched with Get, GetInt or GetBool. +// The caller is expected to parse them for more complex types. +package config + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strconv" +) + +const ( + // DefaultPath is where our config is normally found for fragmenta apps. + DefaultPath = "secrets/fragmenta.json" +) + +// Config modes are set when creating a new config +const ( + ModeDevelopment = iota + ModeProduction + ModeTest +) + +// Current is the current configuration object for +var Current *Config + +// Config represents a set of key/value pairs for each mode of the app, +// production, development and test. Which set of values is used +// is set by Mode. +type Config struct { + Mode int + configs []map[string]string +} + +// New returns a new config, which defaults to development +func New() *Config { + return &Config{ + Mode: ModeDevelopment, + configs: make([]map[string]string, 3), + } +} + +// Load our json config file from the path +func (c *Config) Load(path string) error { + + // Read the config json file + file, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("error opening config %s %v", path, err) + } + + var data map[string]map[string]string + err = json.Unmarshal(file, &data) + if err != nil { + return fmt.Errorf("error reading config %s %v", path, err) + } + + if len(data) < 3 { + return fmt.Errorf("error reading config - not enough configs, got :%d expected 3", len(data)) + } + + c.configs[ModeDevelopment] = data["development"] + c.configs[ModeProduction] = data["production"] + c.configs[ModeTest] = data["test"] + + return nil +} + +// Production returns true if current config is production. +func (c *Config) Production() bool { + return c.Mode == ModeProduction +} + +// Configuration returns all the configuration key/values for a given mode. 
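+// NB the mode argument is currently ignored - the values for the receiver's
+// current Mode are returned.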
+func (c *Config) Configuration(m int) map[string]string { + return c.configs[c.Mode] +} + +// Get returns a specific value or "" if no value +func (c *Config) Get(key string) string { + if c == nil { + return "" + } + return c.configs[c.Mode][key] +} + +// GetInt returns the current configuration value as int64, or 0 if no value +func (c *Config) GetInt(key string) int64 { + v := c.Get(key) + if v != "" { + i, err := strconv.ParseInt(v, 10, 64) + if err == nil { + return i + } + } + return 0 +} + +// GetBool returns the current configuration value as bool +// (yes=true, no=false), or false if no value +func (c *Config) GetBool(key string) bool { + v := c.Get(key) + return (v == "yes") +} + +// Config (Get) returns a specific value or "" if no value +// For compatability with older server config, we wrap this function +// Deprecated +func (c *Config) Config(key string) string { + return c.Get(key) +} + +// These convenience functions wrap the Current pkg global + +// Production returns true if current config is production. +func Production() bool { + return Current.Production() +} + +// Configuration returns all the configuration key/values for a given mode. +func Configuration(m int) map[string]string { + return Current.Configuration(m) +} + +// Get returns a specific value or "" if no value +func Get(key string) string { + return Current.Get(key) +} + +// GetInt returns the current configuration value as int64, or 0 if no value +func GetInt(key string) int64 { + return Current.GetInt(key) +} + +// GetBool returns the current configuration value as bool +// (yes=true, no=false), or false if no value +func GetBool(key string) bool { + return Current.GetBool(key) +} diff --git a/vendor/github.com/fragmenta/server/deprecated.go b/vendor/github.com/fragmenta/server/deprecated.go new file mode 100644 index 0000000..c34b24b --- /dev/null +++ b/vendor/github.com/fragmenta/server/deprecated.go @@ -0,0 +1,162 @@ +package server + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "strconv" + "time" +) + +// Deprecated - use server/log pkg instead to log + +// Logger interface for a logger - deprecated for 2.0 +type Logger interface { + Printf(format string, args ...interface{}) +} + +// Logf logs the message with the given arguments to our internal logger +func (s *Server) Logf(format string, v ...interface{}) { + s.Logger.Printf(format, v...) +} + +// Log logs the message to our internal logger +func (s *Server) Log(message string) { + s.Logf(message) +} + +// Fatalf the message with the given arguments to our internal logger, and then exits with status 1 +func (s *Server) Fatalf(format string, v ...interface{}) { + s.Logger.Printf(format, v...) + + // Now exit + os.Exit(1) +} + +// Fatal logs the message, and then exits with status 1 +func (s *Server) Fatal(format string) { + s.Fatalf(format) +} + +// Timef logs a time since starting, when used with defer at the start of a function to time +// Usage: defer s.Timef("Completed %s in %s",time.Now(),args...) +func (s *Server) Timef(format string, start time.Time, v ...interface{}) { + end := time.Since(start).String() + var args []interface{} + args = append(args, end) + args = append(args, v...) + s.Logf(format, args...) +} + +// Deprecated - this config parsing (mostly internal to the server anyway) +// has been moved to a new file and will be removed in 2.0 +// Instead of passing config to setup and thence to handlers via router context, +// apps/handlers should use server/config to access it if required. 
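+//
+// For example, from app code (a sketch using the config package above):
+//
+//	config.Load(config.DefaultPath)
+//	port := config.Get("port")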
+ +// Mode returns the mode (production or development) +func (s *Server) Mode() string { + if s.production { + return "Production" + } + return "Development" +} + +// SetProduction sets the mode manually to SetProduction +func (s *Server) SetProduction(value bool) { + s.production = value +} + +// Production tells the caller if this server is in production mode or not? +func (s *Server) Production() bool { + return s.production +} + +// Configuration returns the map of configuration keys to values +func (s *Server) Configuration() map[string]string { + if s.production { + return s.configProduction + } + return s.configDevelopment + +} + +// Config returns a specific configuration value or "" if no value +func (s *Server) Config(key string) string { + return s.Configuration()[key] +} + +// ConfigInt returns the current configuration value as int64, or 0 if no value +func (s *Server) ConfigInt(key string) int64 { + v := s.Config(key) + if v != "" { + i, err := strconv.ParseInt(v, 10, 64) + if err == nil { + return i + } + } + return 0 +} + +// ConfigBool returns the current configuration value as bool (yes=true, no=false), or false if no value +func (s *Server) ConfigBool(key string) bool { + v := s.Config(key) + return (v == "yes") +} + +// configPath returns our expected config file path +func (s *Server) configPath() string { + return "secrets/fragmenta.json" +} + +// Read our config file and set up the server accordingly +func (s *Server) readConfig() error { + + path := s.configPath() + + // Read the config json file + file, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("Error opening config %s %v", path, err) + } + + var data map[string]map[string]string + err = json.Unmarshal(file, &data) + if err != nil { + return fmt.Errorf("Error reading config %s %v", path, err) + } + + s.configDevelopment = data["development"] + s.configProduction = data["production"] + s.configTest = data["test"] + + // Update our port from the config port if we have it + portString := s.Config("port") + if portString != "" { + s.port, err = strconv.Atoi(portString) + if err != nil { + return fmt.Errorf("Error reading port %s", err) + } + } + + return nil +} + +// Deprecated - the server relies on config for lots of settings +// the port can be changed in config instead for development easily. 
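+// For example, set "port" in secrets/fragmenta.json rather than passing -p on the command line.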
+// This flag will be removed in 2.0 + +// readArguments reads command line arguments +func (s *Server) readArguments() error { + + var p int + flag.IntVar(&p, "p", p, "Port") + flag.Parse() + + if p > 0 { + s.port = p + } + + return nil +} diff --git a/vendor/github.com/fragmenta/server/errors.go b/vendor/github.com/fragmenta/server/errors.go new file mode 100644 index 0000000..aae052d --- /dev/null +++ b/vendor/github.com/fragmenta/server/errors.go @@ -0,0 +1,97 @@ +package server + +import ( + "fmt" + "net/http" + "runtime" + "strings" +) + +// StatusError wraps a std error and stores more information (status code, display title/msg and caller info) +type StatusError struct { + Err error + Status int + Title string + Message string + File string + Line int +} + +// Error returns the underling error string - it should not be shown in production +func (e *StatusError) Error() string { + return fmt.Sprintf("Status %d at %s : %s", e.Status, e.FileLine(), e.Err) +} + +// String returns a string represenation of this error, useful for debugging +func (e *StatusError) String() string { + return fmt.Sprintf("Status %d at %s : %s %s %s", e.Status, e.FileLine(), e.Title, e.Message, e.Err) +} + +// FileLine returns file name and line of error +func (e *StatusError) FileLine() string { + parts := strings.Split(e.File, "/") + f := strings.Join(parts[len(parts)-4:len(parts)], "/") + return fmt.Sprintf("%s:%d", f, e.Line) +} + +func (e *StatusError) setupFromArgs(args ...string) *StatusError { + if e.Err == nil { + e.Err = fmt.Errorf("Error:%d", e.Status) + } + if len(args) > 0 { + e.Title = args[0] + } + if len(args) > 1 { + e.Message = args[1] + } + return e +} + +// NotFoundError returns a new StatusError with Status StatusNotFound and optional Title and Message +// Usage return router.NotFoundError(err,"Optional Title", "Optional user-friendly Message") +func NotFoundError(e error, args ...string) *StatusError { + err := Error(e, http.StatusNotFound, "Not Found", "Sorry, the page you're looking for couldn't be found.") + return err.setupFromArgs(args...) +} + +// InternalError returns a new StatusError with Status StatusInternalServerError and optional Title and Message +// Usage: return router.InternalError(err) +func InternalError(e error, args ...string) *StatusError { + err := Error(e, http.StatusInternalServerError, "Server Error", "Sorry, something went wrong, please let us know.") + return err.setupFromArgs(args...) +} + +// NotAuthorizedError returns a new StatusError with Status StatusUnauthorized and optional Title and Message +func NotAuthorizedError(e error, args ...string) *StatusError { + err := Error(e, http.StatusUnauthorized, "Not Allowed", "Sorry, I can't let you do that.") + return err.setupFromArgs(args...) +} + +// BadRequestError returns a new StatusError with Status StatusBadRequest and optional Title and Message +func BadRequestError(e error, args ...string) *StatusError { + err := Error(e, http.StatusBadRequest, "Bad Request", "Sorry, there was an error processing your request, please check your data.") + return err.setupFromArgs(args...) 
+} + +// Error returns a new StatusError with code StatusInternalServerError and a generic message +func Error(e error, s int, t string, m string) *StatusError { + // Get runtime info - use zero values if none available + _, f, l, _ := runtime.Caller(2) + err := &StatusError{ + Status: s, + Err: e, + Title: t, + Message: m, + File: f, + Line: l, + } + return err +} + +// ToStatusError returns a *StatusError or wraps a standard error in a 500 StatusError +func ToStatusError(e error) *StatusError { + if err, ok := e.(*StatusError); ok { + return err + } + return Error(e, http.StatusInternalServerError, "Error", "Sorry, an error occurred.") +} diff --git a/vendor/github.com/fragmenta/server/headers.go b/vendor/github.com/fragmenta/server/headers.go new file mode 100644 index 0000000..62bd022 --- /dev/null +++ b/vendor/github.com/fragmenta/server/headers.go @@ -0,0 +1,28 @@ +package server + +import ( + "fmt" + "net/http" + "time" +) + +// + +// daysToSeconds is a time constant for converting days to seconds +const daysToSeconds = 86400 + +// Date format is the preferred date format for the Expires header +const dateFormat = "Mon, 2 Jan 2006 15:04:05 MST" + +// AddCacheHeaders adds Cache-Control, Expires and Etag headers +// using the age in days and content hash provided +func AddCacheHeaders(w http.ResponseWriter, days int, hash string) { + // Cache for the given age in days + w.Header().Set("Cache-Control", fmt.Sprintf("max-age:%d", days*daysToSeconds)) + + // Set an expires header of form Mon Jan 2 15:04:05 -0700 MST 2006 + w.Header().Set("Expires", time.Now().AddDate(0, 0, days).UTC().Format(dateFormat)) + + // For etag send the hash given + w.Header().Set("ETag", fmt.Sprintf("\"%s\"", hash)) +} diff --git a/vendor/github.com/fragmenta/server/log/default.go b/vendor/github.com/fragmenta/server/log/default.go new file mode 100644 index 0000000..1fc06cd --- /dev/null +++ b/vendor/github.com/fragmenta/server/log/default.go @@ -0,0 +1,141 @@ +package log + +import ( + "fmt" + "io" + "os" + "sort" + "time" +) + +const ( + // Separator is used to separate key:value pairs + Separator = ":" + // PrefixDate constant for date prefixes + PrefixDate = "2006-01-02 " + // PrefixTime constants for time prefix + PrefixTime = "15:04:05 " + // PrefixDateTime constants for date + time prefix + PrefixDateTime = "2006-01-02:15:04:05 " +) + +// NewStdErr returns a new StructuredLogger of type Default which writes to stderr. By default +// prefix is empty, and level is LevelDebug (the lowest), +// so all output is captured. +func NewStdErr(prefix string) (*Default, error) { + d := &Default{ + Prefix: prefix, // Treated as a time format if set + Level: LevelInfo, + Writer: os.Stderr, + Color: true, + } + return d, nil +} + +// Default defines a default logger which simply logs to Writer, +// Writer is set to stderr, Level is LevelDebug and Prefix is empty by default. +type Default struct { + + // Prefix is used to prefix any log lines emitted. + Prefix string + + // Level is the level above which input is ignored. + Level int + + // Writer is the output of this logger. + Writer io.Writer + + // Color sets whether terminal colour instructions are emitted. + Color bool +} + +// Log logs the key:value pairs given to the writer. Keys are sorted before +// output in alphabetical order to ensure consistent results. 
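+// Entries below d.Level are dropped, and the msg and duration values (if
+// present) are written first, before the remaining sorted keys.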
+func (d *Default) Log(values V) { + l := d.LevelValue(values) + if l < d.Level { + return + } + + // Start by writing the prefix (treated as a time format string) + d.WriteString(time.Now().UTC().Format(d.Prefix)) + + // If keys contains message, extract that first + msg, ok := values[MessageKey].(string) + if ok { + d.WriteString(msg + " ") + } + // If keys contains duration, extract that next + duration, ok := values[DurationKey].(time.Duration) + if ok { + d.WriteString("in " + duration.String() + " ") + } + + // Now print other keys with colouring + var prefix, suffix string + keys := d.SortedKeys(values) + for _, k := range keys { + d.WriteString(k) + d.WriteString(Separator) + + switch k { + case IPKey: + fallthrough + case TraceKey: + d.WriteString(fmt.Sprintf("%s%v%s ", TraceColor, values[k], ClearColors)) + default: + d.WriteString(fmt.Sprintf("%v ", values[k])) + } + + } + + if d.Color { + prefix = d.LevelColor(l) + suffix = ClearColors + } + d.WriteString(fmt.Sprintf("%s#%v%s ", prefix, d.LevelName(l), suffix)) + + d.WriteString("\n") + +} + +// WriteString writes the string to the Writer. +func (d *Default) WriteString(s string) { + d.Writer.Write([]byte(s)) +} + +// LevelValue extracts the Level from values (if present) or returns 0 if not. +func (d *Default) LevelValue(values V) int { + l, ok := values[LevelKey].(int) + if ok { + return l + } + return 0 +} + +// LevelName returns the human-readable name for this level. +func (d *Default) LevelName(l int) string { + return LevelNames[l] +} + +// LevelColor returns the human-readable colour for this level. +func (d *Default) LevelColor(l int) string { + return LevelColors[l] +} + +// SortedKeys returns an array of keys for a map sorted in alpha order, +// this means we get a predictable order for the map entries when we print. +// The special keys level and message are ommitted. 
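+// (The duration key is skipped here too, as Log prints it separately.)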
+func (d *Default) SortedKeys(values V) []string { + var keys []string + for k := range values { + // Ignore these special keys + if k == DurationKey || k == MessageKey || k == LevelKey { + continue + } + // Append the sorted key + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/fragmenta/server/log/deprecated.go b/vendor/github.com/fragmenta/server/log/deprecated.go new file mode 100644 index 0000000..4ae685e --- /dev/null +++ b/vendor/github.com/fragmenta/server/log/deprecated.go @@ -0,0 +1,85 @@ +package log + +// This file contains MultiWriter logging to a file and stdout, +// it has now been deprecated in favour of structured logging found in log.go +// It will be removed in Fragmenta 2.0 + +import ( + "io" + stdlog "log" + "os" + "strings" +) + +// We accept hashtags like #error in log messages, which can be used to filter messages +// These tags can also be used to indicate the level of the message +// NB if filter is set we only output messages containg filter string + +// Logger conforms with the server.Logger interface +type Logger struct { + log *stdlog.Logger + Filter string +} + +// New creates a new Logger which writes to a file and to stderr +func New(path string, production bool) *Logger { + var logWriter io.Writer + stdlog.SetFlags(stdlog.Llongfile) + // doubleWriter writes to stdErr and to a file + logFile, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) + if err != nil { + logWriter = io.MultiWriter(os.Stderr) + } else { + + // Do not write to Stderr in production + if production { + logWriter = io.MultiWriter(logFile) + } else { + logWriter = io.MultiWriter(os.Stderr, logFile) + } + + } + + // By default logger logs to console and a file + l := stdlog.New(logWriter, "", stdlog.Ldate|stdlog.Ltime) + if l == nil { + stdlog.Printf("Error setting up log at path %s", path) + } + + logger := &Logger{ + log: l, + Filter: "", + } + + logger.Printf("#info Opened log file at %s", path) + return logger +} + +// Printf logs events selectively given our filter +func (l *Logger) Printf(format string, args ...interface{}) { + + if l.Filter == "" { + // If we have no filter, print all + l.writeLog(format, args...) + } else if strings.Contains(format, l.Filter) { + // if we have a filter, print only those messages which match it (e.g. only match #error in production) + l.writeLog(format, args...) + } + +} + +// Log events to the server log file and other output +func (l *Logger) writeLog(format string, args ...interface{}) { + + if l.log != nil { + if strings.Contains(format, "%") { + l.log.Printf(format, args...) + } else { + l.log.Print(format) + } + } else { + // If we failed to create a log, just log something to stdout + stdlog.Printf(format, args...) + } + +} diff --git a/vendor/github.com/fragmenta/server/log/file.go b/vendor/github.com/fragmenta/server/log/file.go new file mode 100644 index 0000000..27f4b07 --- /dev/null +++ b/vendor/github.com/fragmenta/server/log/file.go @@ -0,0 +1,46 @@ +package log + +import ( + "errors" + "os" +) + +// File logs to a local file for all messages at or above Level. +type File struct { + Default // File embeds default + Path string +} + +const ( + // FileFlags serts the flags for OpenFile on the log file + FileFlags = os.O_WRONLY | os.O_APPEND | os.O_CREATE + + // FilePermissions serts the perms for OpenFile on the log file + FilePermissions = 0640 +) + +// NewFile creates a new file logger for the given path at Level Info. 
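+// Usage (the path shown is only an example):
+//	logger, err := log.NewFile("log/production.log")
+//	if err == nil {
+//		log.Add(logger)
+//	}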
+func NewFile(path string) (*File, error) { + if path == "" { + return nil, errors.New("log: null file path for file log") + } + + f := &File{ + Default: Default{ + Prefix: PrefixDateTime, + Level: LevelInfo, + Writer: nil, + Color: false, + }, + } + + // Set the writer to the given file + logFile, err := os.OpenFile(path, FileFlags, FilePermissions) + if err != nil { + return nil, err + } + + f.Writer = logFile + + return f, nil +} diff --git a/vendor/github.com/fragmenta/server/log/log.go b/vendor/github.com/fragmenta/server/log/log.go new file mode 100644 index 0000000..bc6107d --- /dev/null +++ b/vendor/github.com/fragmenta/server/log/log.go @@ -0,0 +1,131 @@ +// Package log provides a structured, levelled logger interface +// for use in server handlers, which handles multiple output streams. +// A typical use might be to log everything to stderr, but to add another +// logger to send important data off to +// The Default logger simply logs to stderr, a local File logger is available, +// and data can be extracted and sent elsewhere by additional loggers +// (for example page hits to a stats service). +// +// Usage: +// logger,err := log.NewStdErr() +// log.Add(logger) +// log.Error(log.V{"key":value,"key":value}) +// +package log + +import ( + "os" + "time" +) + +const ( + // LevelKey is the key for setting level + LevelKey = "level" + // MessageKey is the key for a message + MessageKey = "msg" + // DurationKey is used by the Time function + DurationKey = "duration" + // ErrorKey is used for errors + ErrorKey = "error" + // IPKey is used for IP addresses (for colouring) + IPKey = "ip" + // URLKey is used for identifying URLs (for filtering) + URLKey = "url" + // TraceKey is used for trace ids emitted in middleware + TraceKey = "trace" +) + +// Debug sends the key/value map at level Debug to all registered (log)gers. +func Debug(values map[string]interface{}) { + values[LevelKey] = LevelDebug + Log(values) +} + +// Info sends the key/value map at level Info to all registered loggers. +func Info(values map[string]interface{}) { + values[LevelKey] = LevelInfo + Log(values) +} + +// Error sends the key/value map at level Error to all registered loggers. +func Error(values map[string]interface{}) { + values[LevelKey] = LevelError + Log(values) +} + +// Fatal sends the key/value map at level Fatal to all registered loggers, +// no other action is taken. +func Fatal(values map[string]interface{}) { + values[LevelKey] = LevelFatal + Log(values) +} + +// Time sends the key/value map to all registered loggers with an additional duration, start and end params set. +func Time(start time.Time, values map[string]interface{}) { + values[DurationKey] = time.Now().UTC().Sub(start) + Log(values) +} + +// Log sends the key/value map to all registered loggers. If level is not set, +// it defaults to LevelInfo. +func Log(values map[string]interface{}) { + _, ok := values[LevelKey] + if !ok { + values[LevelKey] = LevelInfo + } + + for _, l := range loggers { + l.Log(values) + } +} + +// Add adds the given logger to the list of outputs, +// it should not be called from other goroutines. +func Add(l StructuredLogger) { + loggers = append(loggers, l) +} + +// Valid levels for logging. +const ( + LevelNone = iota + LevelDebug + LevelInfo + LevelError + LevelFatal +) + +var ( + // LevelNames is a list of human-readable for levels. 
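+ // (indexed by the level constants above, LevelNone through LevelFatal)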
+ LevelNames = []string{"none", "debug", "info", "error", "fatal"} + + // NoColor determines if a terminal is colourable or not + NoColor = os.Getenv("TERM") == "dumb" + + // LevelColors is a list of human-friendly terminal colors for levels. + LevelColors = []string{"\033[0m", "\033[34m", "\033[32m", "\033[33m", "\033[31m"} + + // TraceColor sets a for IP addresses or request id + TraceColor = "\033[33m" + + // ClearColors clears all formatting + ClearColors = "\033[0m" +) + +// This variable stores multiple loggers, which may decide whether +// to print or not depending on the level and/or message content. +// They may log to a file, stderr, or over the network, and different +// destinations may all log the same messages. +var loggers []StructuredLogger + +// StructuredLogger defines an interface for loggers +// which may be added with Add() to the list of outputs. +type StructuredLogger interface { + Log(V) +} + +// Values is a map of structured key value pairs +// usage: log.Warn(log.Values{"user":1,"foo":"bar"}) +type Values map[string]interface{} + +// V is a shorthand for values +type V map[string]interface{} diff --git a/vendor/github.com/fragmenta/server/log/middleware.go b/vendor/github.com/fragmenta/server/log/middleware.go new file mode 100644 index 0000000..b63e608 --- /dev/null +++ b/vendor/github.com/fragmenta/server/log/middleware.go @@ -0,0 +1,91 @@ +package log + +import ( + "context" + "crypto/rand" + "fmt" + "net/http" + "strings" + "time" +) + +// RequestID is but a simple token for tracing requests. +type RequestID struct { + id []byte +} + +// String returns a string formatting for the request id. +func (r *RequestID) String() string { + return fmt.Sprintf("%X-%X-%X-%X", r.id[0:2], r.id[2:4], r.id[4:6], r.id[6:8]) +} + +// NewRequestID returns a new random request id. +func newRequestID() *RequestID { + r := &RequestID{ + id: make([]byte, 8), + } + rand.Read(r.id) + return r +} + +type ctxKey struct{} + +// Trace retreives the request id from a request as a string. +func Trace(r *http.Request) string { + rid, ok := r.Context().Value(&ctxKey{}).(*RequestID) + if ok { + return rid.String() + } + return "" +} + +// GetRequestID retreives the request id from a request. +func GetRequestID(r *http.Request) *RequestID { + return r.Context().Value(&ctxKey{}).(*RequestID) +} + +// SetRequestID saves the request id in the request context. +func SetRequestID(r *http.Request, rid *RequestID) *http.Request { + ctx := r.Context() + ctx = context.WithValue(ctx, &ctxKey{}, rid) + return r.WithContext(ctx) +} + +// Middleware adds a logging wrapper and request tracing to requests. 
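+// Usage (handleHome is an example handler func):
+//	http.HandleFunc("/", log.Middleware(handleHome))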
+func Middleware(h http.HandlerFunc) http.HandlerFunc { + + return func(w http.ResponseWriter, r *http.Request) { + requestID := newRequestID() + r = SetRequestID(r, requestID) // Sets on context for handlers + + level := LevelInfo + + // For assets etc, use level debug as they clutter up logs + if r.URL.Path == "/favicon.ico" || + strings.HasPrefix(r.URL.Path, "/assets") || + strings.HasPrefix(r.URL.Path, "/stats") { + level = LevelDebug + } + + Log(Values{ + MessageKey: "<- Request", + "method": r.Method, + URLKey: r.RequestURI, + "len": r.ContentLength, + IPKey: r.RemoteAddr, + TraceKey: requestID.String(), + LevelKey: level, + }) + + start := time.Now() + h(w, r) + + Time(start, Values{ + MessageKey: "-> Response", + URLKey: r.RequestURI, + TraceKey: requestID.String(), + LevelKey: level, + }) + } + +} diff --git a/vendor/github.com/fragmenta/server/redirects.go b/vendor/github.com/fragmenta/server/redirects.go new file mode 100644 index 0000000..fad3c88 --- /dev/null +++ b/vendor/github.com/fragmenta/server/redirects.go @@ -0,0 +1,40 @@ +package server + +import ( + "fmt" + "net/http" + "strings" +) + +// Redirect uses status 302 StatusFound by default - this is not a permanent redirect +// We don't accept external or relative paths for security reasons +func Redirect(w http.ResponseWriter, r *http.Request, path string) error { + // 301 - http.StatusMovedPermanently - permanent redirect + // 302 - http.StatusFound - tmp redirect + return RedirectStatus(w, r, path, http.StatusFound) +} + +// RedirectStatus redirects setting the status code (for example unauthorized) +// We don't accept external or relative paths for security reasons +func RedirectStatus(w http.ResponseWriter, r *http.Request, path string, status int) error { + + // We check this is an internal path - to redirect externally use http.Redirect directly + if strings.HasPrefix(path, "/") && !strings.Contains(path, ":") { + // Status may be any value, e.g. + // 301 - http.StatusMovedPermanently - permanent redirect + // 302 - http.StatusFound - tmp redirect + // 401 - Access denied + http.Redirect(w, r, path, status) + return nil + } + + return fmt.Errorf("server: ignoring insecure redirect to external path %s", path) +} + +// RedirectExternal redirects setting the status code +// (for example unauthorized), but does no checks on the path +// Use with caution and only on paths *fixed at compile time*. +func RedirectExternal(w http.ResponseWriter, r *http.Request, path string) error { + http.Redirect(w, r, path, http.StatusFound) + return nil +} diff --git a/vendor/github.com/fragmenta/server/server.go b/vendor/github.com/fragmenta/server/server.go new file mode 100644 index 0000000..ef3b8cd --- /dev/null +++ b/vendor/github.com/fragmenta/server/server.go @@ -0,0 +1,212 @@ +// Package server is a wrapper around the stdlib http server and x/autocert pkg. +package server + +import ( + "crypto/tls" + "fmt" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/crypto/acme/autocert" +) + +// Server wraps the stdlib http server and x/autocert pkg with some setup. 
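+//
+// A typical flow (sketch): create the server with New(), then call Start(),
+// StartTLS() or StartTLSAutocert() depending on the environment.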
+type Server struct { + + // Which port to serve on - in 2.0 pass as argument for New() + port int + + // Which mode we're in, read from ENV variable + // Deprecated - due to be removed in 2.0 + production bool + + // Deprecated Logging - due to be removed in 2.0 + // Instead use the structured logging with server/log + Logger Logger + + // Deprecated configs will be removed from the server object in 2.0 + // Use server/config instead to read the config from app. + // Server configs - access with Config(string) + configProduction map[string]string + configDevelopment map[string]string + configTest map[string]string +} + +// New creates a new server instance +func New() (*Server, error) { + + // Check environment variable to see if we are in production mode + prod := false + if os.Getenv("FRAG_ENV") == "production" { + prod = true + } + + // Set up a new server + s := &Server{ + port: 3000, + production: prod, + configProduction: make(map[string]string), + configDevelopment: make(map[string]string), + configTest: make(map[string]string), + Logger: log.New(os.Stderr, "fragmenta: ", log.LstdFlags), + } + + // Old style config read - this will be going away in Fragmenta 2.0 + // use server/config instead from the app + err := s.readConfig() + if err != nil { + return s, err + } + err = s.readArguments() + if err != nil { + return s, err + } + + return s, err +} + +// Port returns the port of the server +func (s *Server) Port() int { + return s.port +} + +// PortString returns a string port suitable for passing to http.Server +func (s *Server) PortString() string { + return fmt.Sprintf(":%d", s.port) +} + +// Start starts an http server on the given port +func (s *Server) Start() error { + server := &http.Server{ + // Set the port in the preferred string format + Addr: s.PortString(), + + // The default server from net/http has no timeouts - set some limits + ReadHeaderTimeout: 30 * time.Second, + ReadTimeout: 60 * time.Second, + WriteTimeout: 60 * time.Second, + IdleTimeout: 10 * time.Second, // IdleTimeout was introduced in Go 1.8 + + } + return server.ListenAndServe() +} + +// StartTLS starts an https server on the given port +// with tls cert/key from config keys. +// Settings based on an article by Filippo Valsorda. +// https://blog.cloudflare.com/exposing-go-on-the-internet/ +func (s *Server) StartTLS(cert, key string) error { + + // Set up a new http server + server := &http.Server{ + // Set the port in the preferred string format + Addr: s.PortString(), + + // The default server from net/http has no timeouts - set some limits + ReadHeaderTimeout: 30 * time.Second, + ReadTimeout: 60 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 10 * time.Second, // IdleTimeout was introduced in Go 1.8 + + // This TLS config follows recommendations in the above article + TLSConfig: &tls.Config{ + // VersionTLS11 or VersionTLS12 would exclude many browsers + // inc. Android 4.x, IE 10, Opera 12.17, Safari 6 + // So unfortunately not acceptable as a default yet + // Current default here for clarity + MinVersion: tls.VersionTLS10, + + // Causes servers to use Go's default ciphersuite preferences, + // which are tuned to avoid attacks. Does nothing on clients. 
+ PreferServerCipherSuites: true, + // Only use curves which have assembly implementations + CurvePreferences: []tls.CurveID{ + tls.CurveP256, + tls.X25519, // Go 1.8 only + }, + }, + } + + return server.ListenAndServeTLS(cert, key) +} + +// StartTLSAutocert starts an https server on the given port +// by requesting certs from an ACME provider. +// The server must be on a public IP which matches the +// DNS for the domains. +func (s *Server) StartTLSAutocert(email string, domains string) error { + autocertDomains := strings.Split(domains, " ") + certManager := &autocert.Manager{ + Prompt: autocert.AcceptTOS, + Email: email, // Email for problems with certs + HostPolicy: autocert.HostWhitelist(autocertDomains...), // Domains to request certs for + Cache: autocert.DirCache("secrets"), // Cache certs in secrets folder + } + server := s.ConfiguredTLSServer(certManager) + return server.ListenAndServeTLS("", "") +} + +// ConfiguredTLSServer returns a TLS server instance with a secure config +// this server has read/write timeouts set to 20 seconds, +// prefers server cipher suites and only uses certain accelerated curves +// see - https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/ +func (s *Server) ConfiguredTLSServer(certManager *autocert.Manager) *http.Server { + + return &http.Server{ + // Set the port in the preferred string format + Addr: s.PortString(), + + // The default server from net/http has no timeouts - set some limits + ReadHeaderTimeout: 30 * time.Second, + ReadTimeout: 60 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 10 * time.Second, // IdleTimeout was introduced in Go 1.8 + + // This TLS config follows recommendations in the above article + TLSConfig: &tls.Config{ + // Pass in a cert manager if you want one set + // this will only be used if the server Certificates are empty + GetCertificate: certManager.GetCertificate, + + // VersionTLS11 or VersionTLS12 would exclude many browsers + // inc. Android 4.x, IE 10, Opera 12.17, Safari 6 + // So unfortunately not acceptable as a default yet + // Current default here for clarity + MinVersion: tls.VersionTLS10, + + // Causes servers to use Go's default ciphersuite preferences, + // which are tuned to avoid attacks. Does nothing on clients. + PreferServerCipherSuites: true, + // Only use curves which have assembly implementations + CurvePreferences: []tls.CurveID{ + tls.CurveP256, + tls.X25519, // Go 1.8 only + }, + }, + } + +} + +// StartRedirectAll starts redirecting all requests on the given port to the given host +// this should be called before StartTLS if redirecting http on port 80 to https +func (s *Server) StartRedirectAll(p int, host string) { + port := fmt.Sprintf(":%d", p) + // Listen and server on port p in a separate goroutine + go func() { + http.ListenAndServe(port, &redirectHandler{host: host}) + }() +} + +// redirectHandler is useful if serving tls direct (not behind a proxy) +// and a redirect from port 80 is required. 
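+// For example, StartRedirectAll(80, "https://example.com") sends all plain
+// http traffic to the https host (the domain here is illustrative).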
+type redirectHandler struct { + host string +} + +// ServeHTTP on this handler simply redirects to the main site +func (m *redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, m.host+r.URL.String(), http.StatusFound) +} diff --git a/vendor/github.com/fragmenta/view/LICENSE b/vendor/github.com/fragmenta/view/LICENSE new file mode 100644 index 0000000..c6efb8b --- /dev/null +++ b/vendor/github.com/fragmenta/view/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Mechanism Design Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/fragmenta/view/README.md b/vendor/github.com/fragmenta/view/README.md new file mode 100644 index 0000000..971dbb8 --- /dev/null +++ b/vendor/github.com/fragmenta/view/README.md @@ -0,0 +1,31 @@ +# view +Package view provides template registration, rendering, and helper functions for golang views + +### Usage + +Load templates on app startup: + +```Go + err := view.LoadTemplates() + if err != nil { + server.Fatalf("Error reading templates %s", err) + } +``` + +Render a template + +```Go + // Set up the view + view := view.New(context) + // Add a key to the view + view.AddKey("page", page) + // Optionally set template, layout or other attributes + view.Template("src/pages/views/home.html.got") + // Render the view + return view.Render() +``` + + +Public subpackages: + +* helpers - utilities for handling files diff --git a/vendor/github.com/fragmenta/view/deprecated.go b/vendor/github.com/fragmenta/view/deprecated.go new file mode 100644 index 0000000..f903d43 --- /dev/null +++ b/vendor/github.com/fragmenta/view/deprecated.go @@ -0,0 +1,49 @@ +package view + +import ( + "net/http" +) + +// RenderContext is the type passed in to New, which helps construct the rendering view +// Alternatively, you can use NewWithPath, which doesn't require a RenderContext +type RenderContext interface { + Path() string + RenderContext() map[string]interface{} + Writer() http.ResponseWriter +} + +// New creates a new Renderer +func New(c RenderContext) *Renderer { + r := &Renderer{ + path: c.Path(), + layout: "app/views/layout.html.got", + template: "", + format: "text/html", + status: http.StatusOK, + context: c.RenderContext(), + writer: c.Writer(), + } + + // This sets layout and template based on the view.path + r.setDefaultTemplates() + + return r +} + +// NewWithPath creates a new Renderer with a path and an http.ResponseWriter +func NewWithPath(p string, w http.ResponseWriter) *Renderer { + r := &Renderer{ + path: p, 
+ layout: "app/views/layout.html.got", + template: "", + format: "text/html", + status: http.StatusOK, + context: make(map[string]interface{}, 0), + writer: w, + } + + // This sets layout and template based on the view.path + r.setDefaultTemplates() + + return r +} diff --git a/vendor/github.com/fragmenta/view/helpers/forms.go b/vendor/github.com/fragmenta/view/helpers/forms.go new file mode 100644 index 0000000..030c9b8 --- /dev/null +++ b/vendor/github.com/fragmenta/view/helpers/forms.go @@ -0,0 +1,297 @@ +package helpers + +import ( + "fmt" + got "html/template" + "reflect" + "strconv" + "strings" + "time" +) + +// FORMS + +// These should probably use templates from or from lib, so that users can change what form fields get generated +// and use templ rather than fmt.Sprintf + +// We need to set this token in the session on the get request for the form + +// CSRF generates an input field tag containing a CSRF token +func CSRF() got.HTML { + token := "my_csrf_token" // instead of generating this here, should we instead get router or app to generate and put into the context? + output := fmt.Sprintf("", token) + return got.HTML(output) +} + +// Field accepts name string, value interface{}, fieldType string, args ...string +func Field(label string, name string, v interface{}, args ...string) got.HTML { + attributes := "" + if len(args) > 0 { + attributes = strings.Join(args, " ") + } + // If no type, add it to attributes + if !strings.Contains(attributes, "type=") { + attributes = attributes + " type=\"text\"" + } + + tmpl := + `
+<div class="field">
+<label>%s</label>
+<input name="%s" value="%s" %s/>
+</div>
` + + if label == "" { + tmpl = `%s` + } + + output := fmt.Sprintf(tmpl, Escape(label), Escape(name), Escape(fmt.Sprintf("%v", v)), attributes) + + return got.HTML(output) +} + +// DateField sets up a date field with a data-date attribute storing the real date +func DateField(label string, name string, t time.Time, args ...string) got.HTML { + + // NB we use text type for date fields because of inconsistent browser behaviour + // and to support our own date picker popups + tmpl := + `
+<div class="field">
+<label>%s</label>
+<input name="%s" id="%s" value="%s" data-date="%s" %s/>
+</div>
` + + attributes := "" + if len(args) > 0 { + attributes = strings.Join(args, " ") + } + output := fmt.Sprintf(tmpl, Escape(label), Escape(name), Escape(name), Date(t), Date(t, "2006-01-02"), attributes) + + return got.HTML(output) +} + +// TextArea returns a field div containing a textarea +func TextArea(label string, name string, v interface{}, args ...string) got.HTML { + attributes := "" + if len(args) > 0 { + attributes = strings.Join(args, " ") + } + + fieldTemplate := + `
+<div class="field">
+<label>%s</label>
+<textarea name="%s" %s>%s</textarea>
+</div>
` + output := fmt.Sprintf(fieldTemplate, + Escape(label), + Escape(name), + attributes, // NB we do not escape attributes, which may contain HTML + v) // NB value may contain HTML + + return got.HTML(output) +} + +// TODO flip the select helpers to use Selectable all the time? +// Redefine concrete type Option as a Selectable and this should be doable? + +// Selectable provides an interface for options in a select +type Selectable interface { + SelectName() string + SelectValue() string +} + +// SelectableOption provides a concrete implementation of Selectable - this should be called string option or similar +type SelectableOption struct { + Name string + Value string +} + +// SelectName returns the public name for this select option +func (o SelectableOption) SelectName() string { + return o.Name +} + +// SelectValue returns the value for this select option +func (o SelectableOption) SelectValue() string { + return o.Value +} + +// StringOptions creates an array of selectables from strings +func StringOptions(args ...string) []SelectableOption { + var options []SelectableOption + // Construct a slice of options from these strings + + for _, s := range args { + options = append(options, SelectableOption{s, s}) + } + + return options +} + +// NumberOptions creates an array of selectables, with an optional min and max value supplied as arguments +func NumberOptions(args ...int64) []SelectableOption { + + min := int64(0) + max := int64(50) + + if len(args) > 0 { + min = args[0] + } + + if len(args) > 1 { + max = args[1] + } + + var options []SelectableOption + + for i := min; i <= max; i++ { + v := strconv.Itoa(int(i)) + n := v + + options = append(options, SelectableOption{n, v}) + } + + return options +} + +// Better to use an interface and not reflect here - Would rather avoid use of reflect... + +// OptionsForSelect creates a select field given an array of keys and values in order +func OptionsForSelect(value interface{}, options interface{}) got.HTML { + + stringValue := fmt.Sprintf("%v", value) + + output := "" + + switch reflect.TypeOf(options).Kind() { + case reflect.Slice: + s := reflect.ValueOf(options) + for i := 0; i < s.Len(); i++ { + o := s.Index(i).Interface().(Selectable) + sel := "" + if o.SelectValue() == stringValue { + sel = "selected" + } + + output += fmt.Sprintf(` +`, o.SelectValue(), sel, Escape(o.SelectName())) + + } + } + + return got.HTML(output) +} + +// SelectArray creates a select field given an array of keys and values in order +func SelectArray(label string, name string, value interface{}, options interface{}) got.HTML { + + stringValue := fmt.Sprintf("%v", value) + + tmpl := + `
+<div class="field">
+<label>%s</label>
+<select name="%s" id="%s">
+%s
+</select>
+</div>
` + + if label == "" { + tmpl = `%s` + } + + opts := "" + + switch reflect.TypeOf(options).Kind() { + case reflect.Slice: + s := reflect.ValueOf(options) + for i := 0; i < s.Len(); i++ { + o := s.Index(i).Interface().(Selectable) + sel := "" + if o.SelectValue() == stringValue { + sel = "selected" + } + + opts += fmt.Sprintf(` +`, o.SelectValue(), sel, Escape(o.SelectName())) + + } + } + + output := fmt.Sprintf(tmpl, Escape(label), Escape(name), Escape(name), opts) + + return got.HTML(output) +} + +// FIXME - make Option conform to Selectable interface and use that instead of concrete type below + +// Option type contains number and string +type Option struct { + Id int64 // The value - FIXME migrate to ID and use as interface + Name string // The name +} + +// SelectName returns the public name for this select option +func (o Option) SelectName() string { + return o.Name +} + +// SelectValue returns the value for this select option +func (o Option) SelectValue() string { + return fmt.Sprintf("%d", o.Id) +} + +// SelectID returns the value for this select option as an int64 +func (o Option) SelectID() int64 { + return o.Id +} + +// ID returns the value for this select option as an int64 +// this is supplied in case of use in templates +func (o Option) ID() int64 { + return o.Id +} + +// SetID is deprecated +func (o Option) SetID(id int64) { + o.Id = id +} + +// NewOption returns a new option, given an id and name +func NewOption(id int64, name string) Option { + return Option{Id: id, Name: name} +} + +// Select creates a select field given an array of keys and values in order +func Select(label string, name string, value int64, options []Option) got.HTML { + + tmpl := + `
+<div class="field">
+<label>%s</label>
+<select name="%s">
+%s
+</select>
+</div>
` + + if label == "" { + tmpl = `%s` + } + + opts := "" + for _, o := range options { + + s := "" + if o.Id == value { + s = "selected" + } + + opts += fmt.Sprintf(` +`, o.Id, s, Escape(o.Name)) + } + + output := fmt.Sprintf(tmpl, Escape(label), Escape(name), opts) + + return got.HTML(output) +} diff --git a/vendor/github.com/fragmenta/view/helpers/helpers.go b/vendor/github.com/fragmenta/view/helpers/helpers.go new file mode 100644 index 0000000..2824f50 --- /dev/null +++ b/vendor/github.com/fragmenta/view/helpers/helpers.go @@ -0,0 +1,210 @@ +package helpers + +import ( + "fmt" + got "html/template" + "strings" + "time" +) + +// ARRAYS + +// Array takes a set of interface pointers as variadic args, and returns a single array +func Array(args ...interface{}) []interface{} { + return []interface{}{args} +} + +// CommaSeparatedArray returns the values as a comma separated string +func CommaSeparatedArray(args []string) string { + result := "" + for _, v := range args { + if len(result) > 0 { + result = fmt.Sprintf("%s,%s", result, v) + } else { + result = v + } + + } + return result +} + +// MAPS + +// Empty returns an empty map[string]interface{} for use as a context +func Empty() map[string]interface{} { + return map[string]interface{}{} +} + +// Map sets a map key and return the map +func Map(m map[string]interface{}, k string, v interface{}) map[string]interface{} { + m[k] = v + return m +} + +// Set a map key and return an empty string +func Set(m map[string]interface{}, k string, v interface{}) string { + m[k] = v + return "" // Render nothing, we want no side effects +} + +// SetIf sets a map key if the given condition is true +func SetIf(m map[string]interface{}, k string, v interface{}, t bool) string { + if t { + m[k] = v + } else { + m[k] = "" + } + return "" // Render nothing, we want no side effects +} + +// Append all args to an array, and return that array +func Append(m []interface{}, args ...interface{}) []interface{} { + for _, v := range args { + m = append(m, v) + } + return m +} + +// CreateMap - given a set of interface pointers as variadic args, generate and return a map to the values +// This is currently unused as we just use simpler Map add above to add to context +func CreateMap(args ...interface{}) map[string]interface{} { + m := make(map[string]interface{}, 0) + + key := "" + for _, v := range args { + if len(key) == 0 { + key = string(v.(string)) + } else { + m[key] = v + } + } + + return m +} + +// Contains returns true if this array of ints contains the given int +func Contains(list []int64, item int64) bool { + for _, b := range list { + if b == item { + return true + } + } + return false +} + +// Blank returns true if a string is empty +func Blank(s string) bool { + return len(s) == 0 +} + +// Exists returns true if this string has a length greater than 0 +func Exists(s string) bool { + return len(s) > 0 +} + +// Time returns a formatted time string given a time and optional format +func Time(time time.Time, formats ...string) got.HTML { + layout := "Jan 2, 2006 at 15:04" + if len(formats) > 0 { + layout = formats[0] + } + value := fmt.Sprintf(time.Format(layout)) + return got.HTML(Escape(value)) +} + +// Ago returns a time string reporting distance from the current date +// of form 5 hours ago +func Ago(t time.Time, formats ...string) string { + duration := time.Since(t) + absDuration := duration + if duration < 0 { + absDuration = -duration + } + hours := absDuration / time.Hour + + // Use ago only for past dates + ago := " ago" + if duration < 0 { + ago 
= "" + } + + switch { + case absDuration < time.Minute: + return fmt.Sprintf("%d seconds%s", duration/time.Second, ago) + case absDuration < time.Hour: + return fmt.Sprintf("%d minutes%s", duration/time.Minute, ago) + case absDuration < time.Hour*24: + unit := "hour" + if hours > 1 { + unit = "hours" + } + return fmt.Sprintf("%d %s%s", hours, unit, ago) + default: + unit := "day" + if hours > 48 { + unit = "days" + } + return fmt.Sprintf("%d %s%s", hours/24, unit, ago) + } +} + +// Date returns a formatted date string given a time and optional format +// Date format layouts are for the date 2006-01-02 +func Date(t time.Time, formats ...string) got.HTML { + + //layout := "2006-01-02" // Jan 2, 2006 + layout := "Jan 2, 2006" + if len(formats) > 0 { + layout = formats[0] + } + value := fmt.Sprintf(t.Format(layout)) + return got.HTML(Escape(value)) +} + +// UTCDate returns a formatted date string in 2006-01-02 +func UTCDate(t time.Time) got.HTML { + return Date(t.UTC(), "2006-01-02") +} + +// UTCTime returns a formatted date string in 2006-01-02 +func UTCTime(t time.Time) got.HTML { + return Time(t.UTC(), "2006-01-02T15:04:00:00.000Z") +} + +// JSONTime returns a formatted date string with format +// suitable for using in a json file +func JSONTime(t time.Time) got.HTML { + return Time(t.UTC(), "2006-01-02T15:04:05Z07:00") +} + +// UTCNow returns a formatted date string in 2006-01-02 +func UTCNow() got.HTML { + return Date(time.Now().UTC(), "2006-01-02") +} + +// YearNow returns a formatted date string for the current year +func YearNow() got.HTML { + return Date(time.Now().UTC(), "2006") +} + +// Truncate text to a given length +func Truncate(s string, l int64) string { + return s +} + +// CSV escape (replace , with ,,) +func CSV(s got.HTML) string { + return strings.Replace(string(s), ",", ",,", -1) +} + +// JSON escapes a string for use in a json template (html template) +func JSON(t string) got.HTML { + // Escape mandatory characters + t = strings.Replace(t, "\r", " ", -1) + t = strings.Replace(t, "\n", " ", -1) + t = strings.Replace(t, "\t", " ", -1) + t = strings.Replace(t, "\\", "\\\\", -1) + t = strings.Replace(t, "\"", "\\\"", -1) + // Because we use html/template escape as temlate.HTML + return got.HTML(t) +} diff --git a/vendor/github.com/fragmenta/view/helpers/html.go b/vendor/github.com/fragmenta/view/helpers/html.go new file mode 100644 index 0000000..e9ac8a2 --- /dev/null +++ b/vendor/github.com/fragmenta/view/helpers/html.go @@ -0,0 +1,77 @@ +package helpers + +import ( + "fmt" + "strings" + + got "html/template" + + "github.com/kennygrant/sanitize" +) + +// Style inserts a css tag +func Style(name string) got.HTML { + return got.HTML(fmt.Sprintf("", EscapeURL(name))) +} + +// Script inserts a script tag +func Script(name string) got.HTML { + return got.HTML(fmt.Sprintf("", EscapeURL(name))) +} + +// Escape escapes HTML using HTMLEscapeString +func Escape(s string) string { + return got.HTMLEscapeString(s) +} + +// EscapeURL escapes URLs using HTMLEscapeString +func EscapeURL(s string) string { + return got.URLQueryEscaper(s) +} + +// Link returns got.HTML with an anchor link given text and URL required +// Attributes (if supplied) should not contain user input +func Link(t string, u string, a ...string) got.HTML { + attributes := "" + if len(a) > 0 { + attributes = strings.Join(a, " ") + } + return got.HTML(fmt.Sprintf("%s", Escape(u), Escape(attributes), Escape(t))) +} + +// HTML returns a string (which must not contain user input) as go template HTML +func HTML(s string) 
got.HTML { + return got.HTML(s) +} + +// HTMLAttribute returns a string (which must not contain user input) as go template HTMLAttr +func HTMLAttribute(s string) got.HTMLAttr { + return got.HTMLAttr(s) +} + +// URL returns returns a string (which must not contain user input) as go template URL +func URL(s string) got.URL { + return got.URL(s) +} + +// Strip all html tags and returns as go template HTML +func Strip(s string) got.HTML { + return got.HTML(sanitize.HTML(s)) +} + +// Sanitize the html, leaving only tags we consider safe (see the sanitize package for details and tests) +func Sanitize(s string) got.HTML { + s, err := sanitize.HTMLAllowing(s) + if err != nil { + fmt.Printf("#error sanitizing html:%s", err) + return got.HTML("") + } + return got.HTML(s) +} + +// XMLPreamble returns an XML preamble as got.HTML, +// primarily to work around a bug in html/template which escapes ` +} diff --git a/vendor/github.com/fragmenta/view/helpers/maths.go b/vendor/github.com/fragmenta/view/helpers/maths.go new file mode 100644 index 0000000..4a5dc92 --- /dev/null +++ b/vendor/github.com/fragmenta/view/helpers/maths.go @@ -0,0 +1,142 @@ +package helpers + +import ( + "fmt" + "strconv" + "strings" +) + +// PRICES + +// FIXME - move to currency type with concrete implementations per currency, as it'd be neater than funcs with multiple options. currency.GBP.PriceToCents something like that? + +// PriceToCentsString returns a price in cents as a string for use in params +func PriceToCentsString(p string) string { + if p == "" { + return "0" // Return 0 for blank price + } + + return fmt.Sprintf("%d", PriceToCents(p)) +} + +// PriceToCents converts a price string in human friendly notation (£45 or £34.40) to a price in pence as an int64 +func PriceToCents(p string) int { + price := strings.Replace(p, "£", "", -1) + price = strings.Replace(price, ",", "", -1) // assumed to be in thousands + price = strings.Replace(price, " ", "", -1) + + var pennies int + var err error + if strings.Contains(price, ".") { + // Split the string on . and rejoin with padded pennies + parts := strings.Split(price, ".") + + if len(parts[1]) == 0 { + parts[1] = "00" + } else if len(parts[1]) == 1 { + parts[1] = parts[1] + "0" + } + + price = parts[0] + parts[1] + + pennies, err = strconv.Atoi(price) + } else { + pennies, err = strconv.Atoi(price) + pennies = pennies * 100 + } + if err != nil { + fmt.Printf("Error converting price %s", price) + pennies = 0 + } + + return pennies +} + +// CentsToPrice converts a price in pence to a human friendly price including currency unit +// At present it assumes the currency is pounds, it should instead take an optional param for currency +// or not include it at all +func CentsToPrice(p int64) string { + price := fmt.Sprintf("£%.2f", float64(p)/100.0) + return strings.TrimSuffix(price, ".00") // remove zero pence at end if we have it +} + +// CentsToPriceShort converts a price in pence to a human friendly price abreviated (no pence) +func CentsToPriceShort(p int64) string { + if p >= 100000000000 { // If greater than £1b use b suffix + return fmt.Sprintf("£%.2fb", float64(p)/100000000000.0) + } else if p >= 100000000 { // If greater than £1m use m suffix + return fmt.Sprintf("£%.2fm", float64(p)/100000000.0) + } else if p >= 100000 { // If greater than £1k use k suffix + return fmt.Sprintf("£%.1fk", float64(p)/100000.0) + } + return CentsToPrice(p) +} + +// NumberToHuman formats large numbers for human consumption +// some preceision is lost, e.g. 
1.3m rather than 130000001 +func NumberToHuman(n int64) string { + if n >= 100000000000 { // If greater than 1 billion use b suffix + return fmt.Sprintf("%.2fb", float64(n)/100000000000.0) + } else if n >= 100000000 { // If greater than 1m use m suffix + return fmt.Sprintf("%.2fm", float64(n)/100000000.0) + } else if n >= 1000 { // If greater than 1000 use k suffix + return fmt.Sprintf("%.2fk", float64(n)/1000.0) + } + // Just format as a string + return fmt.Sprintf("%d", n) +} + +// NumberToCommas formats large numbers with commas +// the entire number is still represented +func NumberToCommas(n int64) string { + // Print the number + s := fmt.Sprintf("%d", n) + + if len(s) < 4 { + return s + } + + // Split the number with commas every 3 numerals + var formatted string + // Count backwards from the end in 3s + for i := len(s) - 1; i >= 0; i-- { + c := s[i] + if i < len(s)-1 && (len(s)-i-1)%3 == 0 { + formatted = string(c) + "," + formatted + } else { + formatted = string(c) + formatted + } + } + + return formatted +} + +// CentsToBase converts cents to the base currency unit, preserving cent display, with no currency +func CentsToBase(p int64) string { + return fmt.Sprintf("%.2f", float64(p)/100.0) +} + +// Mod returns a modulo b +func Mod(a int, b int) int { + return a % b +} + +// Add returns a + b +func Add(a int, b int) int { + return a + b +} + +// Subtract returns a - b +func Subtract(a int, b int) int { + return a - b +} + +// Odd returns true if a is odd +func Odd(a int) bool { + return a%2 == 0 +} + +// Int64 returns an int64 from an int +func Int64(i int) int64 { + return int64(i) +} diff --git a/vendor/github.com/fragmenta/view/parser/parser.go b/vendor/github.com/fragmenta/view/parser/parser.go new file mode 100644 index 0000000..3f56703 --- /dev/null +++ b/vendor/github.com/fragmenta/view/parser/parser.go @@ -0,0 +1,17 @@ +// Package parser defines an interface for parsers which the base template conforms to +package parser + +// FuncMap is a map of functions +type FuncMap map[string]interface{} + +// Parser loads template files, and returns a template suitable for rendering content +type Parser interface { + // Setup is called once on setup of a parser + Setup(helpers FuncMap) error + + // Can this parser handle this file? 
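+ // (parsers are tried in order; the first one to claim a file wins)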
+ CanParseFile(path string) bool + + // Parse the file given and return a compiled template + NewTemplate(fullpath, path string) (Template, error) +} diff --git a/vendor/github.com/fragmenta/view/parser/scanner.go b/vendor/github.com/fragmenta/view/parser/scanner.go new file mode 100644 index 0000000..ccdeeb4 --- /dev/null +++ b/vendor/github.com/fragmenta/view/parser/scanner.go @@ -0,0 +1,219 @@ +// Package parser defines an interface for parsers (creating templates) and templates (rendering content), and defines a base template type which conforms to both interfaces and can be included in any templates +package parser + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" +) + +// Scanner scans paths for templates and creates a representation of each using parsers +type Scanner struct { + // A map of all templates keyed by path name + Templates map[string]Template + + // A set of parsers (in order) with which to parse templates + Parsers []Parser + + // A set of paths (in order) from which to load templates + Paths []string + + // Helpers is a list of helper functions + Helpers FuncMap + + // rootPath is used to store the root path during scans + rootPath string +} + +// NewScanner creates a new template scanner +func NewScanner(paths []string, helpers FuncMap) (*Scanner, error) { + s := &Scanner{ + Helpers: helpers, + Paths: paths, + Templates: make(map[string]Template), + Parsers: []Parser{new(JSONTemplate), new(HTMLTemplate), new(TextTemplate)}, + } + + return s, nil +} + +// ScanPath scans a path for template files, including sub-paths +func (s *Scanner) ScanPath(root string) error { + + // Store the rootPath - used in walkFunc + s.rootPath = path.Clean(root) + + // Store current path, and change to root path + // so that template includes use relative paths from root + // this may not be necc. 
any more, test removing it + pwd, err := os.Getwd() + if err != nil { + return err + } + + // Change dir to the rootPath so that paths are relative + err = os.Chdir(s.rootPath) + if err != nil { + return err + } + + err = filepath.Walk(".", s.walkFunc) + if err != nil { + return err + } + + // Change back to original path + err = os.Chdir(pwd) + if err != nil { + return err + } + + return nil +} + +// walkFunc handles files from filepath.Walk in ScanPath +// It follows symlinks where encountered by recursing +func (s *Scanner) walkFunc(path string, info os.FileInfo, err error) error { + + // If an error occurred, report it + if err != nil { + return err + } + + // Check if this is a symlink, if so recurse + // This assumes that the structure at linkedPath exactly mirrors that at path + if isSymlink(info) { + + // Find the linked path + linkedPath, err := filepath.EvalSymlinks(path) + if err != nil { + return fmt.Errorf("error reading symbolic link: %s", err) + } + + // Calculate a new temp root path, based on linked path + // trimmed of the original path + // This assumes that the structure at linkedPath exactly mirrors that at path + newRoot := strings.TrimSuffix(linkedPath, path) + + // Store current dir + pwd, err := os.Getwd() + if err != nil { + return err + } + + // Change dir to the linked path container + err = os.Chdir(newRoot) + if err != nil { + return err + } + + // filepath.Walk at the location, based on the newRoot + err = filepath.Walk(path, s.walkFunc) + + // Change dir back to pwd + err = os.Chdir(pwd) + if err != nil { + return err + } + + return err + } + + // Deal with files, directories we return nil error to recurse on them + if !info.IsDir() { + // Ask parsers in turn to handle the file - first one to claim it wins + for _, p := range s.Parsers { + if p.CanParseFile(path) { + + fullpath := filepath.Join(s.rootPath, path) + t, err := p.NewTemplate(fullpath, path) + if err != nil { + return err + } + + s.Templates[path] = t + return nil + } + } + + } + + return nil +} + +// isSymlink returns true if this is a symlink +func isSymlink(info os.FileInfo) bool { + return info.Mode()&os.ModeSymlink != 0 +} + +// ScanPaths resets template list and rescans all template paths +func (s *Scanner) ScanPaths() error { + // Make sure templates is empty + s.Templates = make(map[string]Template) + + // Set up the parsers + for _, p := range s.Parsers { + err := p.Setup(s.Helpers) + if err != nil { + return err + } + } + + // Scan paths again + for _, p := range s.Paths { + err := s.ScanPath(p) + if err != nil { + return err + } + } + + // Now parse and finalize templates + for _, t := range s.Templates { + err := t.Parse() + if err != nil { + return err + } + } + + // Now finalize templates + for _, t := range s.Templates { + err := t.Finalize(s.Templates) + if err != nil { + return err + } + } + + return nil +} + +// PATH UTILITIES + +// dotFile returns true if the file path supplied a dot file? +func dotFile(p string) bool { + return strings.HasPrefix(path.Base(p), ".") +} + +// suffix returns true if the path have this suffix (ignoring dotfiles)? +func suffix(p string, suffix string) bool { + if dotFile(p) { + return false + } + return strings.HasSuffix(p, suffix) +} + +// suffixes returns true if the path has these suffixes (ignoring dotfiles)? 
+func suffixes(p string, suffixes []string) bool { + if dotFile(p) { + return false + } + + for _, s := range suffixes { + if strings.HasSuffix(p, s) { + return true + } + } + + return false +} diff --git a/vendor/github.com/fragmenta/view/parser/template.go b/vendor/github.com/fragmenta/view/parser/template.go new file mode 100644 index 0000000..d311913 --- /dev/null +++ b/vendor/github.com/fragmenta/view/parser/template.go @@ -0,0 +1,175 @@ +package parser + +import ( + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "regexp" +) + +// Template renders its content given a ViewContext +type Template interface { + // Parse a template file + Parse() error + + // Called after parsing is finished + Finalize(templates map[string]Template) error + + // Render to this writer + Render(writer io.Writer, context map[string]interface{}) error + + // Return the original template content + Source() string + + // Return the template path + Path() string + + // Return the cache key + CacheKey() string + + // Return dependencies of this template (used for creating cache keys) + Dependencies() []Template +} + +var templateInclude = regexp.MustCompile(`{{\s*template\s*["]([\S]*)["].*}}`) + +// MaxCacheKeyLength determines the max key length for cache keys +var MaxCacheKeyLength = 250 + +// TODO - base template is a mixin, get rid of all methods which are going to be overridden like StartParse + +// BaseTemplate is a base template which conforms to Template and Parser interfaces. +// This is an abstract base type, we use html or text templates +type BaseTemplate struct { + fullpath string // the full true path from project root + path string // the relative template path from src - used for unique identifier + source string // at present we store in memory + key string // set at parse time + dependencies []Template // set at parse time + +} + +// PARSER + +// Setup sets up the template for parsing +func (t *BaseTemplate) Setup(viewsPath string, helpers FuncMap) error { + return nil +} + +// CanParseFile returns true if we can parse this file +func (t *BaseTemplate) CanParseFile(path string) bool { + if dotFile(path) { + return false + } + + return true +} + +// NewTemplate returns a newly created template for this path +func (t *BaseTemplate) NewTemplate(fullpath, path string) (Template, error) { + template := new(BaseTemplate) + template.fullpath = fullpath + template.path = path + return template, nil +} + +// TEMPLATE PARSING + +// Parse the template (BaseTemplate simply stores it) +func (t *BaseTemplate) Parse() error { + + // Read the file + s, err := t.readFile(t.fullpath) + if err == nil { + t.source = s + } + + return err +} + +// ParseString a string template +func (t *BaseTemplate) ParseString(s string) error { + t.path = t.generateHash(s) + t.source = s + return nil +} + +// Render the template ignoring context +func (t *BaseTemplate) Render(writer io.Writer, context map[string]interface{}) error { + writer.Write([]byte(t.Source())) + return nil +} + +// Finalize is called on each template after parsing is finished, supplying complete template set. 
+func (t *BaseTemplate) Finalize(templates map[string]Template) error { + + t.dependencies = []Template{} + + return nil +} + +// Source the parsed version of this template +func (t *BaseTemplate) Source() string { + return t.source +} + +// Path returns the path of this template +func (t *BaseTemplate) Path() string { + return t.path +} + +// CacheKey returns the cache key of this template - +// (this is generated from path + hash of contents + dependency hash keys). +// So it automatically changes when templates are changed +func (t *BaseTemplate) CacheKey() string { + // If we have a key, return it + // NB this relies on templates being reloaded on reload of app in production... + if t.key != "" { + return t.key + } + + //println("Making key for",t.Path()) + + // Otherwise generate the key + t.key = t.path + "/" + t.generateHash(t.Source()) + + for _, d := range t.dependencies { + t.key = t.key + "-" + d.CacheKey() + } + + // Finally, if the key is too long, set it to a hash of the key instead + // (Memcache for example has limits on key length) + // possibly we should deal with this at a higher level + // I'd suggest always md5 keys with /view/ prefix... + // put this into cache itself though... + if len(t.key) > MaxCacheKeyLength { + t.key = t.generateHash(t.key) + } + + return t.key +} + +// Dependencies returns which other templates this one depends on (for generating nested cache keys) +func (t *BaseTemplate) Dependencies() []Template { + return t.dependencies +} + +// Utility method to read files into a string +func (t *BaseTemplate) readFile(path string) (string, error) { + fileBytes, err := ioutil.ReadFile(path) + if err != nil { + println("Error reading template file at path ", path) + return "", err + } + return string(fileBytes), err +} + +// Utility method to generate a hash from string +func (t *BaseTemplate) generateHash(input string) string { + + // FIXME: use sha256, not md5 + h := md5.New() + io.WriteString(h, input) + return fmt.Sprintf("%x", h.Sum(nil)) +} diff --git a/vendor/github.com/fragmenta/view/parser/template.html.go b/vendor/github.com/fragmenta/view/parser/template.html.go new file mode 100644 index 0000000..f8fc09b --- /dev/null +++ b/vendor/github.com/fragmenta/view/parser/template.html.go @@ -0,0 +1,108 @@ +package parser + +import ( + "fmt" + got "html/template" + "io" + "sync" +) + +var ( + mu sync.RWMutex // Shared mutex to go with shared template set, because of dev reloads + htmlTemplateSet *got.Template // This is a shared template set for HTML templates +) + +// HTMLTemplate represents an HTML template using go HTML/template +type HTMLTemplate struct { + BaseTemplate +} + +// Setup performs setup before parsing templates +func (t *HTMLTemplate) Setup(helpers FuncMap) error { + mu.Lock() + defer mu.Unlock() + htmlTemplateSet = got.New("").Funcs(got.FuncMap(helpers)) + return nil +} + +// CanParseFile returns true if this parser handles this path +func (t *HTMLTemplate) CanParseFile(path string) bool { + allowed := []string{".html.got", ".xml.got"} + return suffixes(path, allowed) +} + +// NewTemplate returns a new template for this type +func (t *HTMLTemplate) NewTemplate(fullpath, path string) (Template, error) { + template := new(HTMLTemplate) + template.fullpath = fullpath + template.path = path + return template, nil +} + +// Parse the template at path +func (t *HTMLTemplate) Parse() error { + mu.Lock() + defer mu.Unlock() + err := t.BaseTemplate.Parse() + if err != nil { + return err + } + + // Add to our template set - NB duplicates not 
allowed by golang templates + if htmlTemplateSet.Lookup(t.Path()) == nil { + _, err = htmlTemplateSet.New(t.path).Parse(t.Source()) + } else { + err = fmt.Errorf("Duplicate template:%s %s", t.Path(), t.Source()) + } + + return err +} + +// ParseString parses a string template +func (t *HTMLTemplate) ParseString(s string) error { + mu.Lock() + defer mu.Unlock() + err := t.BaseTemplate.ParseString(s) + + // Add to our template set + if htmlTemplateSet.Lookup(t.Path()) == nil { + _, err = htmlTemplateSet.New(t.path).Parse(t.Source()) + } else { + err = fmt.Errorf("Duplicate template:%s %s", t.Path(), t.Source()) + } + + return err +} + +// Finalize the template set, called after parsing is complete +func (t *HTMLTemplate) Finalize(templates map[string]Template) error { + + // Go html/template records dependencies both ways (child <-> parent) + // tmpl.Templates() includes tmpl and children and parents + // we only want includes listed as dependencies + // so just do a simple search of unparsed source instead + + // Search source for {{\s template "|`xxx`|" x }} pattern + paths := templateInclude.FindAllStringSubmatch(t.Source(), -1) + + // For all includes found, add the template to our dependency list + for _, p := range paths { + d := templates[p[1]] + if d != nil { + t.dependencies = append(t.dependencies, d) + } + } + + return nil +} + +// Render the template to the given writer, returning an error +func (t *HTMLTemplate) Render(writer io.Writer, context map[string]interface{}) error { + mu.RLock() + defer mu.RUnlock() + tmpl := htmlTemplateSet.Lookup(t.Path()) + if tmpl == nil { + return fmt.Errorf("#error loading template for %s", t.Path()) + } + return tmpl.Execute(writer, context) +} diff --git a/vendor/github.com/fragmenta/view/parser/template.json.go b/vendor/github.com/fragmenta/view/parser/template.json.go new file mode 100644 index 0000000..ba3a244 --- /dev/null +++ b/vendor/github.com/fragmenta/view/parser/template.json.go @@ -0,0 +1,103 @@ +package parser + +import ( + "fmt" + got "html/template" + "io" + "sync" +) + +var ( + jsonMu sync.RWMutex // Shared mutex to go with shared template set, because of dev reloads + jsonTemplateSet *got.Template // This is a shared template set for json templates +) + +// JSONTemplate represents a template using go HTML/template +type JSONTemplate struct { + BaseTemplate +} + +// Setup performs one-time setup before parsing templates +func (t *JSONTemplate) Setup(helpers FuncMap) error { + mu.Lock() + defer mu.Unlock() + jsonTemplateSet = got.New("").Funcs(got.FuncMap(helpers)) + return nil +} + +// CanParseFile returns true if this template can parse this file +func (t *JSONTemplate) CanParseFile(path string) bool { + allowed := []string{".json.got"} + return suffixes(path, allowed) +} + +// NewTemplate returns a new JSONTemplate +func (t *JSONTemplate) NewTemplate(fullpath, path string) (Template, error) { + template := new(JSONTemplate) + template.fullpath = fullpath + template.path = path + return template, nil +} + +// Parse the template +func (t *JSONTemplate) Parse() error { + mu.Lock() + defer mu.Unlock() + err := t.BaseTemplate.Parse() + + // Add to our template set + if jsonTemplateSet.Lookup(t.Path()) == nil { + _, err = jsonTemplateSet.New(t.path).Parse(t.Source()) + } else { + err = fmt.Errorf("Duplicate template:%s %s", t.Path(), t.Source()) + } + + return err +} + +// ParseString parses a string template +func (t *JSONTemplate) ParseString(s string) error { + mu.Lock() + defer mu.Unlock() + + err := t.BaseTemplate.ParseString(s) 
+ + // Add to our template set + if jsonTemplateSet.Lookup(t.Path()) == nil { + _, err = jsonTemplateSet.New(t.path).Parse(t.Source()) + } else { + err = fmt.Errorf("Duplicate template:%s %s", t.Path(), t.Source()) + } + + return err +} + +// Finalize the template set, called after parsing is complete +func (t *JSONTemplate) Finalize(templates map[string]Template) error { + + // Go html/template records dependencies both ways (child <-> parent) + // tmpl.Templates() includes tmpl and children and parents + // we only want includes listed as dependencies + // so just do a simple search of parsed source instead + + // Search source for {{\s template "|`xxx`|" x }} pattern + paths := templateInclude.FindAllStringSubmatch(t.Source(), -1) + + // For all includes found, add the template to our dependency list + for _, p := range paths { + d := templates[p[1]] + if d != nil { + t.dependencies = append(t.dependencies, d) + } + } + + return nil +} + +// Render the template +func (t *JSONTemplate) Render(writer io.Writer, context map[string]interface{}) error { + jsonMu.RLock() + defer jsonMu.RUnlock() + tmpl := jsonTemplateSet.Lookup(t.Path()) + return tmpl.Execute(writer, context) +} diff --git a/vendor/github.com/fragmenta/view/parser/template.text.go b/vendor/github.com/fragmenta/view/parser/template.text.go new file mode 100644 index 0000000..4256b49 --- /dev/null +++ b/vendor/github.com/fragmenta/view/parser/template.text.go @@ -0,0 +1,95 @@ +package parser + +import ( + "fmt" + "io" + got "text/template" +) + +var textTemplateSet *got.Template + +// TextTemplate using go text/template +type TextTemplate struct { + BaseTemplate +} + +// Setup runs before parsing templates +func (t *TextTemplate) Setup(helpers FuncMap) error { + textTemplateSet = got.New("").Funcs(got.FuncMap(helpers)) + return nil +} + +// CanParseFile returns true if this parser handles this file path? 
+func (t *TextTemplate) CanParseFile(path string) bool { + allowed := []string{".text.got", ".csv.got"} + return suffixes(path, allowed) +} + +// NewTemplate returns a new template of this type +func (t *TextTemplate) NewTemplate(fullpath, path string) (Template, error) { + template := new(TextTemplate) + template.fullpath = fullpath + template.path = path + return template, nil +} + +// Parse the template +func (t *TextTemplate) Parse() error { + err := t.BaseTemplate.Parse() + + // Add to our template set + if textTemplateSet.Lookup(t.path) == nil { + _, err = textTemplateSet.New(t.path).Parse(t.Source()) + } else { + err = fmt.Errorf("Duplicate template:%s %s", t.Path(), t.Source()) + } + + return err +} + +// ParseString a string template +func (t *TextTemplate) ParseString(s string) error { + err := t.BaseTemplate.ParseString(s) + + // Add to our template set + if textTemplateSet.Lookup(t.Path()) == nil { + _, err = textTemplateSet.New(t.path).Parse(t.Source()) + } else { + err = fmt.Errorf("Duplicate template:%s %s", t.Path(), t.Source()) + } + + return err +} + +// Finalize the template set, called after parsing is complete +// Record a list of dependent templates (for breaking caches automatically) +func (t *TextTemplate) Finalize(templates map[string]Template) error { + + // Search source for {{\s template "|`xxx`|" x }} pattern + paths := templateInclude.FindAllStringSubmatch(t.Source(), -1) + + // For all includes found, add the template to our dependency list + for _, p := range paths { + d := templates[p[1]] + if d != nil { + t.dependencies = append(t.dependencies, d) + } + } + + return nil +} + +// Render renders the template +func (t *TextTemplate) Render(writer io.Writer, context map[string]interface{}) error { + tmpl := t.goTemplate() + if tmpl == nil { + return fmt.Errorf("Error rendering template:%s %s", t.Path(), t.Source()) + } + + return tmpl.Execute(writer, context) +} + +// goTemplate returns teh underlying go template +func (t *TextTemplate) goTemplate() *got.Template { + return textTemplateSet.Lookup(t.Path()) +} diff --git a/vendor/github.com/fragmenta/view/render.go b/vendor/github.com/fragmenta/view/render.go new file mode 100644 index 0000000..2f7cb3e --- /dev/null +++ b/vendor/github.com/fragmenta/view/render.go @@ -0,0 +1,384 @@ +package view + +import ( + "bytes" + "fmt" + "html/template" + "io" + "net/http" + "os" + "path" + "regexp" + "strings" +) + +// Renderer is a view which is set up on each request and renders the response to its writer +type Renderer struct { + + // The view rendering context + context map[string]interface{} + + // The writer to write the context to + writer http.ResponseWriter + + // The layout template path + layout string + + // The template path + template string + + // The mime format to render with, defaults to text/html + format string + + // The http status code + status int + + // The request path + path string +} + +type ctxKey struct { + name string +} + +func (k *ctxKey) String() string { + return "view ctx: " + k.name +} + +// AuthenticityContext is used as a key to save request authenticity tokens +var AuthenticityContext = &ctxKey{authenticityKey} +var authenticityKey = "authenticity_token" + +// LanguageContext is used as a key to save request lang +var LanguageContext = &ctxKey{languageKey} +var languageKey = "lang" + +// NewRenderer returns a new renderer for this request. 
+func NewRenderer(w http.ResponseWriter, r *http.Request) *Renderer { + renderer := &Renderer{ + path: "/", + layout: "app/views/layout.html.got", + template: "", + format: "text/html", + status: http.StatusOK, + context: make(map[string]interface{}, 0), + writer: w, + } + + if r != nil { + // Read the path from request + renderer.path = canonicalPath(r) + + // Extract the authenticity token (if any) from context + token := r.Context().Value(AuthenticityContext) + if token != nil { + renderer.context[authenticityKey] = token.(string) + } + + // Extract the language (if any) from context + lang := r.Context().Value(LanguageContext) + if lang != nil { + renderer.context[languageKey] = lang.(string) + } + } + + // This sets layout and template based on the view.path + renderer.setDefaultTemplates() + + return renderer +} + +// Layout sets the layout used +func (r *Renderer) Layout(layout string) *Renderer { + r.layout = layout + return r +} + +// Template sets the template used +func (r *Renderer) Template(template string) *Renderer { + r.template = template + return r +} + +// Format sets the format used, e.g. text/html, +func (r *Renderer) Format(format string) *Renderer { + r.format = format + return r +} + +// Path sets the request path on the renderer (used for choosing a default template) +func (r *Renderer) Path(p string) *Renderer { + r.path = path.Clean(p) + return r +} + +// Status sets the Renderer status +func (r *Renderer) Status(status int) *Renderer { + r.status = status + return r +} + +// Header sets a header on the Renderer's Writer (if set) +func (r *Renderer) Header(k, v string) *Renderer { + if r.writer != nil { + r.writer.Header().Set(k, v) + } + return r +} + +// CacheKey sets the Cache-Control and Etag headers on the response +// using the CacheKey() from the Cacher passed in +func (r *Renderer) CacheKey(key string) { + r.writer.Header().Set("Cache-Control", "no-cache, public") + r.writer.Header().Set("Etag", key) +} + +// Text sets the view content as text +func (r *Renderer) Text(content string) *Renderer { + r.context["content"] = content + return r +} + +// HTML sets the view content as html (use with caution) +func (r *Renderer) HTML(content string) *Renderer { + r.context["content"] = template.HTML(content) + return r +} + +// AddKey adds a key/value pair to context +func (r *Renderer) AddKey(key string, value interface{}) *Renderer { + r.context[key] = value + return r +} + +// Context sets the entire context for rendering +func (r *Renderer) Context(c map[string]interface{}) *Renderer { + r.context = c + return r +} + +// RenderToString renders our template into layout using our context and return a string +func (r *Renderer) RenderToString() (string, error) { + + content := "" + + if len(r.template) > 0 { + mu.RLock() + t := scanner.Templates[r.template] + mu.RUnlock() + if t == nil { + return content, fmt.Errorf("No such template found %s", r.template) + } + + var rendered bytes.Buffer + err := t.Render(&rendered, r.context) + if err != nil { + return content, err + } + + content = rendered.String() + } + + return content, nil +} + +// FIXME - test for side-effects then replace RenderToString with the layout version as a bug fix + +// RenderToStringWithLayout renders our template into layout using our context and return a string +func (r *Renderer) RenderToStringWithLayout() (string, error) { + var rendered bytes.Buffer + + // We require a template + if len(r.template) > 0 { + mu.RLock() + t := scanner.Templates[r.template] + mu.RUnlock() + if t == nil { + 
return "", fmt.Errorf("No such template found %s", r.template) + } + + // Render the template to a buffer + err := t.Render(&rendered, r.context) + if err != nil { + return "", err + } + + // Render that buffer into the layout if we have one + if len(r.layout) > 0 { + r.context["content"] = template.HTML(rendered.String()) + + mu.RLock() + l := scanner.Templates[r.layout] + mu.RUnlock() + if l == nil { + return "", fmt.Errorf("No such layout found %s", r.layout) + } + + // Render the layout to the buffer + rendered.Reset() + err := l.Render(&rendered, r.context) + if err != nil { + return "", err + } + + } + + } + + return rendered.String(), nil +} + +// Response renders our template into layout using our context and write out to writer +// Response is an alternative name for Render so that we can +// call render.Response(), it may replace it eventually. +func (r *Renderer) Response() error { + return r.Render() +} + +// Render our template into layout using our context and write out to writer +func (r *Renderer) Render() error { + + // Reload if not in production + if !Production { + // fmt.Printf("#warn Reloading templates in development mode\n") + err := ReloadTemplates() + if err != nil { + return err + } + } + + // If we have a template, render it + // using r.Context unless overridden by content being set with .Text("My string") + if len(r.template) > 0 && r.context["content"] == nil { + mu.RLock() + t := scanner.Templates[r.template] + mu.RUnlock() + if t == nil { + return fmt.Errorf("#error No such template found %s", r.template) + } + + var rendered bytes.Buffer + err := t.Render(&rendered, r.context) + if err != nil { + return fmt.Errorf("#error Could not render template %s - %s", r.template, err) + } + + if r.layout != "" { + r.context["content"] = template.HTML(rendered.String()) + } else { + r.context["content"] = rendered.String() + } + } + + // Now render the content into the layout template + if r.layout != "" { + mu.RLock() + layout := scanner.Templates[r.layout] + mu.RUnlock() + if layout == nil { + return fmt.Errorf("#error Could not find layout %s", r.layout) + } + + err := layout.Render(r.writer, r.context) + if err != nil { + return fmt.Errorf("#error Could not render layout %s %s", r.layout, err) + } + + } else if r.context["content"] != nil { + // Deal with no layout by rendering content directly to writer + r.writer.Header().Set("Content-Type", r.format+"; charset=utf-8") + _, err := io.WriteString(r.writer, r.context["content"].(string)) + return err + } + + return nil +} + +// SendFile writes the file at the given path out to our writer +// it assumes the appropriate headers have been set first (Content-Type, Content-Disposition) e.g.: +// view.Header("Content-type", "application/pdf") +// view.Header("Content-Disposition", "attachment; filename='myfile.pdf'") +// view.SendFile(mypath) +func (r *Renderer) SendFile(p string) error { + f, err := os.Open(p) + if err != nil { + return err + } + _, err = io.Copy(r.writer, f) + if err != nil { + return err + } + return nil +} + +// Set sensible default layout/template paths after we know our path +// /pages => pages/views/index.html.got +// /pages/create => pages/views/create.html.got +// /pages/xxx => pages/views/show.html.got +// /pages/xxx/edit => pages/views/edit.html.got +func (r *Renderer) setDefaultTemplates() { + + // First deal with home (a special case) + if r.path == "/" { + r.template = "pages/views/home.html.got" + return + } + + // Now see if we can find a template based on our path + trimmed := 
strings.Trim(r.path, "/") + parts := strings.Split(trimmed, "/") + + pkg := "app" + action := "index" + + // TODO: add handling for theme templates + // we should attempt to match theme paths first, before default paths + // but need to know which theme is active for the domain for each request + + // Deal with default paths by matching the path within the folders + switch len(parts) { + default: + case 1: // /pages + pkg = parts[0] + case 2: // /pages/create or /pages/1 etc + pkg = parts[0] + action = parts[1] + // NB the +, we require 1 or more digits + numeric, _ := regexp.MatchString("^[0-9]+", parts[1]) + if numeric { + action = "show" + } + case 3: // /pages/xxx/edit + pkg = parts[0] + action = parts[2] + } + + // fmt.Printf("#templates setting default template:%s/views/%s.html.got", pkg, action) + + // Set a default template + mu.RLock() + path := fmt.Sprintf("%s/views/%s.html.got", pkg, action) + if scanner.Templates[path] != nil { + r.template = path + } + + // Set a default layout + path = fmt.Sprintf("%s/views/layout.html.got", pkg) + if scanner.Templates[path] != nil { + r.layout = path + } + mu.RUnlock() +} + +// canonicalPath extracts the request path, runs path.Clean +// and ensures it is prefixed with /. +func canonicalPath(r *http.Request) string { + // Clean the path + canonicalPath := path.Clean(r.URL.Path) + if len(canonicalPath) == 0 { + canonicalPath = "/" + } else if canonicalPath[0] != '/' { + canonicalPath = "/" + canonicalPath + } + return canonicalPath +} diff --git a/vendor/github.com/fragmenta/view/view.go b/vendor/github.com/fragmenta/view/view.go new file mode 100644 index 0000000..30ff0b5 --- /dev/null +++ b/vendor/github.com/fragmenta/view/view.go @@ -0,0 +1,151 @@ +// Package view provides methods for rendering templates, and helper functions for golang views +package view + +import ( + "fmt" + "sync" + + "github.com/fragmenta/view/helpers" + "github.com/fragmenta/view/parser" +) + +// Production is true if this server is running in production mode +var Production bool + +// The scanner is a private type used for scanning templates +var scanner *parser.Scanner + +// This mutex guards the pkg scanner variable during reload and access +// it is only neccessary because of hot reload during development +var mu sync.RWMutex + +// Helpers is a list of functions available in templates +var Helpers parser.FuncMap + +func init() { + Helpers = DefaultHelpers() +} + +// LoadTemplates loads our templates from ./src, and assigns them to the package variable Templates +// This function is deprecated and will be removed, use LoadTemplatesAtPaths instead +func LoadTemplates() error { + return LoadTemplatesAtPaths([]string{"src"}, Helpers) +} + +// DefaultHelpers returns a default set of helpers for the app, +// which can then be extended/replaced. Helper functions may not be changed +// after LoadTemplates is called, as reloading is required if they change. 
+func DefaultHelpers() parser.FuncMap { + funcs := make(parser.FuncMap) + + // HEAD helpers + funcs["style"] = helpers.Style + funcs["script"] = helpers.Script + funcs["dev"] = func() bool { return !Production } + + // HTML helpers + funcs["html"] = helpers.HTML + funcs["htmlattr"] = helpers.HTMLAttribute + funcs["url"] = helpers.URL + + funcs["sanitize"] = helpers.Sanitize + funcs["strip"] = helpers.Strip + funcs["truncate"] = helpers.Truncate + + // XML helpers + funcs["xmlpreamble"] = helpers.XMLPreamble + + // JSON helpers + funcs["json"] = helpers.JSON + funcs["jsontime"] = helpers.JSONTime + + // CSV helper + funcs["csv"] = helpers.CSV + + // Form helpers + funcs["field"] = helpers.Field + funcs["datefield"] = helpers.DateField + funcs["textarea"] = helpers.TextArea + funcs["select"] = helpers.Select + funcs["selectarray"] = helpers.SelectArray + funcs["optionsforselect"] = helpers.OptionsForSelect + + funcs["utcdate"] = helpers.UTCDate + funcs["utctime"] = helpers.UTCTime + funcs["utcnow"] = helpers.UTCNow + funcs["year"] = helpers.YearNow + funcs["date"] = helpers.Date + funcs["time"] = helpers.Time + funcs["ago"] = helpers.Ago + funcs["numberoptions"] = helpers.NumberOptions + + // String helpers + funcs["blank"] = helpers.Blank + funcs["exists"] = helpers.Exists + + // Math helpers + funcs["mod"] = helpers.Mod + funcs["odd"] = helpers.Odd + funcs["add"] = helpers.Add + funcs["subtract"] = helpers.Subtract + funcs["int64"] = helpers.Int64 + + // Array functions + funcs["array"] = helpers.Array + funcs["append"] = helpers.Append + funcs["contains"] = helpers.Contains + + // Map functions + funcs["map"] = helpers.Map + funcs["set"] = helpers.Set + funcs["setif"] = helpers.SetIf + funcs["empty"] = helpers.Empty + + // Numeric helpers - clean up and accept currency and other options in centstoprice + funcs["centstobase"] = helpers.CentsToBase + funcs["centstoprice"] = helpers.CentsToPrice + funcs["centstopriceshort"] = helpers.CentsToPriceShort + funcs["pricetocents"] = helpers.PriceToCents + funcs["numbertohuman"] = helpers.NumberToHuman + funcs["numbertocommas"] = helpers.NumberToCommas + + return funcs +} + +// LoadTemplatesAtPaths loads our templates given the paths provided +func LoadTemplatesAtPaths(paths []string, helpers parser.FuncMap) error { + + mu.Lock() + defer mu.Unlock() + + // Scan all templates within the given paths, using the helpers provided + var err error + scanner, err = parser.NewScanner(paths, helpers) + if err != nil { + return err + } + + err = scanner.ScanPaths() + if err != nil { + return err + } + + return nil +} + +// ReloadTemplates reloads the templates for our scanner +func ReloadTemplates() error { + mu.Lock() + defer mu.Unlock() + return scanner.ScanPaths() +} + +// PrintTemplates prints out our list of templates for debug +func PrintTemplates() { + mu.RLock() + defer mu.RUnlock() + for k := range scanner.Templates { + fmt.Printf("%s\n", k) + } + fmt.Printf("Finished scan of templates\n") +} diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 0000000..2de28da --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,9 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? 
+ehthumbs.db +Thumbs.db +.idea diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 0000000..56fcf25 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,129 @@ +sudo: false +language: go +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - master + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + +before_script: + - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf + - sudo service mysql restart + - .travis/wait_mysql.sh + - mysql -e 'create database gotest;' + +matrix: + include: + - env: DB=MYSQL8 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mysql:8.0 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - env: DB=MYSQL57 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mysql:5.7 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - env: DB=MARIA55 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mariadb:5.5 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - env: DB=MARIA10_1 + sudo: required + dist: trusty + go: 1.10.x + services: + - docker + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - docker pull mariadb:10.1 + - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret + mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1 + - cp .travis/docker.cnf ~/.my.cnf + - .travis/wait_mysql.sh + before_script: + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export 
MYSQL_TEST_ADDR=127.0.0.1:3307 + - export MYSQL_TEST_CONCURRENT=1 + + - os: osx + osx_image: xcode10.1 + addons: + homebrew: + packages: + - mysql + update: true + go: 1.12.x + before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + before_script: + - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB\nlocal_infile=1" >> /usr/local/etc/my.cnf + - mysql.server start + - mysql -uroot -e 'CREATE USER gotest IDENTIFIED BY "secret"' + - mysql -uroot -e 'GRANT ALL ON *.* TO gotest' + - mysql -uroot -e 'create database gotest;' + - export MYSQL_TEST_USER=gotest + - export MYSQL_TEST_PASS=secret + - export MYSQL_TEST_ADDR=127.0.0.1:3306 + - export MYSQL_TEST_CONCURRENT=1 + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - go vet ./... + - .travis/gofmt.sh +after_script: + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 0000000..ad59898 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,105 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Achille Roussel +Alexey Palazhchenko +Andrew Reid +Arne Hormann +Asta Xie +Bulat Gaifullin +Carlos Nieto +Chris Moos +Craig Wilson +Daniel Montoya +Daniel Nichter +Daniël van Eeden +Dave Protasowski +DisposaBoy +Egor Smolyakov +Erwan Martin +Evan Shaw +Frederick Mayle +Gustavo Kristic +Hajime Nakagami +Hanno Braun +Henri Yandell +Hirotaka Yamamoto +Huyiguang +ICHINOSE Shogo +Ilia Cimpoes +INADA Naoki +Jacek Szwec +James Harr +Jeff Hodges +Jeffrey Charles +Jerome Meyer +Jiajia Zhong +Jian Zhen +Joshua Prunier +Julien Lefevre +Julien Schmidt +Justin Li +Justin Nuß +Kamil Dziedzic +Kevin Malachowski +Kieron Woodhouse +Lennart Rudolph +Leonardo YongUk Kim +Linh Tran Tuan +Lion Yang +Luca Looz +Lucas Liu +Luke Scott +Maciej Zimnoch +Michael Woolnough +Nathanial Murphy +Nicola Peduzzi +Olivier Mengué +oscarzhao +Paul Bonser +Peter Schultz +Rebecca Chin +Reed Allman +Richard Wilkes +Robert Russell +Runrioter Wung +Shuode Li +Simon J Mudd +Soroush Pour +Stan Putrya +Stanley Gunawan +Steven Hartland +Thomas Wodarek +Tim Ruffles +Tom Jenkinson +Vladimir Kovpak +Xiangyu Hu +Xiaobing Jiang +Xiuming Chen +Zhenye Xie + +# Organizations + +Barracuda Networks, Inc. +Counting Ltd. +DigitalOcean Inc. +Facebook Inc. +GitHub Inc. +Google Inc. +InfoSum Ltd. +Keybase Inc. +Multiplay Ltd. +Percona LLC +Pivotal Inc. +Stripe Inc. 
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 0000000..9cb97b3 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,206 @@ +## Version 1.5 (2020-01-07) + +Changes: + + - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017) + - Improve buffer handling (#890) + - Document potentially insecure TLS configs (#901) + - Use a double-buffering scheme to prevent data races (#943) + - Pass uint64 values without converting them to string (#838, #955) + - Update collations and make utf8mb4 default (#877, #1054) + - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995) + - Removed CloudSQL support (#993, #1007) + - Add Go Module support (#1003) + +New Features: + + - Implement support of optional TLS (#900) + - Check connection liveness (#934, #964, #997, #1048, #1051, #1052) + - Implement Connector Interface (#941, #958, #1020, #1035) + +Bugfixes: + + - Mark connections as bad on error during ping (#875) + - Mark connections as bad on error during dial (#867) + - Fix connection leak caused by rapid context cancellation (#1024) + - Mark connections as bad on error during Conn.Prepare (#1030) + + +## Version 1.4.1 (2018-11-14) + +Bugfixes: + + - Fix TIME format for binary columns (#818) + - Fix handling of empty auth plugin names (#835) + - Fix caching_sha2_password with empty password (#826) + - Fix canceled context broke mysqlConn (#862) + - Fix OldAuthSwitchRequest support (#870) + - Fix Auth Response packet for cleartext password (#887) + +## Version 1.4 (2018-06-03) + +Changes: + + - Documentation fixes (#530, #535, #567) + - Refactoring (#575, #579, #580, #581, #603, #615, #704) + - Cache column names (#444) + - Sort the DSN parameters in DSNs generated from a config (#637) + - Allow native password authentication by default (#644) + - Use the default port if it is missing in the DSN (#668) + - Removed the `strict` mode (#676) + - Do not query `max_allowed_packet` by default (#680) + - Dropped support Go 1.6 and lower (#696) + - Updated `ConvertValue()` to match the database/sql/driver implementation (#760) + - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783) + - Improved the compatibility of the authentication system (#807) + +New Features: + + - Multi-Results support (#537) + - `rejectReadOnly` DSN option (#604) + - `context.Context` support (#608, #612, #627, #761) + - Transaction isolation level support (#619, #744) + - Read-Only transactions support (#618, #634) + - `NewConfig` function which initializes a config with default values (#679) + - Implemented the `ColumnType` interfaces (#667, #724) + - Support for custom string types in `ConvertValue` (#623) + - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710) + - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802) + - Implemented `driver.SessionResetter` (#779) + - `sha256_password` authentication plugin support (#808) + +Bugfixes: + + - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718) + - Fixed LOAD LOCAL DATA INFILE for empty files (#590) + - Removed columns definition cache since it sometimes cached invalid data (#592) + - Don't mutate registered TLS configs (#600) + - Make RegisterTLSConfig concurrency-safe (#613) + - Handle missing auth data in the handshake packet correctly (#646) + - Do not retry queries when data was written to avoid data corruption (#302, #736) 
+ - Cache the connection pointer for error handling before invalidating it (#678) + - Fixed imports for appengine/cloudsql (#700) + - Fix sending STMT_LONG_DATA for 0 byte data (#734) + - Set correct capacity for []bytes read from length-encoded strings (#766) + - Make RegisterDial concurrency-safe (#773) + + +## Version 1.3 (2016-12-01) + +Changes: + + - Go 1.1 is no longer supported + - Use decimals fields in MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + - Refactoring (#400, #410, #437) + - Adjusted documentation for second generation CloudSQL (#485) + - Documented DSN system var quoting rules (#502) + - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) + +New Features: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Support for returning table alias on Columns() (#289, #359, #382) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) + - Support for uint64 parameters with high bit set (#332, #345) + - Cleartext authentication plugin support (#327) + - Exported ParseDSN function and the Config struct (#403, #419, #429) + - Read / Write timeouts (#401) + - Support for JSON field type (#414) + - Support for multi-statements and multi-results (#411, #431) + - DSN parameter to set the driver-side max_allowed_packet value manually (#489) + - Native password authentication plugin support (#494, #524) + +Bugfixes: + + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + - Handle ERR packets while reading rows (#321) + - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) + - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) + - Actually zero out bytes in handshake response (#378) + - Fixed race condition in registering LOAD DATA INFILE handler (#383) + - Fixed tests with MySQL 5.7.9+ (#380) + - QueryUnescape TLS config names (#397) + - Fixed "broken pipe" error by writing to closed socket (#390) + - Fixed LOAD LOCAL DATA INFILE buffering (#424) + - Fixed parsing of floats into float64 when placeholders are used (#434) + - Fixed DSN tests with Go 1.7+ (#459) + - Handle ERR packets while waiting for EOF (#473) + - Invalidate connection on error while discarding additional results (#513) + - Allow terminating packets of length 0 (#516) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. 
This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 0000000..14e2f77 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 0000000..d2627a4 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,501 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [Connection pool and timeouts](#connection-pool-and-timeouts) + * [context.Context Support](#contextcontext-support) + * [ColumnType Support](#columntype-support) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc) + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support. 
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.10 or higher. We aim to support the 3 latest versions of Go.
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from a shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+```go
+import "database/sql"
+import _ "github.com/go-sql-driver/mysql"
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like the one used by e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
+In general you should use a Unix domain socket if available, and TCP otherwise, for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL server socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        true
+```
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type:           string
+Valid Values:   <name>
+Default:        none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the next charset in the list is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with a fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `checkConnLiveness`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        true
+```
+
+On supported platforms, connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query is retried with another connection.
+`checkConnLiveness=false` disables this liveness check of connections.
+
+##### `collation`
+
+```
+Type:           string
+Valid Values:   <name>
+Default:        utf8mb4_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid collations for a server is retrievable with `SHOW COLLATION`.
+
+The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL versions.
+
+Collations for the charsets "ucs2", "utf16", "utf16le", and "utf32" cannot be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+
+
+##### `clientFoundRows`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of round trips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
+
+*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type:           string
+Valid Values:   <escaped name>
+Default:        UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type:          decimal number
+Default:       4194304
+```
+
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from the server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned; all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
+A date or datetime like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type:           duration
+Default:        0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+guards against a possible race condition during an automatic failover, where the
+MySQL client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is, however, fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However, the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type:           string
+Valid Values:   <name>
+Default:        none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
+
+
+##### `timeout`
+
+```
+Type:           duration
+Default:        OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type:           bool / string
+Valid Values:   true, false, skip-verify, preferred, <name>
+Default:        false
+```
+
+`tls=true` enables a TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side), or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
+
+##### `writeTimeout`
+
+```
+Type:           duration
+Default:        0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+  (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine:
+```
+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+## `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+## `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader`, a handler function that returns an `io.Reader` or `io.ReadCloser` must be registered with `mysql.RegisterReaderHandler(name, handler)`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you no longer need a handler.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
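+
+As a concrete illustration of the whitelisting flow described above, here is one possible minimal sketch. The file path, table name and DSN are placeholders, and error handling is reduced to `log.Fatal` for brevity:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	"github.com/go-sql-driver/mysql" // direct import (no `_`) to reach the helper functions
+)
+
+func main() {
+	// Whitelist the file before referencing it in LOAD DATA LOCAL INFILE.
+	mysql.RegisterLocalFile("/tmp/users.csv")
+
+	db, err := sql.Open("mysql", "user:password@/dbname")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// The path in the statement must match the registered path.
+	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/users.csv' INTO TABLE users"); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+The `io.Reader` variant works the same way: register a handler with `mysql.RegisterReaderHandler(name, handler)` and reference it in the statement as `Reader::<name>`.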
+ + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). + +Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. + + +### Unicode support +Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default. + +Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. + +Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. + +See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support. + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private and commercially. + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0). + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**. + +Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license. + +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE). 
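+
+As a closing example that ties together the DSN, `parseTime` and `time.Time` sections above, here is one possible minimal sketch. It assumes the vendored driver version provides `mysql.NewConfig` and `Config.FormatDSN` (both present in recent releases); the credentials, address and the `events.created_at` column are placeholders:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+	"time"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	// Build the DSN from a Config struct instead of hand-writing the string.
+	cfg := mysql.NewConfig()
+	cfg.User = "user"
+	cfg.Passwd = "password"
+	cfg.Net = "tcp"
+	cfg.Addr = "127.0.0.1:3306"
+	cfg.DBName = "dbname"
+	cfg.ParseTime = true // scan DATE / DATETIME columns into time.Time
+
+	db, err := sql.Open("mysql", cfg.FormatDSN())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	var createdAt time.Time // would be []byte / string without parseTime=true
+	err = db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&createdAt)
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println(createdAt)
+}
+```
+
+If the column can be `NULL`, scan into [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) instead, as described in the `time.Time` section above.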
+ +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go new file mode 100644 index 0000000..fec7040 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/auth.go @@ -0,0 +1,422 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "sync" +) + +// server pub keys registry +var ( + serverPubKeyLock sync.RWMutex + serverPubKeyRegistry map[string]*rsa.PublicKey +) + +// RegisterServerPubKey registers a server RSA public key which can be used to +// send data in a secure manner to the server without receiving the public key +// in a potentially insecure way from the server first. +// Registered keys can afterwards be used adding serverPubKey= to the DSN. +// +// Note: The provided rsa.PublicKey instance is exclusively owned by the driver +// after registering it and may not be modified. +// +// data, err := ioutil.ReadFile("mykey.pem") +// if err != nil { +// log.Fatal(err) +// } +// +// block, _ := pem.Decode(data) +// if block == nil || block.Type != "PUBLIC KEY" { +// log.Fatal("failed to decode PEM block containing public key") +// } +// +// pub, err := x509.ParsePKIXPublicKey(block.Bytes) +// if err != nil { +// log.Fatal(err) +// } +// +// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok { +// mysql.RegisterServerPubKey("mykey", rsaPubKey) +// } else { +// log.Fatal("not a RSA public key") +// } +// +func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) { + serverPubKeyLock.Lock() + if serverPubKeyRegistry == nil { + serverPubKeyRegistry = make(map[string]*rsa.PublicKey) + } + + serverPubKeyRegistry[name] = pubKey + serverPubKeyLock.Unlock() +} + +// DeregisterServerPubKey removes the public key registered with the given name. 
+func DeregisterServerPubKey(name string) { + serverPubKeyLock.Lock() + if serverPubKeyRegistry != nil { + delete(serverPubKeyRegistry, name) + } + serverPubKeyLock.Unlock() +} + +func getServerPubKey(name string) (pubKey *rsa.PublicKey) { + serverPubKeyLock.RLock() + if v, ok := serverPubKeyRegistry[name]; ok { + pubKey = v + } + serverPubKeyLock.RUnlock() + return +} + +// Hash password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Hash password using insecure pre 4.1 method +func scrambleOldPassword(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash([]byte(password)) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +// Hash password using 4.1+ method (SHA1) +func scramblePassword(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write([]byte(password)) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Hash password using MySQL 8+ method (SHA256) +func scrambleSHA256Password(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble)) + + crypt := sha256.New() + crypt.Write([]byte(password)) + message1 := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(message1) + message1Hash := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(message1Hash) + crypt.Write(scramble) + message2 := crypt.Sum(nil) + + for i := range message1 { + message1[i] ^= message2[i] + } + + return message1 +} + +func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) { + plain := make([]byte, len(password)+1) + copy(plain, password) + for i := range plain { + j := i % 
len(seed) + plain[i] ^= seed[j] + } + sha1 := sha1.New() + return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil) +} + +func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error { + enc, err := encryptPassword(mc.cfg.Passwd, seed, pub) + if err != nil { + return err + } + return mc.writeAuthSwitchPacket(enc) +} + +func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) { + switch plugin { + case "caching_sha2_password": + authResp := scrambleSHA256Password(authData, mc.cfg.Passwd) + return authResp, nil + + case "mysql_old_password": + if !mc.cfg.AllowOldPasswords { + return nil, ErrOldPassword + } + // Note: there are edge cases where this should work but doesn't; + // this is currently "wontfix": + // https://github.com/go-sql-driver/mysql/issues/184 + authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0) + return authResp, nil + + case "mysql_clear_password": + if !mc.cfg.AllowCleartextPasswords { + return nil, ErrCleartextPassword + } + // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html + // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html + return append([]byte(mc.cfg.Passwd), 0), nil + + case "mysql_native_password": + if !mc.cfg.AllowNativePasswords { + return nil, ErrNativePassword + } + // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html + // Native password authentication only need and will need 20-byte challenge. + authResp := scramblePassword(authData[:20], mc.cfg.Passwd) + return authResp, nil + + case "sha256_password": + if len(mc.cfg.Passwd) == 0 { + return []byte{0}, nil + } + if mc.cfg.tls != nil || mc.cfg.Net == "unix" { + // write cleartext auth packet + return append([]byte(mc.cfg.Passwd), 0), nil + } + + pubKey := mc.cfg.pubKey + if pubKey == nil { + // request public key from server + return []byte{1}, nil + } + + // encrypted password + enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) + return enc, err + + default: + errLog.Print("unknown auth plugin:", plugin) + return nil, ErrUnknownPlugin + } +} + +func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { + // Read Result Packet + authData, newPlugin, err := mc.readAuthResult() + if err != nil { + return err + } + + // handle auth plugin switch, if requested + if newPlugin != "" { + // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is + // sent and we have to keep using the cipher sent in the init packet. 
+ if authData == nil { + authData = oldAuthData + } else { + // copy data from read buffer to owned slice + copy(oldAuthData, authData) + } + + plugin = newPlugin + + authResp, err := mc.auth(authData, plugin) + if err != nil { + return err + } + if err = mc.writeAuthSwitchPacket(authResp); err != nil { + return err + } + + // Read Result Packet + authData, newPlugin, err = mc.readAuthResult() + if err != nil { + return err + } + + // Do not allow to change the auth plugin more than once + if newPlugin != "" { + return ErrMalformPkt + } + } + + switch plugin { + + // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/ + case "caching_sha2_password": + switch len(authData) { + case 0: + return nil // auth successful + case 1: + switch authData[0] { + case cachingSha2PasswordFastAuthSuccess: + if err = mc.readResultOK(); err == nil { + return nil // auth successful + } + + case cachingSha2PasswordPerformFullAuthentication: + if mc.cfg.tls != nil || mc.cfg.Net == "unix" { + // write cleartext auth packet + err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0)) + if err != nil { + return err + } + } else { + pubKey := mc.cfg.pubKey + if pubKey == nil { + // request public key from server + data, err := mc.buf.takeSmallBuffer(4 + 1) + if err != nil { + return err + } + data[4] = cachingSha2PasswordRequestPublicKey + mc.writePacket(data) + + // parse public key + if data, err = mc.readPacket(); err != nil { + return err + } + + block, _ := pem.Decode(data[1:]) + pkix, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + pubKey = pkix.(*rsa.PublicKey) + } + + // send encrypted password + err = mc.sendEncryptedPassword(oldAuthData, pubKey) + if err != nil { + return err + } + } + return mc.readResultOK() + + default: + return ErrMalformPkt + } + default: + return ErrMalformPkt + } + + case "sha256_password": + switch len(authData) { + case 0: + return nil // auth successful + default: + block, _ := pem.Decode(authData) + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + + // send encrypted password + err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey)) + if err != nil { + return err + } + return mc.readResultOK() + } + + default: + return nil // auth successful + } + + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 0000000..0774c5c --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "io" + "net" + "time" +) + +const defaultBufSize = 4096 +const maxCachedBufSize = 256 * 1024 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. 
+// This buffer is backed by two byte slices in a double-buffering scheme +type buffer struct { + buf []byte // buf is a byte buffer who's length and capacity are equal. + nc net.Conn + idx int + length int + timeout time.Duration + dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer + flipcnt uint // flipccnt is the current buffer counter for double-buffering +} + +// newBuffer allocates and returns a new buffer. +func newBuffer(nc net.Conn) buffer { + fg := make([]byte, defaultBufSize) + return buffer{ + buf: fg, + nc: nc, + dbuf: [2][]byte{fg, nil}, + } +} + +// flip replaces the active buffer with the background buffer +// this is a delayed flip that simply increases the buffer counter; +// the actual flip will be performed the next time we call `buffer.fill` +func (b *buffer) flip() { + b.flipcnt += 1 +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + // fill data into its double-buffering target: if we've called + // flip on this buffer, we'll be copying to the background buffer, + // and then filling it with network data; otherwise we'll just move + // the contents of the current buffer to the front before filling it + dest := b.dbuf[b.flipcnt&1] + + // grow buffer if necessary to fit the whole packet. + if need > len(dest) { + // Round up to the next multiple of the default size + dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + + // if the allocated buffer is not too large, move it to backing storage + // to prevent extra allocations on applications that perform large reads + if len(dest) <= maxCachedBufSize { + b.dbuf[b.flipcnt&1] = dest + } + } + + // if we're filling the fg buffer, move the existing data to the start of it. + // if we're filling the bg buffer, copy over the data + if n > 0 { + copy(dest[:n], b.buf[b.idx:]) + } + + b.buf = dest + b.idx = 0 + + for { + if b.timeout > 0 { + if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { + return err + } + } + + nn, err := b.nc.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// takeBuffer returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + + // test (cheap) general case first + if length <= cap(b.buf) { + return b.buf[:length], nil + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf, nil + } + + // buffer is larger than we want to store. + return make([]byte, length), nil +} + +// takeSmallBuffer is shortcut which can be used if length is +// known to be smaller than defaultBufSize. +// Only one buffer (total) can be used at a time. 
+func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + return b.buf[:length], nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// cap and len of the returned buffer will be equal. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + return b.buf, nil +} + +// store stores buf, an updated buffer, if its suitable to do so. +func (b *buffer) store(buf []byte) error { + if b.length > 0 { + return ErrBusyBuffer + } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) { + b.buf = buf[:cap(buf)] + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 0000000..8d2b556 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,265 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation = "utf8mb4_general_ci" +const binaryCollation = "binary" + +// A list of available collations mapped to the internal ID. +// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID +// +// Handshake packet have only 1 byte for collation_id. So we can't use collations with ID > 255. +// +// ucs2, utf16, and utf32 can't be used for connection charset. +// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset +// They are commented out to reduce this map. 
+var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + //"ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + //"utf16_general_ci": 54, + //"utf16_bin": 55, + //"utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + //"utf32_general_ci": 60, + //"utf32_bin": 61, + //"utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "utf8_tolower_ci": 76, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + //"ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + //"utf16_unicode_ci": 101, + //"utf16_icelandic_ci": 102, + //"utf16_latvian_ci": 103, + //"utf16_romanian_ci": 104, + //"utf16_slovenian_ci": 105, + //"utf16_polish_ci": 106, + //"utf16_estonian_ci": 107, + //"utf16_spanish_ci": 108, + //"utf16_swedish_ci": 109, + //"utf16_turkish_ci": 110, + //"utf16_czech_ci": 111, + //"utf16_danish_ci": 112, + //"utf16_lithuanian_ci": 113, + //"utf16_slovak_ci": 114, + //"utf16_spanish2_ci": 115, + //"utf16_roman_ci": 116, + //"utf16_persian_ci": 117, + //"utf16_esperanto_ci": 118, + //"utf16_hungarian_ci": 119, + //"utf16_sinhala_ci": 120, + //"utf16_german2_ci": 121, + //"utf16_croatian_ci": 122, + //"utf16_unicode_520_ci": 123, + //"utf16_vietnamese_ci": 124, + //"ucs2_unicode_ci": 128, + //"ucs2_icelandic_ci": 129, + //"ucs2_latvian_ci": 130, + //"ucs2_romanian_ci": 131, + //"ucs2_slovenian_ci": 132, + //"ucs2_polish_ci": 133, + //"ucs2_estonian_ci": 134, + //"ucs2_spanish_ci": 135, + //"ucs2_swedish_ci": 136, + //"ucs2_turkish_ci": 137, + //"ucs2_czech_ci": 138, + //"ucs2_danish_ci": 139, + //"ucs2_lithuanian_ci": 140, + //"ucs2_slovak_ci": 141, + //"ucs2_spanish2_ci": 142, + //"ucs2_roman_ci": 143, + //"ucs2_persian_ci": 144, + 
//"ucs2_esperanto_ci": 145, + //"ucs2_hungarian_ci": 146, + //"ucs2_sinhala_ci": 147, + //"ucs2_german2_ci": 148, + //"ucs2_croatian_ci": 149, + //"ucs2_unicode_520_ci": 150, + //"ucs2_vietnamese_ci": 151, + //"ucs2_general_mysql500_ci": 159, + //"utf32_unicode_ci": 160, + //"utf32_icelandic_ci": 161, + //"utf32_latvian_ci": 162, + //"utf32_romanian_ci": 163, + //"utf32_slovenian_ci": 164, + //"utf32_polish_ci": 165, + //"utf32_estonian_ci": 166, + //"utf32_spanish_ci": 167, + //"utf32_swedish_ci": 168, + //"utf32_turkish_ci": 169, + //"utf32_czech_ci": 170, + //"utf32_danish_ci": 171, + //"utf32_lithuanian_ci": 172, + //"utf32_slovak_ci": 173, + //"utf32_spanish2_ci": 174, + //"utf32_roman_ci": 175, + //"utf32_persian_ci": 176, + //"utf32_esperanto_ci": 177, + //"utf32_hungarian_ci": 178, + //"utf32_sinhala_ci": 179, + //"utf32_german2_ci": 180, + //"utf32_croatian_ci": 181, + //"utf32_unicode_520_ci": 182, + //"utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, + "gb18030_chinese_ci": 248, + "gb18030_bin": 249, + "gb18030_unicode_520_ci": 250, + "utf8mb4_0900_ai_ci": 255, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. +var unsafeCollations = map[string]bool{ + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, + "gb18030_chinese_ci": true, + "gb18030_bin": true, + "gb18030_unicode_520_ci": true, +} diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go new file mode 100644 index 0000000..024eb28 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go @@ -0,0 +1,54 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos + +package mysql + +import ( + "errors" + "io" + "net" + "syscall" +) + +var errUnexpectedRead = errors.New("unexpected read from socket") + +func connCheck(conn net.Conn) error { + var sysErr error + + sysConn, ok := conn.(syscall.Conn) + if !ok { + return nil + } + rawConn, err := sysConn.SyscallConn() + if err != nil { + return err + } + + err = rawConn.Read(func(fd uintptr) bool { + var buf [1]byte + n, err := syscall.Read(int(fd), buf[:]) + switch { + case n == 0 && err == nil: + sysErr = io.EOF + case n > 0: + sysErr = errUnexpectedRead + case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK: + sysErr = nil + default: + sysErr = err + } + return true + }) + if err != nil { + return err + } + + return sysErr +} diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go new file mode 100644 index 0000000..ea7fb60 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go @@ -0,0 +1,17 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos + +package mysql + +import "net" + +func connCheck(conn net.Conn) error { + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 0000000..e4bb59e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,651 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" + "io" + "net" + "strconv" + "strings" + "time" +) + +type mysqlConn struct { + buf buffer + netConn net.Conn + rawConn net.Conn // underlying connection when netConn is TLS connection. 
+ affectedRows uint64 + insertId uint64 + cfg *Config + maxAllowedPacket int + maxWriteSize int + writeTimeout time.Duration + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + reset bool // set when the Go SQL package calls ResetSession + + // for context support (Go 1.8+) + watching bool + watcher chan<- context.Context + closech chan struct{} + finished chan<- struct{} + canceled atomicError // set non-nil if conn is canceled + closed atomicBool // set when conn is closed, before closech is closed +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.Params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) markBadConn(err error) error { + if mc == nil { + return err + } + if err != errBadConnNoWrite { + return err + } + return driver.ErrBadConn +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + return mc.begin(false) +} + +func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + var q string + if readOnly { + q = "START TRANSACTION READ ONLY" + } else { + q = "START TRANSACTION" + } + err := mc.exec(q) + if err == nil { + return &mysqlTx{mc}, err + } + return nil, mc.markBadConn(err) +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if !mc.closed.IsSet() { + err = mc.writeCommandPacket(comQuit) + } + + mc.cleanup() + + return +} + +// Closes the network connection and unsets internal variables. Do not call this +// function after successfully authentication, call Close instead. This function +// is called before auth or on auth failure because MySQL will have already +// closed the network connection. +func (mc *mysqlConn) cleanup() { + if !mc.closed.TrySet(true) { + return + } + + // Makes cleanup idempotent + close(mc.closech) + if mc.netConn == nil { + return + } + if err := mc.netConn.Close(); err != nil { + errLog.Print(err) + } +} + +func (mc *mysqlConn) error() error { + if mc.closed.IsSet() { + if err := mc.canceled.Value(); err != nil { + return err + } + return ErrInvalidConn + } + return nil +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + // STMT_PREPARE is safe to retry. So we can return ErrBadConn here. + errLog.Print(err) + return nil, driver.ErrBadConn + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + // Number of ? 
should be same to len(args) + if strings.Count(query, "?") != len(args) { + return "", driver.ErrSkip + } + + buf, err := mc.buf.takeCompleteBuffer() + if err != nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(err) + return "", ErrInvalidConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case uint64: + // Handle uint64 explicitly because our custom ConvertValue emits unsigned values + buf = strconv.AppendUint(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.Loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) 
+ if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxAllowedPacket { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, mc.markBadConn(err) +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + if err := mc.writeCommandPacketStr(comQuery, query); err != nil { + return mc.markBadConn(err) + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + + return mc.discardResults() +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + return mc.query(query, args) +} + +func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + // Columns + rows.rs.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, mc.markBadConn(err) +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}} + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == 
nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} + +// finish is called when the query has canceled. +func (mc *mysqlConn) cancel(err error) { + mc.canceled.Set(err) + mc.cleanup() +} + +// finish is called when the query has succeeded. +func (mc *mysqlConn) finish() { + if !mc.watching || mc.finished == nil { + return + } + select { + case mc.finished <- struct{}{}: + mc.watching = false + case <-mc.closech: + } +} + +// Ping implements driver.Pinger interface +func (mc *mysqlConn) Ping(ctx context.Context) (err error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + if err = mc.watchCancel(ctx); err != nil { + return + } + defer mc.finish() + + if err = mc.writeCommandPacket(comPing); err != nil { + return mc.markBadConn(err) + } + + return mc.readResultOK() +} + +// BeginTx implements driver.ConnBeginTx interface +func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { + level, err := mapIsolationLevel(opts.Isolation) + if err != nil { + return nil, err + } + err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) + if err != nil { + return nil, err + } + } + + return mc.begin(opts.ReadOnly) +} + +func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := mc.query(query, dargs) + if err != nil { + mc.finish() + return nil, err + } + rows.finish = mc.finish + return rows, err +} + +func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + return mc.Exec(query, dargs) +} + +func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + stmt, err := mc.Prepare(query) + mc.finish() + if err != nil { + return nil, err + } + + select { + default: + case <-ctx.Done(): + stmt.Close() + return nil, ctx.Err() + } + return stmt, nil +} + +func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := stmt.query(dargs) + if err != nil { + stmt.mc.finish() + return nil, err + } + rows.finish = stmt.mc.finish + return rows, err +} + +func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + defer stmt.mc.finish() + + return stmt.Exec(dargs) +} + +func (mc *mysqlConn) watchCancel(ctx context.Context) error { + if mc.watching { + // Reach here if canceled, + // so the connection is already invalid + mc.cleanup() + return nil + } + // When ctx is already cancelled, don't watch it. 
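The context-aware methods vendored above (Ping, BeginTx, QueryContext, ExecContext, PrepareContext) are what database/sql dispatches to when a caller passes a context; cancellation reaches the connection through watchCancel and the watcher goroutine. A minimal caller-side sketch of those paths, assuming a reachable server; the DSN, credentials and query are placeholders:

    package main

    import (
        "context"
        "database/sql"
        "log"
        "time"

        _ "github.com/go-sql-driver/mysql"
    )

    func main() {
        // Placeholder DSN; adjust credentials, address and database name.
        db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // QueryContext: the driver cancels the running query when ctx expires.
        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()
        rows, err := db.QueryContext(ctx, "SELECT 1")
        if err != nil {
            log.Fatal(err)
        }
        rows.Close()

        // BeginTx: the options are translated by the driver into
        // "SET TRANSACTION ISOLATION LEVEL ..." and "START TRANSACTION READ ONLY".
        tx, err := db.BeginTx(ctx, &sql.TxOptions{
            Isolation: sql.LevelRepeatableRead,
            ReadOnly:  true,
        })
        if err != nil {
            log.Fatal(err)
        }
        _ = tx.Rollback()
    }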
+ if err := ctx.Err(); err != nil { + return err + } + // When ctx is not cancellable, don't watch it. + if ctx.Done() == nil { + return nil + } + // When watcher is not alive, can't watch it. + if mc.watcher == nil { + return nil + } + + mc.watching = true + mc.watcher <- ctx + return nil +} + +func (mc *mysqlConn) startWatcher() { + watcher := make(chan context.Context, 1) + mc.watcher = watcher + finished := make(chan struct{}) + mc.finished = finished + go func() { + for { + var ctx context.Context + select { + case ctx = <-watcher: + case <-mc.closech: + return + } + + select { + case <-ctx.Done(): + mc.cancel(ctx.Err()) + case <-finished: + case <-mc.closech: + return + } + } + }() +} + +func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} + +// ResetSession implements driver.SessionResetter. +// (From Go 1.10) +func (mc *mysqlConn) ResetSession(ctx context.Context) error { + if mc.closed.IsSet() { + return driver.ErrBadConn + } + mc.reset = true + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go new file mode 100644 index 0000000..d567b4e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connector.go @@ -0,0 +1,146 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "context" + "database/sql/driver" + "net" +) + +type connector struct { + cfg *Config // immutable private copy. +} + +// Connect implements driver.Connector interface. +// Connect returns a connection to the database. +func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + closech: make(chan struct{}), + cfg: c.cfg, + } + mc.parseTime = mc.cfg.ParseTime + + // Connect to Server + dialsLock.RLock() + dial, ok := dials[mc.cfg.Net] + dialsLock.RUnlock() + if ok { + dctx := ctx + if mc.cfg.Timeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) + defer cancel() + } + mc.netConn, err = dial(dctx, mc.cfg.Addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.Timeout} + mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) + } + + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. 
+ mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + // Call startWatcher for context support (From Go 1.8) + mc.startWatcher() + if err := mc.watchCancel(ctx); err != nil { + mc.cleanup() + return nil, err + } + defer mc.finish() + + mc.buf = newBuffer(mc.netConn) + + // Set I/O timeouts + mc.buf.timeout = mc.cfg.ReadTimeout + mc.writeTimeout = mc.cfg.WriteTimeout + + // Reading Handshake Initialization Packet + authData, plugin, err := mc.readHandshakePacket() + if err != nil { + mc.cleanup() + return nil, err + } + + if plugin == "" { + plugin = defaultAuthPlugin + } + + // Send Client Authentication Packet + authResp, err := mc.auth(authData, plugin) + if err != nil { + // try the default auth plugin, if using the requested plugin failed + errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) + plugin = defaultAuthPlugin + authResp, err = mc.auth(authData, plugin) + if err != nil { + mc.cleanup() + return nil, err + } + } + if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil { + mc.cleanup() + return nil, err + } + + // Handle response to auth packet, switch methods if possible + if err = mc.handleAuthResult(authData, plugin); err != nil { + // Authentication failed and MySQL has already closed the connection + // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). + // Do not send COM_QUIT, just cleanup and return the error. + mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +// Driver implements driver.Connector interface. +// Driver returns &MySQLDriver{}. +func (c *connector) Driver() driver.Driver { + return &MySQLDriver{} +} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 0000000..b1e6b85 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,174 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + defaultAuthPlugin = "mysql_native_password" + defaultMaxAllowedPacket = 4 << 20 // 4 MiB + minProtocolVersion = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iAuthMoreData byte = 0x01 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +type fieldType byte + +const ( + fieldTypeDecimal fieldType = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeJSON fieldType = iota + 0xf5 + fieldTypeNewDecimal + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) + +const ( + cachingSha2PasswordRequestPublicKey = 2 + cachingSha2PasswordFastAuthSuccess = 3 + cachingSha2PasswordPerformFullAuthentication = 4 +) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 0000000..c1bdf11 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,107 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Package mysql provides a MySQL driver for Go's database/sql package. +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" + "net" + "sync" +) + +// MySQLDriver is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. +type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +// +// Deprecated: users should register a DialContextFunc instead +type DialFunc func(addr string) (net.Conn, error) + +// DialContextFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDialContext +type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error) + +var ( + dialsLock sync.RWMutex + dials map[string]DialContextFunc +) + +// RegisterDialContext registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// The current context for the connection and its address is passed to the dial function. +func RegisterDialContext(net string, dial DialContextFunc) { + dialsLock.Lock() + defer dialsLock.Unlock() + if dials == nil { + dials = make(map[string]DialContextFunc) + } + dials[net] = dial +} + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +// +// Deprecated: users should call RegisterDialContext instead +func RegisterDial(network string, dial DialFunc) { + RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) { + return dial(addr) + }) +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formatted +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + cfg, err := ParseDSN(dsn) + if err != nil { + return nil, err + } + c := &connector{ + cfg: cfg, + } + return c.Connect(context.Background()) +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} + +// NewConnector returns new driver.Connector. +func NewConnector(cfg *Config) (driver.Connector, error) { + cfg = cfg.Clone() + // normalize the contents of cfg so calls to NewConnector have the same + // behavior as MySQLDriver.OpenConnector + if err := cfg.normalize(); err != nil { + return nil, err + } + return &connector{cfg: cfg}, nil +} + +// OpenConnector implements driver.DriverContext. 
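RegisterDialContext above lets a program route connections for a custom network name through its own dialer (for example to add proxying or per-attempt logging). A sketch under that assumption; the network name "mynet", the logging and the DSN are illustrative only:

    package main

    import (
        "context"
        "database/sql"
        "log"
        "net"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        // Dial through a custom function whenever the DSN uses the "mynet" network.
        mysql.RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
            log.Printf("dialing %s", addr)
            var d net.Dialer
            return d.DialContext(ctx, "tcp", addr)
        })

        // The registered network name takes the place of "tcp" in the DSN.
        db, err := sql.Open("mysql", "user:password@mynet(127.0.0.1:3306)/dbname")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }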
+func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) { + cfg, err := ParseDSN(dsn) + if err != nil { + return nil, err + } + return &connector{ + cfg: cfg, + }, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go new file mode 100644 index 0000000..75c8c24 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -0,0 +1,560 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/rsa" + "crypto/tls" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") +) + +// Config is a configuration parsed from a DSN string. +// If a new Config is created instead of being parsed from a DSN string, +// the NewConfig function should be used, which sets default values. +type Config struct { + User string // Username + Passwd string // Password (requires User) + Net string // Network type + Addr string // Network address (requires Net) + DBName string // Database name + Params map[string]string // Connection parameters + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + ServerPubKey string // Server public key name + pubKey *rsa.PublicKey // Server public key + TLSConfig string // TLS configuration name + tls *tls.Config // TLS configuration + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + + AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE + AllowCleartextPasswords bool // Allows the cleartext client side plugin + AllowNativePasswords bool // Allows the native password authentication method + AllowOldPasswords bool // Allows the old insecure password method + CheckConnLiveness bool // Check connections for liveness before using them + ClientFoundRows bool // Return number of matching rows instead of rows changed + ColumnsWithAlias bool // Prepend table alias to column names + InterpolateParams bool // Interpolate placeholders into query string + MultiStatements bool // Allow multiple statements in one query + ParseTime bool // Parse time values to time.Time + RejectReadOnly bool // Reject read-only connections +} + +// NewConfig creates a new Config and sets default values. 
+func NewConfig() *Config { + return &Config{ + Collation: defaultCollation, + Loc: time.UTC, + MaxAllowedPacket: defaultMaxAllowedPacket, + AllowNativePasswords: true, + CheckConnLiveness: true, + } +} + +func (cfg *Config) Clone() *Config { + cp := *cfg + if cp.tls != nil { + cp.tls = cfg.tls.Clone() + } + if len(cp.Params) > 0 { + cp.Params = make(map[string]string, len(cfg.Params)) + for k, v := range cfg.Params { + cp.Params[k] = v + } + } + if cfg.pubKey != nil { + cp.pubKey = &rsa.PublicKey{ + N: new(big.Int).Set(cfg.pubKey.N), + E: cfg.pubKey.E, + } + } + return &cp +} + +func (cfg *Config) normalize() error { + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return errors.New("default addr for network '" + cfg.Net + "' unknown") + } + } else if cfg.Net == "tcp" { + cfg.Addr = ensureHavePort(cfg.Addr) + } + + switch cfg.TLSConfig { + case "false", "": + // don't set anything + case "true": + cfg.tls = &tls.Config{} + case "skip-verify", "preferred": + cfg.tls = &tls.Config{InsecureSkipVerify: true} + default: + cfg.tls = getTLSConfigClone(cfg.TLSConfig) + if cfg.tls == nil { + return errors.New("invalid value / unknown config name: " + cfg.TLSConfig) + } + } + + if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + cfg.tls.ServerName = host + } + } + + if cfg.ServerPubKey != "" { + cfg.pubKey = getServerPubKey(cfg.ServerPubKey) + if cfg.pubKey == nil { + return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey) + } + } + + return nil +} + +func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) { + buf.Grow(1 + len(name) + 1 + len(value)) + if !*hasParam { + *hasParam = true + buf.WriteByte('?') + } else { + buf.WriteByte('&') + } + buf.WriteString(name) + buf.WriteByte('=') + buf.WriteString(value) +} + +// FormatDSN formats the given Config into a DSN string which can be passed to +// the driver. 
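Rather than hand-writing a DSN, a Config can be built programmatically and either rendered with FormatDSN or handed to NewConnector. A short sketch; the credentials, address and database name are placeholders:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        cfg := mysql.NewConfig() // sets Collation, Loc, MaxAllowedPacket, ... defaults
        cfg.User = "user"
        cfg.Passwd = "password"
        cfg.Net = "tcp"
        cfg.Addr = "127.0.0.1:3306"
        cfg.DBName = "dbname"
        cfg.ParseTime = true
        cfg.Params = map[string]string{"charset": "utf8mb4"}

        // FormatDSN renders the equivalent DSN string ...
        fmt.Println(cfg.FormatDSN())

        // ... while NewConnector skips DSN parsing entirely.
        connector, err := mysql.NewConnector(cfg)
        if err != nil {
            log.Fatal(err)
        }
        db := sql.OpenDB(connector)
        defer db.Close()
    }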
+func (cfg *Config) FormatDSN() string { + var buf bytes.Buffer + + // [username[:password]@] + if len(cfg.User) > 0 { + buf.WriteString(cfg.User) + if len(cfg.Passwd) > 0 { + buf.WriteByte(':') + buf.WriteString(cfg.Passwd) + } + buf.WriteByte('@') + } + + // [protocol[(address)]] + if len(cfg.Net) > 0 { + buf.WriteString(cfg.Net) + if len(cfg.Addr) > 0 { + buf.WriteByte('(') + buf.WriteString(cfg.Addr) + buf.WriteByte(')') + } + } + + // /dbname + buf.WriteByte('/') + buf.WriteString(cfg.DBName) + + // [?param1=value1&...¶mN=valueN] + hasParam := false + + if cfg.AllowAllFiles { + hasParam = true + buf.WriteString("?allowAllFiles=true") + } + + if cfg.AllowCleartextPasswords { + writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true") + } + + if !cfg.AllowNativePasswords { + writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false") + } + + if cfg.AllowOldPasswords { + writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true") + } + + if !cfg.CheckConnLiveness { + writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false") + } + + if cfg.ClientFoundRows { + writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") + } + + if col := cfg.Collation; col != defaultCollation && len(col) > 0 { + writeDSNParam(&buf, &hasParam, "collation", col) + } + + if cfg.ColumnsWithAlias { + writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true") + } + + if cfg.InterpolateParams { + writeDSNParam(&buf, &hasParam, "interpolateParams", "true") + } + + if cfg.Loc != time.UTC && cfg.Loc != nil { + writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String())) + } + + if cfg.MultiStatements { + writeDSNParam(&buf, &hasParam, "multiStatements", "true") + } + + if cfg.ParseTime { + writeDSNParam(&buf, &hasParam, "parseTime", "true") + } + + if cfg.ReadTimeout > 0 { + writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String()) + } + + if cfg.RejectReadOnly { + writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true") + } + + if len(cfg.ServerPubKey) > 0 { + writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey)) + } + + if cfg.Timeout > 0 { + writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String()) + } + + if len(cfg.TLSConfig) > 0 { + writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig)) + } + + if cfg.WriteTimeout > 0 { + writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String()) + } + + if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { + writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket)) + } + + // other params + if cfg.Params != nil { + var params []string + for param := range cfg.Params { + params = append(params, param) + } + sort.Strings(params) + for _, param := range params { + writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param])) + } + } + + return buf.String() +} + +// ParseDSN parses the DSN string to a Config +func ParseDSN(dsn string) (cfg *Config, err error) { + // New config with some default values + cfg = NewConfig() + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; 
k++ { + if dsn[k] == ':' { + cfg.Passwd = dsn[k+1 : j] + break + } + } + cfg.User = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.Addr = dsn[k+1 : i-1] + break + } + } + cfg.Net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' { + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.DBName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if err = cfg.normalize(); err != nil { + return nil, err + } + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.AllowAllFiles, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.AllowCleartextPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use native password authentication + case "allowNativePasswords": + var isBool bool + cfg.AllowNativePasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.AllowOldPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Check connections for Liveness before using them + case "checkConnLiveness": + var isBool bool + cfg.CheckConnLiveness, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.ClientFoundRows, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Collation + case "collation": + cfg.Collation = value + break + + case "columnsWithAlias": + var isBool bool + cfg.ColumnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Compression + case "compress": + return errors.New("compression not implemented yet") + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.InterpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.Loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // multiple statements in one query + case "multiStatements": + var isBool bool + cfg.MultiStatements, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // time.Time parsing + case "parseTime": + var isBool bool + 
cfg.ParseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // I/O read Timeout + case "readTimeout": + cfg.ReadTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // Reject read-only connections + case "rejectReadOnly": + var isBool bool + cfg.RejectReadOnly, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Server public key + case "serverPubKey": + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for server pub key name: %v", err) + } + cfg.ServerPubKey = name + + // Strict mode + case "strict": + panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode") + + // Dial Timeout + case "timeout": + cfg.Timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.TLSConfig = "true" + } else { + cfg.TLSConfig = "false" + } + } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" { + cfg.TLSConfig = vl + } else { + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for TLS config name: %v", err) + } + cfg.TLSConfig = name + } + + // I/O write Timeout + case "writeTimeout": + cfg.WriteTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + case "maxAllowedPacket": + cfg.MaxAllowedPacket, err = strconv.Atoi(value) + if err != nil { + return + } + default: + // lazy init + if cfg.Params == nil { + cfg.Params = make(map[string]string) + } + + if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +func ensureHavePort(addr string) string { + if _, _, err := net.SplitHostPort(addr); err != nil { + return net.JoinHostPort(addr, "3306") + } + return addr +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 0000000..760782f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,65 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "errors" + "fmt" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. +var ( + ErrInvalidConn = errors.New("invalid connection") + ErrMalformPkt = errors.New("malformed packet") + ErrNoTLS = errors.New("TLS requested but server does not support TLS") + ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") + ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrUnknownPlugin = errors.New("this authentication plugin is not supported") + ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") + ErrPktSync = errors.New("commands out of sync. 
You can't run this command now") + ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrBusyBuffer = errors.New("busy buffer") + + // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. + // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn + // to trigger a resend. + // See https://github.com/go-sql-driver/mysql/pull/302 + errBadConnNoWrite = errors.New("bad connection") +) + +var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. +func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go new file mode 100644 index 0000000..e1e2ece --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -0,0 +1,194 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
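Server-side errors surface as *MySQLError, so callers can branch on the MySQL error number, and SetLogger replaces the stderr logger the driver uses for its own critical messages. A sketch of both; the DSN and table are placeholders, and 1062 (ER_DUP_ENTRY, the duplicate-key error on stock MySQL servers) is just the example condition checked here:

    package main

    import (
        "database/sql"
        "errors"
        "fmt"
        "log"
        "os"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        // Route the driver's critical messages somewhere other than stderr.
        _ = mysql.SetLogger(log.New(os.Stdout, "[mysql] ", log.LstdFlags))

        db, err := sql.Open("mysql", "user:password@/dbname") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        _, err = db.Exec("INSERT INTO t (id) VALUES (1)") // hypothetical table
        var me *mysql.MySQLError
        if errors.As(err, &me) && me.Number == 1062 {
            // Number and Message come straight from the server's error packet.
            fmt.Println("duplicate key:", me.Message)
        } else if err != nil {
            log.Fatal(err)
        }
    }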
+ +package mysql + +import ( + "database/sql" + "reflect" +) + +func (mf *mysqlField) typeDatabaseName() string { + switch mf.fieldType { + case fieldTypeBit: + return "BIT" + case fieldTypeBLOB: + if mf.charSet != collations[binaryCollation] { + return "TEXT" + } + return "BLOB" + case fieldTypeDate: + return "DATE" + case fieldTypeDateTime: + return "DATETIME" + case fieldTypeDecimal: + return "DECIMAL" + case fieldTypeDouble: + return "DOUBLE" + case fieldTypeEnum: + return "ENUM" + case fieldTypeFloat: + return "FLOAT" + case fieldTypeGeometry: + return "GEOMETRY" + case fieldTypeInt24: + return "MEDIUMINT" + case fieldTypeJSON: + return "JSON" + case fieldTypeLong: + return "INT" + case fieldTypeLongBLOB: + if mf.charSet != collations[binaryCollation] { + return "LONGTEXT" + } + return "LONGBLOB" + case fieldTypeLongLong: + return "BIGINT" + case fieldTypeMediumBLOB: + if mf.charSet != collations[binaryCollation] { + return "MEDIUMTEXT" + } + return "MEDIUMBLOB" + case fieldTypeNewDate: + return "DATE" + case fieldTypeNewDecimal: + return "DECIMAL" + case fieldTypeNULL: + return "NULL" + case fieldTypeSet: + return "SET" + case fieldTypeShort: + return "SMALLINT" + case fieldTypeString: + if mf.charSet == collations[binaryCollation] { + return "BINARY" + } + return "CHAR" + case fieldTypeTime: + return "TIME" + case fieldTypeTimestamp: + return "TIMESTAMP" + case fieldTypeTiny: + return "TINYINT" + case fieldTypeTinyBLOB: + if mf.charSet != collations[binaryCollation] { + return "TINYTEXT" + } + return "TINYBLOB" + case fieldTypeVarChar: + if mf.charSet == collations[binaryCollation] { + return "VARBINARY" + } + return "VARCHAR" + case fieldTypeVarString: + if mf.charSet == collations[binaryCollation] { + return "VARBINARY" + } + return "VARCHAR" + case fieldTypeYear: + return "YEAR" + default: + return "" + } +} + +var ( + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) + scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) + scanTypeNullTime = reflect.TypeOf(NullTime{}) + scanTypeUint8 = reflect.TypeOf(uint8(0)) + scanTypeUint16 = reflect.TypeOf(uint16(0)) + scanTypeUint32 = reflect.TypeOf(uint32(0)) + scanTypeUint64 = reflect.TypeOf(uint64(0)) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(interface{})) +) + +type mysqlField struct { + tableName string + name string + length uint32 + flags fieldFlag + fieldType fieldType + decimals byte + charSet uint8 +} + +func (mf *mysqlField) scanType() reflect.Type { + switch mf.fieldType { + case fieldTypeTiny: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint8 + } + return scanTypeInt8 + } + return scanTypeNullInt + + case fieldTypeShort, fieldTypeYear: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint16 + } + return scanTypeInt16 + } + return scanTypeNullInt + + case fieldTypeInt24, fieldTypeLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint32 + } + return scanTypeInt32 + } + return scanTypeNullInt + + case fieldTypeLongLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint64 + } + return scanTypeInt64 + } + return scanTypeNullInt + + case fieldTypeFloat: + if 
mf.flags&flagNotNULL != 0 { + return scanTypeFloat32 + } + return scanTypeNullFloat + + case fieldTypeDouble: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat64 + } + return scanTypeNullFloat + + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, + fieldTypeTime: + return scanTypeRawBytes + + case fieldTypeDate, fieldTypeNewDate, + fieldTypeTimestamp, fieldTypeDateTime: + // NullTime is always returned for more consistent behavior as it can + // handle both cases of parseTime regardless if the field is nullable. + return scanTypeNullTime + + default: + return scanTypeUnknown + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/go.mod b/vendor/github.com/go-sql-driver/mysql/go.mod new file mode 100644 index 0000000..fffbf6a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/go.mod @@ -0,0 +1,3 @@ +module github.com/go-sql-driver/mysql + +go 1.10 diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 0000000..273cb0b --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "fmt" + "io" + "os" + "strings" + "sync" +) + +var ( + fileRegister map[string]bool + fileRegisterLock sync.RWMutex + readerRegister map[string]func() io.Reader + readerRegisterLock sync.RWMutex +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + fileRegisterLock.Lock() + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true + fileRegisterLock.Unlock() +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + fileRegisterLock.Lock() + delete(fileRegister, strings.Trim(filePath, `"`)) + fileRegisterLock.Unlock() +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... 
+// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegisterLock.Lock() + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler + readerRegisterLock.Unlock() +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + readerRegisterLock.Lock() + delete(readerRegister, name) + readerRegisterLock.Unlock() +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP + if mc.maxWriteSize < packetSize { + packetSize = mc.maxWriteSize + } + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an an absolute path. See issue #355. + name = name[idx+8:] + + readerRegisterLock.RLock() + handler, inMap := readerRegister[name] + readerRegisterLock.RUnlock() + + if inMap { + rdr = handler() + if rdr != nil { + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + fileRegisterLock.RLock() + fr := fileRegister[name] + fileRegisterLock.RUnlock() + if mc.cfg.AllowAllFiles || fr { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize < packetSize { + packetSize = fileSize + } + } + } + } else { + err = fmt.Errorf("local file '%s' is not registered", name) + } + } + + // send content packets + // if packetSize == 0, the Reader contains no data + if err == nil && packetSize > 0 { + data := make([]byte, 4+packetSize) + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } + + mc.readPacket() + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go new file mode 100644 index 0000000..afa8a89 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go @@ -0,0 +1,50 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "time" +) + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. 
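The LOAD DATA LOCAL INFILE support above only serves files whitelisted with RegisterLocalFile (or any file when allowAllFiles=true) and can also stream from an in-memory reader registered with RegisterReaderHandler. A sketch of the reader path; the DSN, CSV content and table are made up for illustration:

    package main

    import (
        "database/sql"
        "io"
        "log"
        "strings"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        // Serve CSV data from memory under the name "data".
        mysql.RegisterReaderHandler("data", func() io.Reader {
            return strings.NewReader("1,alice\n2,bob\n")
        })
        defer mysql.DeregisterReaderHandler("data")

        db, err := sql.Open("mysql", "user:password@/dbname") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // The special 'Reader::<name>' path is routed to the registered handler.
        _, err = db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE users") // hypothetical table
        if err != nil {
            log.Fatal(err)
        }
    }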
+func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go new file mode 100644 index 0000000..c392594 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.13 + +package mysql + +import ( + "database/sql" +) + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime sql.NullTime diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go new file mode 100644 index 0000000..86d159d --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go @@ -0,0 +1,34 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build !go1.13 + +package mysql + +import ( + "time" +) + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 0000000..82ad7a2 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1342 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var prevData []byte + for { + // read packet header + data, err := mc.buf.readNext(4) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // packet length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + // check packet sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } + return nil, ErrPktSync + } + mc.sequence++ + + // packets with length 0 terminate a previous packet which is a + // multiple of (2^24)-1 bytes long + if pktLen == 0 { + // there was no previous packet + if prevData == nil { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, ErrInvalidConn + } + + return prevData, nil + } + + // read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // return data if this was the last packet + if pktLen < maxPacketSize { + // zero allocations for non-split packets + if prevData == nil { + return data, nil + } + + return append(prevData, data...), nil + } + + prevData = append(prevData, data...) + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxAllowedPacket { + return ErrPktTooLarge + } + + // Perform a stale connection check. We only perform this check for + // the first query on a connection that has been checked out of the + // connection pool: a fresh connection from the pool is more likely + // to be stale, and it has not performed any previous writes that + // could cause data corruption, so it's safe to return ErrBadConn + // if the check fails. 
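The packet reader and writer above frame every MySQL packet with a 4-byte header: a little-endian 24-bit payload length followed by a one-byte sequence number, with payloads of exactly maxPacketSize ((2^24)-1 bytes) continued in follow-up packets. A standalone sketch of just that header arithmetic, separate from the driver's own code:

    package main

    import "fmt"

    // encodeHeader builds a MySQL packet header: 24-bit little-endian length + sequence id.
    func encodeHeader(pktLen int, seq uint8) [4]byte {
        return [4]byte{byte(pktLen), byte(pktLen >> 8), byte(pktLen >> 16), seq}
    }

    // decodeHeader reverses it, returning payload length and sequence id.
    func decodeHeader(h [4]byte) (int, uint8) {
        return int(uint32(h[0]) | uint32(h[1])<<8 | uint32(h[2])<<16), h[3]
    }

    func main() {
        h := encodeHeader(5, 0) // e.g. a 5-byte payload, first packet in the sequence
        n, seq := decodeHeader(h)
        fmt.Println(n, seq) // 5 0
    }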
+ if mc.reset { + mc.reset = false + conn := mc.netConn + if mc.rawConn != nil { + conn = mc.rawConn + } + var err error + // If this connection has a ReadTimeout which we've been setting on + // reads, reset it to its default value before we attempt a non-blocking + // read, otherwise the scheduler will just time us out before we can read + if mc.cfg.ReadTimeout != 0 { + err = conn.SetReadDeadline(time.Time{}) + } + if err == nil && mc.cfg.CheckConnLiveness { + err = connCheck(conn) + } + if err != nil { + errLog.Print("closing bad idle connection: ", err) + mc.Close() + return driver.ErrBadConn + } + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + if mc.writeTimeout > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { + return err + } + } + + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + mc.cleanup() + errLog.Print(ErrMalformPkt) + } else { + if cerr := mc.canceled.Value(); cerr != nil { + return cerr + } + if n == 0 && pktLen == len(data)-4 { + // only for the first loop iteration when nothing was written yet + return errBadConnNoWrite + } + mc.cleanup() + errLog.Print(err) + } + return ErrInvalidConn + } +} + +/****************************************************************************** +* Initialization Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) { + data, err = mc.readPacket() + if err != nil { + // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since + // in connection initialization we don't risk retrying non-idempotent actions. + if err == ErrInvalidConn { + return nil, "", driver.ErrBadConn + } + return + } + + if data[0] == iERR { + return nil, "", mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, "", fmt.Errorf( + "unsupported protocol version %d. 
Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + authData := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, "", ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + if mc.cfg.TLSConfig == "preferred" { + mc.cfg.tls = nil + } else { + return nil, "", ErrNoTLS + } + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + authData = append(authData, data[pos:pos+12]...) + pos += 13 + + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + if end := bytes.IndexByte(data[pos:], 0x00); end != -1 { + plugin = string(data[pos : pos+end]) + } else { + plugin = string(data[pos:]) + } + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], authData) + return b[:], plugin, nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], authData) + return b[:], plugin, nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + clientMultiResults | + mc.flags&clientLongFlag + + if mc.cfg.ClientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + if mc.cfg.MultiStatements { + clientFlags |= clientMultiStatements + } + + // encode length of the auth plugin data + var authRespLEIBuf [9]byte + authRespLen := len(authResp) + authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen)) + if len(authRespLEI) > 1 { + // if the length can not be written in 1 byte, it must be written as a + // length encoded integer + clientFlags |= clientPluginAuthLenEncClientData + } + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.DBName); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data, err := mc.buf.takeSmallBuffer(pktLen + 4) + if err != nil { + // cannot take the buffer. 
Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + var found bool + data[12], found = collations[mc.cfg.Collation] + if !found { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + return errors.New("unknown collation") + } + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.rawConn = mc.netConn + mc.netConn = tlsConn + mc.buf.nc = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + for ; pos < 13+23; pos++ { + data[pos] = 0 + } + + // User [null terminated string] + if len(mc.cfg.User) > 0 { + pos += copy(data[pos:], mc.cfg.User) + } + data[pos] = 0x00 + pos++ + + // Auth Data [length encoded integer] + pos += copy(data[pos:], authRespLEI) + pos += copy(data[pos:], authResp) + + // Databasename [null terminated string] + if len(mc.cfg.DBName) > 0 { + pos += copy(data[pos:], mc.cfg.DBName) + data[pos] = 0x00 + pos++ + } + + pos += copy(data[pos:], plugin) + data[pos] = 0x00 + pos++ + + // Send Auth packet + return mc.writePacket(data[:pos]) +} + +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { + pktLen := 4 + len(authData) + data, err := mc.buf.takeSmallBuffer(pktLen) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add the auth data [EOF] + copy(data[4:], authData) + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data, err := mc.buf.takeSmallBuffer(4 + 1) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data, err := mc.buf.takeBuffer(pktLen + 4) + if err != nil { + // cannot take the buffer. 
Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { + data, err := mc.readPacket() + if err != nil { + return nil, "", err + } + + // packet indicator + switch data[0] { + + case iOK: + return nil, "", mc.handleOkPacket(data) + + case iAuthMoreData: + return data[1:], "", err + + case iEOF: + if len(data) == 1 { + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest + return nil, "mysql_old_password", nil + } + pluginEndIndex := bytes.IndexByte(data, 0x00) + if pluginEndIndex < 0 { + return nil, "", ErrMalformPkt + } + plugin := string(data[1:pluginEndIndex]) + authData := data[pluginEndIndex+1:] + return authData, plugin, nil + + default: // Error otherwise + return nil, "", mc.handleErrorPacket(data) + } +} + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err != nil { + return err + } + + if data[0] == iOK { + return mc.handleOkPacket(data) + } + return mc.handleErrorPacket(data) +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION + // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) + if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { + // Oops; we are connected to a read-only connection, and won't be able + // to issue any write statements. Since RejectReadOnly is configured, + // we throw away this connection hoping this one would have write + // permission. This is specifically for a possible race condition + // during failover (e.g. on AWS Aurora). See README.md for more. 
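The read-only/failover handling above is driven by the driver's `rejectReadOnly` DSN option. A minimal sketch of how an application opts in — the endpoint, credentials and `accounts` table below are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// rejectReadOnly=true makes the driver map errors 1290/1792 to
	// driver.ErrBadConn, so database/sql discards the stale read-only
	// connection and retries the statement on a fresh one after failover.
	dsn := "user:pass@tcp(cluster.example.com:3306)/app?rejectReadOnly=true"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("UPDATE accounts SET balance = balance - 10 WHERE id = ?", 1); err != nil {
		log.Fatal(err)
	}
}
```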
+ // + // We explicitly close the connection before returning + // driver.ErrBadConn to ensure that `database/sql` purges this + // connection and initiates a new one for next statement next time. + mc.Close() + return driver.ErrBadConn + } + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +func readStatus(b []byte) statusFlag { + return statusFlag(b[0]) | statusFlag(b[1])<<8 +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = readStatus(data[1+n+m : 1+n+m+2]) + if mc.status&statusMoreResultsExists != 0 { + return nil + } + + // warning count [2 bytes] + + return nil +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.ColumnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Filler [uint8] + pos++ + + // Charset [charset, collation uint8] + columns[i].charSet = data[pos] + pos += 2 + + // Length [uint32] + columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4]) + pos += 4 + + // Field type [uint8] + columns[i].fieldType = fieldType(data[pos]) + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// 
http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + if rows.rs.done { + return io.EOF + } + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + // server_status [2 bytes] + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.rs.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.Loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + if err != nil { + return err + } + + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + + return columnCount, nil + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxAllowedPacket - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Cannot use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if 
err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "argument count mismatch (got: %d; has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Determine threshold dynamically to avoid packet size shortage. + longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1) + if longDataSize < 64 { + longDataSize = 64 + } + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + var err error + + if len(args) == 0 { + data, err = mc.buf.takeBuffer(minPktLen) + } else { + data, err = mc.buf.takeCompleteBuffer() + // In this case the len(data) == cap(data) which is used to optimise the flow below. + } + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + // No need to clean nullMask as make ensures that. 
+ pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := range nullMask { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case uint64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x80 // type is unsigned + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = byte(fieldTypeDouble) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = byte(fieldTypeTiny) + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < longDataSize { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < longDataSize { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + var a [64]byte + var b = a[:0] + + if v.IsZero() { + b = append(b, "0000-00-00"...) + } else { + b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(b)), + ) + paramValues = append(paramValues, b...) + + default: + return fmt.Errorf("cannot convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) 
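The serialisation above runs whenever a prepared statement is executed through `database/sql`. A minimal sketch of such a caller — the table and column names are placeholders — where each argument corresponds to one of the type cases handled in `writeExecutePacket`:

```go
package main

import (
	"database/sql"
	"time"
)

// execEvent is a hypothetical helper: nil / []byte(nil) arguments set a bit in
// the NULL-bitmap, the remaining values are sent with their binary-protocol
// field types as in the switch above.
func execEvent(db *sql.DB) error {
	stmt, err := db.Prepare("INSERT INTO events (id, score, ok, payload, note, at) VALUES (?, ?, ?, ?, ?, ?)")
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(
		int64(42),   // fieldTypeLongLong
		3.14,        // fieldTypeDouble
		true,        // fieldTypeTiny
		[]byte(nil), // NULL, recorded in the NULL-bitmap
		"hello",     // fieldTypeString, length-encoded
		time.Now(),  // formatted and sent as a datetime string
	)
	return err
}
```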
+ if err = mc.buf.store(data); err != nil { + errLog.Print(err) + return errBadConnNoWrite + } + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +func (mc *mysqlConn) discardResults() error { + for mc.status&statusMoreResultsExists != 0 { + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + } + return nil +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + mc := rows.mc + rows.mc = nil + + // Error otherwise + return mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.rs.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := 
readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.rs.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) + default: + var dstlen uint8 + if rows.rs.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType) + } + } + + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 0000000..c6438d0 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 0000000..888bdb5 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,223 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" + "math" + "reflect" +) + +type resultSet struct { + columns []mysqlField + columnNames []string + done bool +} + +type mysqlRows struct { + mc *mysqlConn + rs resultSet + finish func() +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +func (rows *mysqlRows) Columns() []string { + if rows.rs.columnNames != nil { + return rows.rs.columnNames + } + + columns := make([]string, len(rows.rs.columns)) + if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { + for i := range columns { + if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." 
+ rows.rs.columns[i].name + } else { + columns[i] = rows.rs.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.rs.columns[i].name + } + } + + rows.rs.columnNames = columns + return columns +} + +func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string { + return rows.rs.columns[i].typeDatabaseName() +} + +// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) { +// return int64(rows.rs.columns[i].length), true +// } + +func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { + return rows.rs.columns[i].flags&flagNotNULL == 0, true +} + +func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { + column := rows.rs.columns[i] + decimals := int64(column.decimals) + + switch column.fieldType { + case fieldTypeDecimal, fieldTypeNewDecimal: + if decimals > 0 { + return int64(column.length) - 2, decimals, true + } + return int64(column.length) - 1, decimals, true + case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime: + return decimals, decimals, true + case fieldTypeFloat, fieldTypeDouble: + if decimals == 0x1f { + return math.MaxInt64, math.MaxInt64, true + } + return math.MaxInt64, decimals, true + } + + return 0, 0, false +} + +func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type { + return rows.rs.columns[i].scanType() +} + +func (rows *mysqlRows) Close() (err error) { + if f := rows.finish; f != nil { + f() + rows.finish = nil + } + + mc := rows.mc + if mc == nil { + return nil + } + if err := mc.error(); err != nil { + return err + } + + // flip the buffer for this connection if we need to drain it. + // note that for a successful query (i.e. one where rows.next() + // has been called until it returns false), `rows.mc` will be nil + // by the time the user calls `(*Rows).Close`, so we won't reach this + // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47 + mc.buf.flip() + + // Remove unread packets from stream + if !rows.rs.done { + err = mc.readUntilEOF() + } + if err == nil { + if err = mc.discardResults(); err != nil { + return err + } + } + + rows.mc = nil + return err +} + +func (rows *mysqlRows) HasNextResultSet() (b bool) { + if rows.mc == nil { + return false + } + return rows.mc.status&statusMoreResultsExists != 0 +} + +func (rows *mysqlRows) nextResultSet() (int, error) { + if rows.mc == nil { + return 0, io.EOF + } + if err := rows.mc.error(); err != nil { + return 0, err + } + + // Remove unread packets from stream + if !rows.rs.done { + if err := rows.mc.readUntilEOF(); err != nil { + return 0, err + } + rows.rs.done = true + } + + if !rows.HasNextResultSet() { + rows.mc = nil + return 0, io.EOF + } + rows.rs = resultSet{} + return rows.mc.readResultSetHeaderPacket() +} + +func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) { + for { + resLen, err := rows.nextResultSet() + if err != nil { + return 0, err + } + + if resLen > 0 { + return resLen, nil + } + + rows.rs.done = true + } +} + +func (rows *binaryRows) NextResultSet() error { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) NextResultSet() (err error) { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + 
return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 0000000..f7e3709 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,204 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "io" + "reflect" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.closed.IsSet() { + // driver.Stmt.Close can be called more than once, thus this function + // has to be idempotent. + // See also Issue #450 and golang/go#16019. + //errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, stmt.mc.markBadConn(err) + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + if resLen > 0 { + // Columns + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + + // Rows + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if err := mc.discardResults(); err != nil { + return nil, err + } + + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + return stmt.query(args) +} + +func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { + if stmt.mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, stmt.mc.markBadConn(err) + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + + if resLen > 0 { + rows.mc = mc + rows.rs.columns, err = mc.readColumns(resLen) + } else { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + return rows, err +} + +type converter struct{} + +// ConvertValue mirrors the reference/default converter in database/sql/driver +// with _one_ exception. We support uint64 with their high bit and the default +// implementation does not. 
This function should be kept in sync with +// database/sql/driver defaultConverter.ConvertValue() except for that +// deliberate difference. +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + if vr, ok := v.(driver.Valuer); ok { + sv, err := callValuerValue(vr) + if err != nil { + return nil, err + } + if !driver.IsValue(sv) { + return nil, fmt.Errorf("non-Value type %T returned from Value", sv) + } + return sv, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } else { + return c.ConvertValue(rv.Elem().Interface()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint(), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + case reflect.Bool: + return rv.Bool(), nil + case reflect.Slice: + ek := rv.Type().Elem().Kind() + if ek == reflect.Uint8 { + return rv.Bytes(), nil + } + return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek) + case reflect.String: + return rv.String(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} + +var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// callValuerValue returns vr.Value(), with one exception: +// If vr.Value is an auto-generated method on a pointer type and the +// pointer is nil, it would panic at runtime in the panicwrap +// method. Treat it like nil instead. +// +// This is so people can implement driver.Value on value types and +// still use nil pointers to those types to mean nil/NULL, just like +// string/*string. +// +// This is an exact copy of the same-named unexported function from the +// database/sql package. +func callValuerValue(vr driver.Valuer) (v driver.Value, err error) { + if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr && + rv.IsNil() && + rv.Type().Elem().Implements(valuerReflectType) { + return nil, nil + } + return vr.Value() +} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 0000000..417d727 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 0000000..9552e80 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,701 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Registry for custom tls.Configs +var ( + tlsConfigLock sync.RWMutex + tlsConfigRegistry map[string]*tls.Config +) + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// Note: The provided tls.Config is exclusively owned by the driver after +// registering it. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" { + return fmt.Errorf("key '%s' is reserved", key) + } + + tlsConfigLock.Lock() + if tlsConfigRegistry == nil { + tlsConfigRegistry = make(map[string]*tls.Config) + } + + tlsConfigRegistry[key] = config + tlsConfigLock.Unlock() + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + tlsConfigLock.Lock() + if tlsConfigRegistry != nil { + delete(tlsConfigRegistry, key) + } + tlsConfigLock.Unlock() +} + +func getTLSConfigClone(key string) (config *tls.Config) { + tlsConfigLock.RLock() + if v, ok := tlsConfigRegistry[key]; ok { + config = v.Clone() + } + tlsConfigLock.RUnlock() + return +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("invalid time string: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("invalid DATETIME packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func appendMicrosecs(dst, src []byte, decimals int) []byte { + if decimals <= 0 { + return dst + } + if len(src) == 0 { + return append(dst, ".000000"[:decimals+1]...) 
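The time parsing above only takes effect when the `parseTime` DSN option is enabled; otherwise DATE/DATETIME values come back as raw bytes. A brief sketch, with placeholder credentials and table, of scanning a DATETIME column into `time.Time`:

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true routes DATE/DATETIME/TIMESTAMP columns through
	// parseDateTime / parseBinaryDateTime; loc selects the time.Location used.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/app?parseTime=true&loc=Local")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var createdAt time.Time
	if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&createdAt); err != nil {
		log.Fatal(err)
	}
	log.Println(createdAt)
}
```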
+ } + + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 := byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 := byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 := byte(microsecs) + + switch decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ) + case 1: + return append(dst, '.', + digits10[p1], + ) + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ) + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ) + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ) + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], + ) + } +} + +func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + return zeroDateTime[:length], nil + } + var dst []byte // return value + var p1, p2, p3 byte // current digit pair + + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt := year / 100 + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + return appendMicrosecs(dst, src[2:], int(length)-20), nil +} + +func formatBinaryTime(src []byte, length uint8) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + return zeroDateTime[11 : 11+length], nil + } + var dst []byte // return value + + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + days := binary.LittleEndian.Uint32(src[1:5]) + hours := int64(days)*24 + int64(src[5]) + + if hours >= 100 { + dst = strconv.AppendInt(dst, hours, 10) + } else { + dst = append(dst, digits10[hours], digits01[hours]) + } + + min, sec := src[6], src[7] + dst = append(dst, ':', + digits10[min], digits01[min], ':', + digits10[sec], digits01[sec], + ) + return appendMicrosecs(dst, src[8:], int(length)-9), nil +} + +/****************************************************************************** +* 
Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + // See issue #349 + if len(b) == 0 { + return 0, true, 1 + } + + switch b[0] { + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. +func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. 
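Several helpers in this file read and write MySQL length-encoded integers. A small standalone sketch of the encoding they implement (the function name here is made up for illustration and mirrors `appendLengthEncodedInteger`):

```go
package main

import "fmt"

// lenEncInt mirrors the wire format: values up to 250 fit in one byte; larger
// values get a marker byte (0xfc/0xfd/0xfe) followed by 2, 3 or 8 bytes,
// little-endian.
func lenEncInt(n uint64) []byte {
	switch {
	case n <= 250:
		return []byte{byte(n)}
	case n <= 0xffff:
		return []byte{0xfc, byte(n), byte(n >> 8)}
	case n <= 0xffffff:
		return []byte{0xfd, byte(n), byte(n >> 8), byte(n >> 16)}
	default:
		return []byte{0xfe, byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24),
			byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56)}
	}
}

func main() {
	fmt.Printf("% x\n", lenEncInt(250))   // fa
	fmt.Printf("% x\n", lenEncInt(251))   // fc fb 00
	fmt.Printf("% x\n", lenEncInt(65536)) // fd 00 00 01
}
```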
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +/****************************************************************************** +* Sync utils * +******************************************************************************/ + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} + +// atomicBool is a wrapper around uint32 for usage as a boolean value with +// atomic access. 
+type atomicBool struct { + _noCopy noCopy + value uint32 +} + +// IsSet returns whether the current boolean value is true +func (ab *atomicBool) IsSet() bool { + return atomic.LoadUint32(&ab.value) > 0 +} + +// Set sets the value of the bool regardless of the previous value +func (ab *atomicBool) Set(value bool) { + if value { + atomic.StoreUint32(&ab.value, 1) + } else { + atomic.StoreUint32(&ab.value, 0) + } +} + +// TrySet sets the value of the bool and returns whether the value changed +func (ab *atomicBool) TrySet(value bool) bool { + if value { + return atomic.SwapUint32(&ab.value, 1) == 0 + } + return atomic.SwapUint32(&ab.value, 0) > 0 +} + +// atomicError is a wrapper for atomically accessed error values +type atomicError struct { + _noCopy noCopy + value atomic.Value +} + +// Set sets the error value regardless of the previous value. +// The value must not be nil +func (ae *atomicError) Set(value error) { + ae.value.Store(value) +} + +// Value returns the current error value +func (ae *atomicError) Value() error { + if v := ae.value.Load(); v != nil { + // this will panic if the value doesn't implement the error interface + return v.(error) + } + return nil +} + +func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { + dargs := make([]driver.Value, len(named)) + for n, param := range named { + if len(param.Name) > 0 { + // TODO: support the use of Named Parameters #561 + return nil, errors.New("mysql: driver does not support the use of Named Parameters") + } + dargs[n] = param.Value + } + return dargs, nil +} + +func mapIsolationLevel(level driver.IsolationLevel) (string, error) { + switch sql.IsolationLevel(level) { + case sql.LevelRepeatableRead: + return "REPEATABLE READ", nil + case sql.LevelReadCommitted: + return "READ COMMITTED", nil + case sql.LevelReadUncommitted: + return "READ UNCOMMITTED", nil + case sql.LevelSerializable: + return "SERIALIZABLE", nil + default: + return "", fmt.Errorf("mysql: unsupported isolation level: %v", level) + } +} diff --git a/vendor/github.com/kennygrant/sanitize/.gitignore b/vendor/github.com/kennygrant/sanitize/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/kennygrant/sanitize/.travis.yml b/vendor/github.com/kennygrant/sanitize/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/kennygrant/sanitize/LICENSE b/vendor/github.com/kennygrant/sanitize/LICENSE new file mode 100644 index 0000000..749ebb2 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 Mechanism Design. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/kennygrant/sanitize/README.md b/vendor/github.com/kennygrant/sanitize/README.md new file mode 100644 index 0000000..4401ef7 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/README.md @@ -0,0 +1,62 @@ +sanitize [![GoDoc](https://godoc.org/github.com/kennygrant/sanitize?status.svg)](https://godoc.org/github.com/kennygrant/sanitize) [![Go Report Card](https://goreportcard.com/badge/github.com/kennygrant/sanitize)](https://goreportcard.com/report/github.com/kennygrant/sanitize) [![CircleCI](https://circleci.com/gh/kennygrant/sanitize.svg?style=svg)](https://circleci.com/gh/kennygrant/sanitize) +======== + +Package sanitize provides functions to sanitize html and paths with go (golang). + +FUNCTIONS + + +```go +sanitize.Accents(s string) string +``` + +Accents replaces a set of accented characters with ascii equivalents. + +```go +sanitize.BaseName(s string) string +``` + +BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -. Unlike Name no attempt is made to normalise text as a path. + +```go +sanitize.HTML(s string) string +``` + +HTML strips html tags with a very simple parser, replace common entities, and escape < and > in the result. The result is intended to be used as plain text. + +```go +sanitize.HTMLAllowing(s string, args...[]string) (string, error) +``` + +HTMLAllowing parses html and allow certain tags and attributes from the lists optionally specified by args - args[0] is a list of allowed tags, args[1] is a list of allowed attributes. If either is missing default sets are used. + +```go +sanitize.Name(s string) string +``` + +Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters. + +```go +sanitize.Path(s string) string +``` + +Path makes a string safe to use as an url path. + + +Changes +------- + +Version 1.2 + +Adjusted HTML function to avoid linter warning +Added more tests from https://githubengineering.com/githubs-post-csp-journey/ +Chnaged name of license file +Added badges and change log to readme + +Version 1.1 +Fixed type in comments. 
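A short sketch of calling the package as described in the function docs above; the inputs are placeholders and exact output formatting is not asserted here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/kennygrant/sanitize"
)

func main() {
	// Strip all tags for plain-text output.
	fmt.Println(sanitize.HTML(`<p>Hello <script>alert(1)</script><b>world</b></p>`))

	// Keep a whitelist of tags and attributes instead.
	out, err := sanitize.HTMLAllowing(`<p onclick="x()">Hi <em>there</em></p>`, []string{"p", "em"}, []string{"class"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)

	// Make strings safe for file names and URL paths.
	fmt.Println(sanitize.Name("Résumé (final).pdf"))
	fmt.Println(sanitize.Path("My Documents/Résumé.pdf"))
}
```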
+Merge pull request from Povilas Balzaravicius Pawka + - replace br tags with newline even when they contain a space + +Version 1.0 +First release \ No newline at end of file diff --git a/vendor/github.com/kennygrant/sanitize/sanitize.go b/vendor/github.com/kennygrant/sanitize/sanitize.go new file mode 100644 index 0000000..2932209 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/sanitize.go @@ -0,0 +1,388 @@ +// Package sanitize provides functions for sanitizing text. +package sanitize + +import ( + "bytes" + "html" + "html/template" + "io" + "path" + "regexp" + "strings" + + parser "golang.org/x/net/html" +) + +var ( + ignoreTags = []string{"title", "script", "style", "iframe", "frame", "frameset", "noframes", "noembed", "embed", "applet", "object", "base"} + + defaultTags = []string{"h1", "h2", "h3", "h4", "h5", "h6", "div", "span", "hr", "p", "br", "b", "i", "strong", "em", "ol", "ul", "li", "a", "img", "pre", "code", "blockquote", "article", "section"} + + defaultAttributes = []string{"id", "class", "src", "href", "title", "alt", "name", "rel"} +) + +// HTMLAllowing sanitizes html, allowing some tags. +// Arrays of allowed tags and allowed attributes may optionally be passed as the second and third arguments. +func HTMLAllowing(s string, args ...[]string) (string, error) { + + allowedTags := defaultTags + if len(args) > 0 { + allowedTags = args[0] + } + allowedAttributes := defaultAttributes + if len(args) > 1 { + allowedAttributes = args[1] + } + + // Parse the html + tokenizer := parser.NewTokenizer(strings.NewReader(s)) + + buffer := bytes.NewBufferString("") + ignore := "" + + for { + tokenType := tokenizer.Next() + token := tokenizer.Token() + + switch tokenType { + + case parser.ErrorToken: + err := tokenizer.Err() + if err == io.EOF { + return buffer.String(), nil + } + return "", err + + case parser.StartTagToken: + + if len(ignore) == 0 && includes(allowedTags, token.Data) { + token.Attr = cleanAttributes(token.Attr, allowedAttributes) + buffer.WriteString(token.String()) + } else if includes(ignoreTags, token.Data) { + ignore = token.Data + } + + case parser.SelfClosingTagToken: + + if len(ignore) == 0 && includes(allowedTags, token.Data) { + token.Attr = cleanAttributes(token.Attr, allowedAttributes) + buffer.WriteString(token.String()) + } else if token.Data == ignore { + ignore = "" + } + + case parser.EndTagToken: + if len(ignore) == 0 && includes(allowedTags, token.Data) { + token.Attr = []parser.Attribute{} + buffer.WriteString(token.String()) + } else if token.Data == ignore { + ignore = "" + } + + case parser.TextToken: + // We allow text content through, unless ignoring this entire tag and its contents (including other tags) + if ignore == "" { + buffer.WriteString(token.String()) + } + case parser.CommentToken: + // We ignore comments by default + case parser.DoctypeToken: + // We ignore doctypes by default - html5 does not require them and this is intended for sanitizing snippets of text + default: + // We ignore unknown token types by default + + } + + } + +} + +// HTML strips html tags, replace common entities, and escapes <>&;'" in the result. +// Note the returned text may contain entities as it is escaped by HTMLEscapeString, and most entities are not translated. +func HTML(s string) (output string) { + + // Shortcut strings with no tags in them + if !strings.ContainsAny(s, "<>") { + output = s + } else { + + // First remove line breaks etc as these have no meaning outside html tags (except pre) + // this means pre sections will lose formatting... 
but will result in less unintentional paras. + s = strings.Replace(s, "\n", "", -1) + + // Then replace line breaks with newlines, to preserve that formatting + s = strings.Replace(s, "</p>", "\n", -1) + s = strings.Replace(s, "<br>", "\n", -1) + s = strings.Replace(s, "</br>", "\n", -1) + s = strings.Replace(s, "<br/>", "\n", -1) + s = strings.Replace(s, "<br />", "\n", -1) + + // Walk through the string removing all tags + b := bytes.NewBufferString("") + inTag := false + for _, r := range s { + switch r { + case '<': + inTag = true + case '>': + inTag = false + default: + if !inTag { + b.WriteRune(r) + } + } + } + output = b.String() + }
+ + // Remove a few common harmless entities, to arrive at something more like plain text + output = strings.Replace(output, "&#8216;", "'", -1) + output = strings.Replace(output, "&#8217;", "'", -1) + output = strings.Replace(output, "&#8220;", "\"", -1) + output = strings.Replace(output, "&#8221;", "\"", -1) + output = strings.Replace(output, "&nbsp;", " ", -1) + output = strings.Replace(output, "&quot;", "\"", -1) + output = strings.Replace(output, "&apos;", "'", -1)
+ + // Translate some entities into their plain text equivalent (for example accents, if encoded as entities) + output = html.UnescapeString(output) + + // In case we have missed any tags above, escape the text - removes <, >, &, ' and ". + output = template.HTMLEscapeString(output)
+ + // After processing, remove some harmless entities &, ' and " which are encoded by HTMLEscapeString + output = strings.Replace(output, "&#34;", "\"", -1) + output = strings.Replace(output, "&#39;", "'", -1) + output = strings.Replace(output, "&amp; ", "& ", -1) // NB space after + output = strings.Replace(output, "&amp;amp; ", "& ", -1) // NB space after + + return output +}
+ +// We are very restrictive as this is intended for ascii url slugs +var illegalPath = regexp.MustCompile(`[^[:alnum:]\~\-\./]`) + +// Path makes a string safe to use as a URL path, +// removing accents and replacing separators with -. +// The path may still start at / and is not intended +// for use as a file system path without prefix. +func Path(s string) string { + // Start with lowercase string + filePath := strings.ToLower(s) + filePath = strings.Replace(filePath, "..", "", -1) + filePath = path.Clean(filePath) + + // Remove illegal characters for paths, flattening accents + // and replacing some common separators with - + filePath = cleanString(filePath, illegalPath) + + // NB this may be of length 0, caller must check + return filePath +}
+ +// Remove all other unrecognised characters apart from +var illegalName = regexp.MustCompile(`[^[:alnum:]-.]`) + +// Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters. +func Name(s string) string { + // Start with lowercase string + fileName := strings.ToLower(s) + fileName = path.Clean(path.Base(fileName)) + + // Remove illegal characters for names, replacing some common separators with - + fileName = cleanString(fileName, illegalName) + + // NB this may be of length 0, caller must check + return fileName +}
+ +// Replace these separators with - +var baseNameSeparators = regexp.MustCompile(`[./]`) + +// BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -. +// No attempt is made to normalise a path or normalise case. +func BaseName(s string) string { + + // Replace certain joining characters with a dash + baseName := baseNameSeparators.ReplaceAllString(s, "-") + + // Remove illegal characters for names, replacing some common separators with - + baseName = cleanString(baseName, illegalName) + + // NB this may be of length 0, caller must check + return baseName +}
+ +// A very limited list of transliterations to catch common european names translated to urls. +// This set could be expanded with at least caps and many more characters.
+var transliterations = map[rune]string{ + 'À': "A", + 'Á': "A", + 'Â': "A", + 'Ã': "A", + 'Ä': "A", + 'Å': "AA", + 'Æ': "AE", + 'Ç': "C", + 'È': "E", + 'É': "E", + 'Ê': "E", + 'Ë': "E", + 'Ì': "I", + 'Í': "I", + 'Î': "I", + 'Ï': "I", + 'Ð': "D", + 'Ł': "L", + 'Ñ': "N", + 'Ò': "O", + 'Ó': "O", + 'Ô': "O", + 'Õ': "O", + 'Ö': "OE", + 'Ø': "OE", + 'Œ': "OE", + 'Ù': "U", + 'Ú': "U", + 'Ü': "UE", + 'Û': "U", + 'Ý': "Y", + 'Þ': "TH", + 'ẞ': "SS", + 'à': "a", + 'á': "a", + 'â': "a", + 'ã': "a", + 'ä': "ae", + 'å': "aa", + 'æ': "ae", + 'ç': "c", + 'è': "e", + 'é': "e", + 'ê': "e", + 'ë': "e", + 'ì': "i", + 'í': "i", + 'î': "i", + 'ï': "i", + 'ð': "d", + 'ł': "l", + 'ñ': "n", + 'ń': "n", + 'ò': "o", + 'ó': "o", + 'ô': "o", + 'õ': "o", + 'ō': "o", + 'ö': "oe", + 'ø': "oe", + 'œ': "oe", + 'ś': "s", + 'ù': "u", + 'ú': "u", + 'û': "u", + 'ū': "u", + 'ü': "ue", + 'ý': "y", + 'ÿ': "y", + 'ż': "z", + 'þ': "th", + 'ß': "ss", +} + +// Accents replaces a set of accented characters with ascii equivalents. +func Accents(s string) string { + // Replace some common accent characters + b := bytes.NewBufferString("") + for _, c := range s { + // Check transliterations first + if val, ok := transliterations[c]; ok { + b.WriteString(val) + } else { + b.WriteRune(c) + } + } + return b.String() +} + +var ( + // If the attribute contains data: or javascript: anywhere, ignore it + // we don't allow this in attributes as it is so frequently used for xss + // NB we allow spaces in the value, and lowercase. + illegalAttr = regexp.MustCompile(`(d\s*a\s*t\s*a|j\s*a\s*v\s*a\s*s\s*c\s*r\s*i\s*p\s*t\s*)\s*:`) + + // We are far more restrictive with href attributes. + legalHrefAttr = regexp.MustCompile(`\A[/#][^/\\]?|mailto:|http://|https://`) +) + +// cleanAttributes returns an array of attributes after removing malicious ones. +func cleanAttributes(a []parser.Attribute, allowed []string) []parser.Attribute { + if len(a) == 0 { + return a + } + + var cleaned []parser.Attribute + for _, attr := range a { + if includes(allowed, attr.Key) { + + val := strings.ToLower(attr.Val) + + // Check for illegal attribute values + if illegalAttr.FindString(val) != "" { + attr.Val = "" + } + + // Check for legal href values - / mailto:// http:// or https:// + if attr.Key == "href" { + if legalHrefAttr.FindString(val) == "" { + attr.Val = "" + } + } + + // If we still have an attribute, append it to the array + if attr.Val != "" { + cleaned = append(cleaned, attr) + } + } + } + return cleaned +} + +// A list of characters we consider separators in normal strings and replace with our canonical separator - rather than removing. +var ( + separators = regexp.MustCompile(`[ &_=+:]`) + + dashes = regexp.MustCompile(`[\-]+`) +) + +// cleanString replaces separators with - and removes characters listed in the regexp provided from string. +// Accents, spaces, and all characters not in A-Za-z0-9 are replaced. +func cleanString(s string, r *regexp.Regexp) string { + + // Remove any trailing space to avoid ending on - + s = strings.Trim(s, " ") + + // Flatten accents first so that if we remove non-ascii we still get a legible name + s = Accents(s) + + // Replace certain joining characters with a dash + s = separators.ReplaceAllString(s, "-") + + // Remove all other unrecognised characters - NB we do allow any printable characters + s = r.ReplaceAllString(s, "") + + // Remove any multiple dashes caused by replacements above + s = dashes.ReplaceAllString(s, "-") + + return s +} + +// includes checks for inclusion of a string in a []string. 
+func includes(a []string, s string) bool { + for _, as := range a { + if as == s { + return true + } + } + return false +} diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 0000000..0f1d00e --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,4 @@ +.db +*.test +*~ +*.swp diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh new file mode 100644 index 0000000..ebf4470 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +set -eu + +client_configure() { + sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key +} + +pgdg_repository() { + local sourcelist='sources.list.d/postgresql.list' + + curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - + echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" + sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update +} + +postgresql_configure() { + sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config + local all all trust + hostnossl all pqgossltest 127.0.0.1/32 reject + hostnossl all pqgosslcert 127.0.0.1/32 reject + hostssl all pqgossltest 127.0.0.1/32 trust + hostssl all pqgosslcert 127.0.0.1/32 cert + host all all 127.0.0.1/32 trust + hostnossl all pqgossltest ::1/128 reject + hostnossl all pqgosslcert ::1/128 reject + hostssl all pqgossltest ::1/128 trust + hostssl all pqgosslcert ::1/128 cert + host all all ::1/128 trust + config + + xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates + certs/root.crt + certs/server.crt + certs/server.key + certificates + + sort -VCu <<-versions || + $PGVERSION + 9.2 + versions + sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config + ssl_ca_file = 'root.crt' + ssl_cert_file = 'server.crt' + ssl_key_file = 'server.key' + config + + echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null + + sudo service postgresql restart +} + +postgresql_install() { + xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages + postgresql-$PGVERSION + postgresql-server-dev-$PGVERSION + postgresql-contrib-$PGVERSION + packages +} + +postgresql_uninstall() { + sudo service postgresql stop + xargs sudo apt-get -y --purge remove <<-packages + libpq-dev + libpq5 + postgresql + postgresql-client-common + postgresql-common + packages + sudo rm -rf /var/lib/postgresql +} + +$1 diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml new file mode 100644 index 0000000..3498c53 --- /dev/null +++ b/vendor/github.com/lib/pq/.travis.yml @@ -0,0 +1,44 @@ +language: go + +go: + - 1.13.x + - 1.14.x + - master + +sudo: true + +env: + global: + - PGUSER=postgres + - PQGOSSLTESTS=1 + - PQSSLCERTTEST_PATH=$PWD/certs + - PGHOST=127.0.0.1 + matrix: + - PGVERSION=10 + - PGVERSION=9.6 + - PGVERSION=9.5 + - PGVERSION=9.4 + +before_install: + - ./.travis.sh postgresql_uninstall + - ./.travis.sh pgdg_repository + - ./.travis.sh postgresql_install + - ./.travis.sh postgresql_configure + - ./.travis.sh client_configure + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint + - GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3 + +before_script: + - createdb pqgotest + - createuser -DRS pqgossltest + - createuser -DRS pqgosslcert 
+ +script: + - > + goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' + - go vet ./... + - staticcheck -go 1.13 ./... + - golint ./... + - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... + - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md new file mode 100644 index 0000000..5773904 --- /dev/null +++ b/vendor/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md new file mode 100644 index 0000000..c972a86 --- /dev/null +++ b/vendor/github.com/lib/pq/README.md @@ -0,0 +1,30 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) + +## Install + + go get github.com/lib/pq + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support +* GSS (Kerberos) auth + +## Tests + +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. + +## Status + +This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained. diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md new file mode 100644 index 0000000..f050211 --- /dev/null +++ b/vendor/github.com/lib/pq/TESTS.md @@ -0,0 +1,33 @@ +# Tests + +## Running Tests + +`go test` is used for testing. A running PostgreSQL +server is required, with the ability to log in. The +database to connect to test with is "pqgotest," on +"localhost" but these can be overridden using [environment +variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). + +Example: + + PGHOST=/run/postgresql go test + +## Benchmarks + +A benchmark suite can be run as part of the tests: + + go test -bench . 
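+As a rough sketch of what such a test environment exercises (the DSN below is an illustrative placeholder; the PG* environment variables described above can stand in for its fields), a minimal program using the driver might look like:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "github.com/lib/pq" // register the "postgres" driver
+)
+
+func main() {
+	// Placeholder DSN; adjust host/user/dbname/sslmode to the local setup.
+	db, err := sql.Open("postgres", "host=localhost port=5432 user=postgres dbname=postgres sslmode=disable")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// Simple round trip to confirm the connection works.
+	var version string
+	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(version)
+}
+```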
+ +## Example setup (Docker) + +Run a postgres container: + +``` +docker run --expose 5432:5432 postgres +``` + +Run tests: + +``` +PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test +``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 0000000..e4933e2 --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,756 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []string: + return (*StringArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]string: + return (*StringArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. + ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. +func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. 
+ b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. 
+type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. +func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. 
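+// For example (illustrative values), GenericArray{A: [][]int64{{1, 2}, {3, 4}}} is rendered by Value as the text "{{1,2},{3,4}}".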
+func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. +func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. +func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. 
+// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) + } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. 
+// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 0000000..4b0a0a8 --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func (b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf 
[]byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 0000000..f313c14 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,1996 @@ +package pq + +import ( + "bufio" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func (d *Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. 
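+// A custom Dialer can be supplied via DialOpen; when none is given, Open falls back to the defaultDialer below, which wraps net.Dialer.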
+type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. +type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If true, this connection is bad and all public-facing functions should + // return ErrBadConn. + bad bool + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS +} + +// Handle driver-side settings in parsed connection string. 
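+// For example, a connection string containing "binary_parameters=yes" (an illustrative fragment) enables cn.binaryParameters; only the literal values "yes" and "no" are accepted for these driver-side settings.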
+func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. +func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.dialer = d + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. 
+ defer errRecoverNoErrBadConn(&err) + + o := c.opts + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. 
+// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. + o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.bad = true + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.bad = true + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.bad = true + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. 
+ if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "COMMIT" { + cn.bad = true + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if cn.bad { + return driver.ErrBadConn + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.bad = true + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.bad = true + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' && res.colNames == nil { + res.result, res.tag = cn.parseComplete(r.string()) + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.bad = true + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. 
+ default: + cn.bad = true + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if cn.bad { + return nil, driver.ErrBadConn + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +func (cn *conn) send(m *writeBuf) { + _, err := cn.c.Write(m.wrap()) + if err != nil { + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.bad = true + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + case "krbsrvname": + return true + case "krbspn": + return true + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. 
If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. 
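+	// The authentication request code handled next, 10, is AuthenticationSASL:
+	// the server offers SASL and the code below negotiates SCRAM-SHA-256,
+	// expecting AuthenticationSASLContinue (11) and AuthenticationSASLFinal
+	// (12) responses between the client steps.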
+ + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). +var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if st.cn.bad { + return driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.bad = true + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.bad = true + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if st.cn.bad { + return nil, driver.ErrBadConn + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only 
the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. + if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.bad = true + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.bad = true + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+ if rs.done { + return nil + } + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if conn.bad { + return driver.ErrBadConn + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.bad = true + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) + } + return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. +func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. + // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. 
Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. + var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + var minor int + _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.bad = true + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe statement 
response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.bad = true + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.bad = true + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.bad = true + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.bad = true + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.bad = true + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + } +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. 
+// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). + switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". 
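+// For example, "UTF-8", "utf_8" and "UNICODE" all pass this check, while
+// "latin1" does not.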
+func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 0000000..09e2ea4 --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,149 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery(";") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. 
+ ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + _ = cn.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + c, err := dial(ctx, cn.dialer, cn.opts) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(cn.opts) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 0000000..d7d4726 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,115 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Driver returns the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. 
+ // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. + if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 0000000..38d5bb6 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,307 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." 
+ QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + driver.Result + + closed bool + + sync.Mutex // guards err + err error +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad() + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad() + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad() + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) + case 'N': + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad() + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad() { + ci.Lock() + ci.cn.bad = true + ci.Unlock() +} + +func (ci *copyin) isBad() bool { + ci.Lock() + b := ci.cn.bad + ci.Unlock() + return b +} + +func (ci *copyin) isErrorSet() bool { + ci.Lock() + isSet := (ci.err != nil) + ci.Unlock() + return isSet +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. 
+func (ci *copyin) setError(err error) { + ci.Lock() + if ci.err == nil { + ci.err = err + } + ci.Unlock() +} + +func (ci *copyin) setResult(result driver.Result) { + ci.Lock() + ci.Result = result + ci.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.Lock() + result := ci.Result + if result == nil { + return driver.RowsAffected(0) + } + ci.Unlock() + return result +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if ci.isBad() { + return nil, driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if ci.isErrorSet() { + return nil, ci.err + } + + if len(v) == 0 { + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if ci.isBad() { + return driver.ErrBadConn + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. + err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if ci.isErrorSet() { + err = ci.err + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 0000000..b571848 --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,268 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. 
This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to bind to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matching with the same rules as Postgres. It is an error to provide +any other value. + +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. 
+To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. 
A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. 
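+
+As a minimal sketch of the Listener flow described under Notifications above
+(the connection string, channel name and reconnect intervals here are
+placeholders, error handling is abbreviated, and the log, time and pq imports
+are assumed as in the earlier examples):
+
+	connStr := "dbname=pqgotest sslmode=disable"
+	reportProblem := func(ev pq.ListenerEventType, err error) {
+		if err != nil {
+			log.Println("listener event error:", err)
+		}
+	}
+
+	listener := pq.NewListener(connStr, 10*time.Second, time.Minute, reportProblem)
+	if err := listener.Listen("jobs"); err != nil {
+		log.Fatal(err)
+	}
+
+	for n := range listener.Notify {
+		if n == nil {
+			// a nil notification is delivered after the connection is re-established
+			continue
+		}
+		log.Printf("notification on channel %q: %s", n.Channel, n.Extra)
+	}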
+*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 0000000..c4dafe2 --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,622 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy results. 
+ f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // check for a 30-minute-offset timezone + if (typ == oid.T_timestamptz || typ == oid.T_timetz) && + str[len(str)-3] == ':' { + f += ":00" + } + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. + str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + if is2400Time { + t = t.Add(24 * time.Hour) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. 
+type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. +func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. 
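Editorial aside (not part of the vendored file): a minimal sketch of the EnableInfinityTs contract documented above. The sentinel times, DSN and query are assumptions for illustration; the call must happen once, before any connection is opened:

package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Arbitrary sentinel choice for this sketch; must run before sql.Open.
	pq.EnableInfinityTs(
		time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
		time.Date(9999, time.December, 31, 23, 59, 59, 0, time.UTC),
	)

	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var t time.Time
	if err := db.QueryRow(`SELECT 'infinity'::timestamptz`).Scan(&t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t) // the positive sentinel, instead of a Scan error on []byte
}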
It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset offset provided by the Postgres server. +func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is Gregorian year, not ISO Year + // In Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.' { + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! 
(t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset %= 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. +func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseInt(string(s[1:4]), 8, 9) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. 
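Editorial aside (not part of the vendored file): a short sketch of the exported pieces defined above, pq.NullTime for nullable timestamp columns and the ParseTimestamp/FormatTimestamp text codec. The DSN, table and column names are placeholders:

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// pq.NullTime distinguishes NULL from the zero time.
	var deletedAt pq.NullTime
	if err := db.QueryRow(`SELECT deleted_at FROM users WHERE id = $1`, 1).Scan(&deletedAt); err != nil {
		log.Fatal(err)
	}
	if deletedAt.Valid {
		fmt.Println("deleted at", deletedAt.Time)
	} else {
		fmt.Println("never deleted (NULL)")
	}

	// The text codec is also exported for use outside the driver.
	t, err := pq.ParseTimestamp(nil, "2020-11-28 22:59:51.123456+05:30")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", pq.FormatTimestamp(t))
}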
+func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 0000000..3d66ba7 --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,515 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". 
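Editorial aside (not part of the vendored file): a sketch of how a *pq.Error surfaces through database/sql and how ErrorCode.Name and ErrorCode.Class map onto the condition-name table that follows. The DSN and statement are placeholders, and the example assumes the insert hits an existing unique constraint:

package main

import (
	"database/sql"
	"errors"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec(`INSERT INTO users (email) VALUES ($1)`, "dup@example.com")

	var pqErr *pq.Error
	if errors.As(err, &pqErr) {
		fmt.Println("severity:", pqErr.Severity)
		fmt.Println("code:", pqErr.Code)                  // e.g. "23505"
		fmt.Println("condition:", pqErr.Code.Name())      // e.g. "unique_violation"
		fmt.Println("class:", pqErr.Code.Class().Name())  // e.g. "integrity_constraint_violation"
		fmt.Println("constraint:", pqErr.Constraint)
	} else if err != nil {
		log.Fatal(err)
	}
}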
It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": "nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": 
"zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": "srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": 
"windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": "fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": 
"fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. +func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.bad = true + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.bad = true + *err = v + case error: + if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.bad = true + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. 
+ if *err == driver.ErrBadConn { + cn.bad = true + } +} diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod new file mode 100644 index 0000000..b5a5639 --- /dev/null +++ b/vendor/github.com/lib/pq/go.mod @@ -0,0 +1,3 @@ +module github.com/lib/pq + +go 1.13 diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 0000000..408ec01 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). +type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 0000000..01dd8c7 --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,71 @@ +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. +type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. +func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. 
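Editorial aside (not part of the vendored file): a minimal sketch of the notice-handler connector API from notice.go above, combined with pq.NewConnector (defined elsewhere in this vendored package) and database/sql.OpenDB. The DSN and the DO block are placeholders:

package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	base, err := pq.NewConnector("postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the connector; every connection it creates gets the handler.
	connector := pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) {
		log.Printf("notice: %s: %s", notice.Severity, notice.Message)
	})

	db := sql.OpenDB(connector)
	defer db.Close()

	// Any NOTICE raised by this statement is delivered to the handler,
	// synchronously, before Exec returns.
	if _, err := db.Exec(`DO $$ BEGIN RAISE NOTICE 'hello from the server'; END $$`); err != nil {
		log.Fatal(err)
	}
}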
Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 0000000..5c421fd --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,858 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. +func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. 
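Editorial aside (not part of the vendored file): the notification-handler connector mirrors the notice-handler API. A sketch with a placeholder DSN and channel name; note that with a connection pool the LISTEN only applies to whichever connection ran it, so for robust LISTEN handling the Listener type later in this file is usually the better tool:

package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	base, err := pq.NewConnector("postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	connector := pq.ConnectorWithNotificationHandler(base, func(n *pq.Notification) {
		log.Printf("pid=%d channel=%q payload=%q", n.BePid, n.Channel, n.Extra)
	})

	db := sql.OpenDB(connector)
	defer db.Close()

	// Notifications arriving on this connection are dispatched to the handler
	// as they are read off the wire during normal query processing.
	if _, err := db.Exec("LISTEN jobs"); err != nil {
		log.Fatal(err)
	}
}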
+// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. +func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. 
+func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'S': + // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. + close(l.replyChan) + + // let the listener know we're done + close(l.notificationChan) + + // this ListenerConn is done +} + +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Listen(channel string) (bool, error) { + return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) +} + +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. +func (l *ListenerConn) Unlisten(channel string) (bool, error) { + return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) +} + +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. +func (l *ListenerConn) UnlistenAll() (bool, error) { + return l.ExecSimpleQuery("UNLISTEN *") +} + +// Ping the remote server to make sure it's alive. Non-nil error means the +// connection has failed and should be abandoned. 
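Editorial aside (not part of the vendored file): a sketch of driving the low-level ListenerConn described above directly. The DSN and channel name are placeholders; the package itself recommends the higher-level Listener for most uses:

package main

import (
	"log"

	"github.com/lib/pq"
)

func main() {
	notifications := make(chan *pq.Notification, 32)
	lc, err := pq.NewListenerConn("postgres://user:pass@localhost/db?sslmode=disable", notifications)
	if err != nil {
		log.Fatal(err)
	}
	defer lc.Close()

	if ok, err := lc.Listen("jobs"); !ok || err != nil {
		log.Fatal(err)
	}

	// ListenerConn closes the channel when the connection is lost; Err then
	// reports why.
	for n := range notifications {
		log.Printf("channel=%q payload=%q", n.Channel, n.Extra)
	}
	log.Println("connection lost:", lc.Err())
}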
+func (l *ListenerConn) Ping() error { + sent, err := l.ExecSimpleQuery("") + if !sent { + return err + } + if err != nil { + // shouldn't happen + panic(err) + } + return nil +} + +// Attempt to send a query on the connection. Returns an error if sending the +// query failed, and the caller should initiate closure of this connection. +// The caller must be holding senderLock (see acquireSenderLock and +// releaseSenderLock). +func (l *ListenerConn) sendSimpleQuery(q string) (err error) { + defer errRecoverNoErrBadConn(&err) + + // must set connection state before sending the query + if !l.setState(connStateExpectResponse) { + panic("two queries running at the same time") + } + + // Can't use l.cn.writeBuf here because it uses the scratch buffer which + // might get overwritten by listenerConnLoop. + b := &writeBuf{ + buf: []byte("Q\x00\x00\x00\x00"), + pos: 1, + } + b.string(q) + l.cn.send(b) + + return nil +} + +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: +// 1) "executed" is true; the query was executed to completion on the +// database server. If the query failed, err will be set to the error +// returned by the database, otherwise err will be nil. +// 2) If "executed" is false, the query could not be executed on the remote +// server. err will be non-nil. +// +// After a call to ExecSimpleQuery has returned an executed=false value, the +// connection has either been closed or will be closed shortly thereafter, and +// all subsequently executed queries will return an error. +func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { + if err = l.acquireSenderLock(); err != nil { + return false, err + } + defer l.releaseSenderLock() + + err = l.sendSimpleQuery(q) + if err != nil { + // We can't know what state the protocol is in, so we need to abandon + // this connection. + l.connectionLock.Lock() + // Set the error pointer if it hasn't been set already; see + // listenerConnMain. + if l.err == nil { + l.err = err + } + l.connectionLock.Unlock() + l.cn.c.Close() + return false, err + } + + // now we just wait for a reply.. + for { + m, ok := <-l.replyChan + if !ok { + // We lost the connection to server, don't bother waiting for a + // a response. err should have been set already. + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + return false, err + } + switch m.typ { + case 'Z': + // sanity check + if m.err != nil { + panic("m.err != nil") + } + // done; err might or might not be set + return true, err + + case 'E': + // sanity check + if m.err == nil { + panic("m.err == nil") + } + // server responded with an error; ReadyForQuery to follow + err = m.err + + default: + return false, fmt.Errorf("unknown response for simple query: %q", m.typ) + } + } +} + +// Close closes the connection. +func (l *ListenerConn) Close() error { + l.connectionLock.Lock() + if l.err != nil { + l.connectionLock.Unlock() + return errListenerConnClosed + } + l.err = errListenerConnClosed + l.connectionLock.Unlock() + // We can't send anything on the connection without holding senderLock. + // Simply close the net.Conn to wake up everyone operating on it. + return l.cn.c.Close() +} + +// Err returns the reason the connection was closed. It is not safe to call +// this function until l.Notify has been closed. 
+func (l *ListenerConn) Err() error { + return l.err +} + +var errListenerClosed = errors.New("pq: Listener has been closed") + +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. +var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. +var ErrChannelNotOpen = errors.New("pq: channel is not open") + +// ListenerEventType is an enumeration of listener event types. +type ListenerEventType int + +const ( + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. + ListenerEventConnected ListenerEventType = iota + + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. + ListenerEventDisconnected + + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. + ListenerEventConnectionAttemptFailed +) + +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. + Notify chan *Notification + + name string + minReconnectInterval time.Duration + maxReconnectInterval time.Duration + dialer Dialer + eventCallback EventCallbackType + + lock sync.Mutex + isClosed bool + reconnectCond *sync.Cond + cn *ListenerConn + connNotificationChan <-chan *Notification + channels map[string]struct{} +} + +// NewListener creates a new database connection dedicated to LISTEN / NOTIFY. +// +// name should be set to a connection string to be used to establish the +// database connection (see section "Connection String Parameters" above). +// +// minReconnectInterval controls the duration to wait before trying to +// re-establish the database connection after connection loss. After each +// consecutive failure this interval is doubled, until maxReconnectInterval is +// reached. Successfully completing the connection establishment procedure +// resets the interval back to minReconnectInterval. +// +// The last parameter eventCallback can be set to a function which will be +// called by the Listener when the state of the underlying database connection +// changes. This callback will be called by the goroutine which dispatches the +// notifications over the Notify channel, so you should try to avoid doing +// potentially time-consuming operations from the callback. 
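Editorial aside (not part of the vendored file): the usual Listener pattern built from the pieces above, with an event callback, a receive loop over Notify that handles the nil notification sent after a reconnect, and a periodic Ping. The DSN, channel name and the 90 second idle interval are assumptions for illustration:

package main

import (
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	dsn := "postgres://user:pass@localhost/db?sslmode=disable"

	listener := pq.NewListener(dsn, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Println("listener event", ev, "err:", err)
			}
		})
	defer listener.Close()

	if err := listener.Listen("jobs"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case n := <-listener.Notify:
			if n == nil {
				// Sent after a reconnect: notifications may have been lost
				// while the connection was down, so re-check state here.
				log.Println("reconnected; re-sync application state")
				continue
			}
			log.Printf("notify on %q: %q", n.Channel, n.Extra)
		case <-time.After(90 * time.Second):
			// No traffic for a while; check that the connection is alive.
			go listener.Ping()
		}
	}
}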
+func NewListener(name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) +} + +// NewDialListener is like NewListener but it takes a Dialer. +func NewDialListener(d Dialer, + name string, + minReconnectInterval time.Duration, + maxReconnectInterval time.Duration, + eventCallback EventCallbackType) *Listener { + + l := &Listener{ + name: name, + minReconnectInterval: minReconnectInterval, + maxReconnectInterval: maxReconnectInterval, + dialer: d, + eventCallback: eventCallback, + + channels: make(map[string]struct{}), + + Notify: make(chan *Notification, 32), + } + l.reconnectCond = sync.NewCond(&l.lock) + + go l.listenerMain() + + return l +} + +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// life time of the Listener. +func (l *Listener) NotificationChannel() <-chan *Notification { + return l.Notify +} + +// Listen starts listening for notifications on a channel. Calls to this +// function will block until an acknowledgement has been received from the +// server. Note that Listener automatically re-establishes the connection +// after connection loss, so this function may block indefinitely if the +// connection can not be re-established. +// +// Listen will only fail in three conditions: +// 1) The channel is already open. The returned error will be +// ErrChannelAlreadyOpen. +// 2) The query was executed on the remote server, but PostgreSQL returned an +// error message in response to the query. The returned error will be a +// pq.Error containing the information the server supplied. +// 3) Close is called on the Listener before the request could be completed. +// +// The channel name is case-sensitive. +func (l *Listener) Listen(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // The server allows you to issue a LISTEN on a channel which is already + // open, but it seems useful to be able to detect this case to spot for + // mistakes in application logic. If the application genuinely does't + // care, it can check the exported error and ignore it. + _, exists := l.channels[channel] + if exists { + return ErrChannelAlreadyOpen + } + + if l.cn != nil { + // If gotResponse is true but error is set, the query was executed on + // the remote server, but resulted in an error. This should be + // relatively rare, so it's fine if we just pass the error to our + // caller. However, if gotResponse is false, we could not complete the + // query on the remote server and our underlying connection is about + // to go away, so we only add relname to l.channels, and wait for + // resync() to take care of the rest. + gotResponse, err := l.cn.Listen(channel) + if gotResponse && err != nil { + return err + } + } + + l.channels[channel] = struct{}{} + for l.cn == nil { + l.reconnectCond.Wait() + // we let go of the mutex for a while + if l.isClosed { + return errListenerClosed + } + } + + return nil +} + +// Unlisten removes a channel from the Listener's channel list. Returns +// ErrChannelNotOpen if the Listener is not listening on the specified channel. +// Returns immediately with no error if there is no connection. Note that you +// might still get notifications for this channel even after Unlisten has +// returned. 
+// +// The channel name is case-sensitive. +func (l *Listener) Unlisten(channel string) error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + // Similarly to LISTEN, this is not an error in Postgres, but it seems + // useful to distinguish from the normal conditions. + _, exists := l.channels[channel] + if !exists { + return ErrChannelNotOpen + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.Unlisten(channel) + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + delete(l.channels, channel) + return nil +} + +// UnlistenAll removes all channels from the Listener's channel list. Returns +// immediately with no error if there is no connection. Note that you might +// still get notifications for any of the deleted channels even after +// UnlistenAll has returned. +func (l *Listener) UnlistenAll() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + // Similarly to Listen (see comment in that function), the caller + // should only be bothered with an error if it came from the backend as + // a response to our query. + gotResponse, err := l.cn.UnlistenAll() + if gotResponse && err != nil { + return err + } + } + + // Don't bother waiting for resync if there's no connection. + l.channels = make(map[string]struct{}) + return nil +} + +// Ping the remote server to make sure it's alive. Non-nil return value means +// that there is no active connection. +func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. 
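Editorial aside (not part of the vendored file): a sketch showing how the ErrChannelAlreadyOpen and ErrChannelNotOpen sentinels defined above can make Listen/Unlisten idempotent from the caller's side. The DSN and channel name are placeholders:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/lib/pq"
)

func ensureListening(lst *pq.Listener, channel string) error {
	err := lst.Listen(channel)
	if errors.Is(err, pq.ErrChannelAlreadyOpen) {
		return nil // already listening; nothing to do
	}
	return err
}

func ensureNotListening(lst *pq.Listener, channel string) error {
	err := lst.Unlisten(channel)
	if errors.Is(err, pq.ErrChannelNotOpen) {
		return nil // was not listening; nothing to do
	}
	return err
}

func main() {
	lst := pq.NewListener("postgres://user:pass@localhost/db?sslmode=disable",
		10*time.Second, time.Minute, nil)
	defer lst.Close()

	if err := ensureListening(lst, "jobs"); err != nil {
		log.Fatal(err)
	}
	if err := ensureNotListening(lst, "jobs"); err != nil {
		log.Fatal(err)
	}
}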
We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. +func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 0000000..caaede2 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 0000000..ecc84c2 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. 
+ +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 
3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + 
T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 0000000..c6aa5b9 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. + Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. 
+func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 0000000..477216b --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. 
+func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. +func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := 
hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 0000000..d902084 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,175 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. 
+ tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + cert, err := ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. 
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 0000000..3b7c3a2 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,20 @@ +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 0000000..5d2c763 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 0000000..f4d8a7c --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"="+escaper.Replace(v)) + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 0000000..a510192 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,24 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris rumprun + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 0000000..2b69126 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. +func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 0000000..9a1b9e0 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 
+func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/sendgrid/rest/.env_sample b/vendor/github.com/sendgrid/rest/.env_sample new file mode 100644 index 0000000..30857f4 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/.env_sample @@ -0,0 +1 @@ +export SENDGRID_API_KEY='' \ No newline at end of file diff --git a/vendor/github.com/sendgrid/rest/.gitignore b/vendor/github.com/sendgrid/rest/.gitignore new file mode 100644 index 0000000..60ef913 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +.env +.settings.json + +temp.go diff --git a/vendor/github.com/sendgrid/rest/.travis.yml b/vendor/github.com/sendgrid/rest/.travis.yml new file mode 100644 index 0000000..62e2382 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/.travis.yml @@ -0,0 +1,27 @@ +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - tip + +script: + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) + +notifications: + slack: + if: branch = main + on_pull_requests: false + on_success: never + on_failure: change + rooms: + - secure: gthCH2Cha9FJ6pQMNvQ+Af2gHrqYO7tS/5m0mzu4MaOYZxgBxkdAGXBKchv1mLlc/pMKMCMejUdi+43oZCovntOOV41TPo0KUVVXZcetgsMGJm0g9d1fOhn9hlNC+qsbCGQ+zv5zltZ/eQ3nYG9TgLc+vZgDr4cSpMxZTl7GR4+NqvMTBfgLUuruTe4GXQXz7Sqjx/KNZHNlKm0NxMQtfzvKUEfWy3q5mqXhbITckZD+8oP4WEfdUXVzUcwZS7/HKwgEO1cpNKlMhdxfYUpYCdmPNLQVxdvfFYT/0A/G3bAgnWUo4QLQEbrsHMDeIfK5AC0jl0TnMEFoK7bkJdc3bg+U1Z4VsD5pdjApi34Y+G9iHUYFKmeyHhaG6e+bMWAhsXLopbTDTDQ0YqYRzYCDqKmubcLv1eDdcVHju5fBQh15Q1IGm6pYVeMrQTsuMhVWnZmN+vsx9d1xA0Cv1L5V6RLytrFxe6RNNeiDknmaKDKnRpaCzG/9fsRM1Q8R0MLJQwP5sGdvNAoydLwbWgSka1vqwjPtM+b8com+InzbkkUG4NADz1AKxh0I0Z6Y5FukpLgJFfRdiOsRYVh4cHB8gm8b6CL2vblFVL1un5boCmc0DZioL4mY2V8Kj7cB943X14kXM4yiZv5RiFJzJyhxcLzg22qPREsB4BRhSdex4uQ= diff --git a/vendor/github.com/sendgrid/rest/CHANGELOG.md b/vendor/github.com/sendgrid/rest/CHANGELOG.md new file mode 100644 index 0000000..ef0dc31 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/CHANGELOG.md @@ -0,0 +1,105 @@ +# Change Log +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org/). + +[2020-10-14] Version 2.6.2 +-------------------------- +**Library - Fix** +- [PR #101](https://github.com/sendgrid/rest/pull/101): Pass empty client instead of http.DefaultClient. Thanks to [@mateorider](https://github.com/mateorider)! + + +[2020-08-19] Version 2.6.1 +-------------------------- +**Library - Chore** +- [PR #100](https://github.com/sendgrid/rest/pull/100): update GitHub branch references to use HEAD. Thanks to [@thinkingserious](https://github.com/thinkingserious)! 
+ + +[2020-02-19] Version 2.6.0 +-------------------------- +**Library - Feature** +- [PR #73](https://github.com/sendgrid/rest/pull/73): Dockerize sendgrid/rest. Thanks to [@graystevens](https://github.com/graystevens)! + + +[2020-02-05] Version 2.5.1 +-------------------------- +**Library - Docs** +- [PR #77](https://github.com/sendgrid/rest/pull/77): Run Grammarly on *.md files. Thanks to [@obahareth](https://github.com/obahareth)! +- [PR #86](https://github.com/sendgrid/rest/pull/86): Fixed link to bug report template. Thanks to [@alxshelepenok](https://github.com/alxshelepenok)! + + +[2020-01-30] Version 2.5.0 +-------------------------- +**Library - Docs** +- [PR #97](https://github.com/sendgrid/rest/pull/97): baseline all the templated markdown docs. Thanks to [@childish-sambino](https://github.com/childish-sambino)! +- [PR #88](https://github.com/sendgrid/rest/pull/88): add our Developer Experience Engineer career opportunity to the README. Thanks to [@mptap](https://github.com/mptap)! +- [PR #65](https://github.com/sendgrid/rest/pull/65): added "Code Review" section to CONTRIBUTING.md. Thanks to [@aleien](https://github.com/aleien)! +- [PR #80](https://github.com/sendgrid/rest/pull/80): add first timers guide for newcomers. Thanks to [@daniloff200](https://github.com/daniloff200)! +- [PR #82](https://github.com/sendgrid/rest/pull/82): update contribution guide with new workflow. Thanks to [@radlinskii](https://github.com/radlinskii)! +- [PR #62](https://github.com/sendgrid/rest/pull/62): update CONTRIBUTING.md with environment variables section. Thanks to [@thepriefy](https://github.com/thepriefy)! + +**Library - Chore** +- [PR #96](https://github.com/sendgrid/rest/pull/96): prep repo for automation. Thanks to [@thinkingserious](https://github.com/thinkingserious)! +- [PR #94](https://github.com/sendgrid/rest/pull/94): add current Go version to Travis. Thanks to [@pangaunn](https://github.com/pangaunn)! +- [PR #93](https://github.com/sendgrid/rest/pull/93): add current Go versions to Travis. Thanks to [@gliptak](https://github.com/gliptak)! +- [PR #83](https://github.com/sendgrid/rest/pull/83): follow godoc deprecation standards. Thanks to [@vaskoz](https://github.com/vaskoz)! +- [PR #74](https://github.com/sendgrid/rest/pull/74): create README.md in use-cases. Thanks to [@ajloria](https://github.com/ajloria)! + +**Library - Feature** +- [PR #72](https://github.com/sendgrid/rest/pull/72): do not swallow the error code. Thanks to [@Succo](https://github.com/Succo)! + + +[2018-04-09] Version 2.4.1 +-------------------------- +### Fixed +- Pull #71, Solves #70 +- Fix Travis CI Build +- Special thanks to [Vasko Zdravevski](https://github.com/vaskoz) for the PR! + +## [2.4.0] - 2017-4-10 +### Added +- Pull #18, Solves #17 +- Add RestError Struct for an error handling +- Special thanks to [Takahiro Ikeuchi](https://github.com/iktakahiro) for the PR! + +## [2.3.1] - 2016-10-14 +### Changed +- Pull #15, solves Issue #7 +- Moved QueryParams processing into BuildRequestObject +- Special thanks to [Gábor Lipták](https://github.com/gliptak) for the PR! 
+ +## [2.3.0] - 2016-10-04 +### Added +- Pull [#10] [Allow for custom Content-Types](https://github.com/sendgrid/rest/issues/10) + +## [2.2.0] - 2016-07-28 +### Added +- Pull [#9](https://github.com/sendgrid/rest/pull/9): Allow for setting a custom HTTP client +- [Here](rest_test.go#L127) is an example of usage +- This enables usage of the [sendgrid-go library](https://github.com/sendgrid/sendgrid-go) on [Google App Engine (GAE)](https://cloud.google.com/appengine/) +- Special thanks to [Chris Broadfoot](https://github.com/broady) and [Sridhar Venkatakrishnan](https://github.com/sridharv) for providing code and feedback! + +## [2.1.0] - 2016-06-10 +### Added +- Automatically add Content-Type: application/json when there is a request body + +## [2.0.0] - 2016-06-03 +### Changed +- Made the Request and Response variables non-redundant. e.g. request.RequestBody becomes request.Body + +## [1.0.2] - 2016-04-07 +### Added +- these changes are thanks to [deckarep](https://github.com/deckarep). Thanks! +- more updates to error naming convention +- more error handing on HTTP request + +## [1.0.1] - 2016-04-07 +### Added +- these changes are thanks to [deckarep](https://github.com/deckarep). Thanks! +- update error naming convention +- explicitly define supported HTTP verbs +- better error handling on HTTP request + +## [1.0.0] - 2016-04-05 +### Added +- We are live! diff --git a/vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md b/vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..2f0727e --- /dev/null +++ b/vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at open-source@twilio.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/vendor/github.com/sendgrid/rest/CONTRIBUTING.md b/vendor/github.com/sendgrid/rest/CONTRIBUTING.md new file mode 100644 index 0000000..1d09327 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/CONTRIBUTING.md @@ -0,0 +1,169 @@ +Hello! Thank you for choosing to help contribute to one of the SendGrid open source projects. There are many ways you can contribute and help is always welcome. We simply ask that you follow the following contribution policies. + +- [Feature Request](#feature-request) +- [Submit a Bug Report](#submit-a-bug-report) +- [Improvements to the Codebase](#improvements-to-the-codebase) +- [Understanding the Code Base](#understanding-the-codebase) +- [Testing](#testing) +- [Style Guidelines & Naming Conventions](#style-guidelines-and-naming-conventions) +- [Creating a Pull Request](#creating-a-pull-request) +- [Code Reviews](#code-reviews) + + +## Feature Request + +If you'd like to make a feature request, please read this section. + +The GitHub issue tracker is the preferred channel for library feature requests, but please respect the following restrictions: + +- Please **search for existing issues** in order to ensure we don't have duplicate bugs/feature requests. +- Please be respectful and considerate of others when commenting on issues + + +## Submit a Bug Report + +Note: DO NOT include your credentials in ANY code examples, descriptions, or media you make public. + +A software bug is a demonstrable issue in the code base. In order for us to diagnose the issue and respond as quickly as possible, please add as much detail as possible into your bug report. + +Before you decide to create a new issue, please try the following: + +1. Check the Github issues tab if the identified issue has already been reported, if so, please add a +1 to the existing post. +2. 
Update to the latest version of this code and check if issue has already been fixed +3. Copy and fill in the Bug Report Template we have provided below + +### Please use our Bug Report Template + +In order to make the process easier, we've included a [sample bug report template](ISSUE_TEMPLATE.md). + + +## Improvements to the Codebase + +We welcome direct contributions to the rest code base. Thank you! + +### Development Environment ### + +#### Install and Run Locally #### + +##### Prerequisites ##### + +- Go version 1.6 + +##### Initial setup: ##### + +```bash +git clone https://github.com/sendgrid/rest.git +cd rest +``` + +### Environment Variables + +First, get your free SendGrid account [here](https://sendgrid.com/free?source=rest). + +Next, update your environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys) if you will test with Swift Mailer. + +``` +echo "export SENDGRID-API-KEY='YOUR-API-KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +go run examples/example.go +``` + +##### Execute: ##### + +See the [examples folder](examples) to get started quickly. + + +## Understanding the Code Base + +**/examples** + +Working examples that demonstrate usage. + +**rest.go** + +There is a struct to hold both the request and response to the API server. + +The main function that does the heavy lifting (and external entry point) is `API`. + + +## Testing + +All PRs require passing tests before the PR will be reviewed. + +All test files are in [`rest-test.go`](rest_test.go). + +For the purposes of contributing to this repo, please update the [`rest-test.go`](rest_test.go) file with unit tests as you modify the code. + +Run the test: + +```bash +go test -v +``` + + +## Style Guidelines & Naming Conventions + +Generally, we follow the style guidelines as suggested by the official language. However, we ask that you conform to the styles that already exist in the library. If you wish to deviate, please explain your reasoning. + +- [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) + +Please run your code through: + +- [fmt](https://blog.golang.org/go-fmt-your-code) + +## Creating a Pull Request + +1. [Fork](https://help.github.com/fork-a-repo/) the project, clone your fork, + and configure the remotes: + + ```bash + # Clone your fork of the repo into the current directory + git clone https://github.com/sendgrid/rest + # Navigate to the newly cloned directory + cd rest + # Assign the original repo to a remote called "upstream" + git remote add upstream https://github.com/sendgrid/rest + ``` + +2. If you cloned a while ago, get the latest changes from upstream: + + ```bash + git checkout development + git pull upstream development + ``` + +3. Create a new topic branch off the `development` branch to + contain your feature, change, or fix: + + ```bash + git checkout -b + ``` + +4. Commit your changes in logical chunks. Please adhere to these [git commit + message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) + or your code is unlikely to be merged into the main project. Use Git's + [interactive rebase](https://help.github.com/articles/interactive-rebase) + feature to tidy up your commits before making them public. + +4a. Create tests. + +4b. Create or update the example code that demonstrates the functionality of this change to the code. + +5. 
Locally merge (or rebase) the upstream development branch into your topic branch: + + ```bash + git pull [--rebase] upstream development + ``` + +6. Push your topic branch up to your fork: + + ```bash + git push origin + ``` + +7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) + with a clear title and description against the `development` branch. All tests must be passing before we will review the PR. + +## Code Reviews +If you can, please look at open PRs and review them. Give feedback and help us merge these PRs much faster! If you don't know how, Github has some great [information on how to review a Pull Request](https://help.github.com/articles/about-pull-request-reviews/). diff --git a/vendor/github.com/sendgrid/rest/FIRST_TIMERS.md b/vendor/github.com/sendgrid/rest/FIRST_TIMERS.md new file mode 100644 index 0000000..005da47 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/FIRST_TIMERS.md @@ -0,0 +1,79 @@ +# How To Contribute to Twilio SendGrid Repositories via GitHub +Contributing to the Twilio SendGrid repositories is easy! All you need to do is find an open issue (see the bottom of this page for a list of repositories containing open issues), fix it and submit a pull request. Once you have submitted your pull request, the team can easily review it before it is merged into the repository. + +To make a pull request, follow these steps: + +1. Log into GitHub. If you do not already have a GitHub account, you will have to create one in order to submit a change. Click the Sign up link in the upper right-hand corner to create an account. Enter your username, password, and email address. If you are an employee of Twilio SendGrid, please use your full name with your GitHub account and enter Twilio SendGrid as your company so we can easily identify you. + + + +2. __[Fork](https://help.github.com/fork-a-repo/)__ the [rest](https://github.com/sendgrid/rest) repository: + + + +3. __Clone__ your fork via the following commands: + +```bash +# Clone your fork of the repo into the current directory +git clone https://github.com/your_username/rest +# Navigate to the newly cloned directory +cd rest +# Assign the original repo to a remote called "upstream" +git remote add upstream https://github.com/sendgrid/rest +``` + +> Don't forget to replace *your_username* in the URL by your real GitHub username. + +4. __Create a new topic branch__ (off the main project development branch) to contain your feature, change, or fix: + +```bash +git checkout -b +``` + +5. __Commit your changes__ in logical chunks. + +Please adhere to these [git commit message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) or your code is unlikely be merged into the main project. Use Git's [interactive rebase](https://help.github.com/articles/interactive-rebase) feature to tidy up your commits before making them public. Probably you will also have to create tests (if needed) or create or update the example code that demonstrates the functionality of this change to the code. + +6. __Locally merge (or rebase)__ the upstream development branch into your topic branch: + +```bash +git pull [--rebase] upstream main +``` + +7. __Push__ your topic branch up to your fork: + +```bash +git push origin +``` + +8. __[Open a Pull Request](https://help.github.com/articles/creating-a-pull-request/#changing-the-branch-range-and-destination-repository/)__ with a clear title and description against the `main` branch. All tests must be passing before we will review the PR. 
+ +## Important notice + +Before creating a pull request, make sure that you respect the repository's constraints regarding contributions. You can find them in the [CONTRIBUTING.md](CONTRIBUTING.md) file. + +## Repositories with Open, Easy, Help Wanted, Issue Filters + +* [Python SDK](https://github.com/sendgrid/sendgrid-python/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [PHP SDK](https://github.com/sendgrid/sendgrid-php/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [C# SDK](https://github.com/sendgrid/sendgrid-csharp/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Ruby SDK](https://github.com/sendgrid/sendgrid-ruby/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Node.js SDK](https://github.com/sendgrid/sendgrid-nodejs/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Java SDK](https://github.com/sendgrid/sendgrid-java/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Go SDK](https://github.com/sendgrid/sendgrid-go/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Python STMPAPI Client](https://github.com/sendgrid/smtpapi-python/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [PHP STMPAPI Client](https://github.com/sendgrid/smtpapi-php/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [C# STMPAPI Client](https://github.com/sendgrid/smtpapi-csharp/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Ruby STMPAPI Client](https://github.com/sendgrid/smtpapi-ruby/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Node.js STMPAPI Client](https://github.com/sendgrid/smtpapi-nodejs/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Java STMPAPI Client](https://github.com/sendgrid/smtpapi-java/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Go STMPAPI Client](https://github.com/sendgrid/smtpapi-go/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Python HTTP Client](https://github.com/sendgrid/python-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [PHP HTTP Client](https://github.com/sendgrid/php-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [C# HTTP Client](https://github.com/sendgrid/csharp-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Java HTTP Client](https://github.com/sendgrid/java-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Ruby HTTP Client](https://github.com/sendgrid/ruby-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Go HTTP 
Client](https://github.com/sendgrid/rest/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Open API Definition](https://github.com/sendgrid/sendgrid-oai/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [DX Automator](https://github.com/sendgrid/dx-automator/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Documentation](https://github.com/sendgrid/docs/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) diff --git a/vendor/github.com/sendgrid/rest/ISSUE_TEMPLATE.md b/vendor/github.com/sendgrid/rest/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..d0baa8d --- /dev/null +++ b/vendor/github.com/sendgrid/rest/ISSUE_TEMPLATE.md @@ -0,0 +1,30 @@ + + +### Issue Summary +A summary of the issue and the environment in which it occurs. If suitable, include the steps required to reproduce the bug. Please feel free to include screenshots, screencasts, or code examples. + +### Steps to Reproduce +1. This is the first step +2. This is the second step +3. Further steps, etc. + +### Code Snippet +```go +# paste code here +``` + +### Exception/Log +``` +# paste exception/log here +``` + +### Technical details: +* rest version: +* go version: + diff --git a/vendor/github.com/sendgrid/rest/LICENSE.md b/vendor/github.com/sendgrid/rest/LICENSE.md new file mode 100644 index 0000000..29aba59 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (C) 2020, Twilio SendGrid, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sendgrid/rest/Makefile b/vendor/github.com/sendgrid/rest/Makefile new file mode 100644 index 0000000..07d1cdf --- /dev/null +++ b/vendor/github.com/sendgrid/rest/Makefile @@ -0,0 +1,7 @@ +.PHONY: test install + +install: + go get -t -v ./... + +test: install + go test -race -cover -v ./... diff --git a/vendor/github.com/sendgrid/rest/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/sendgrid/rest/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..a868180 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,31 @@ + + +# Fixes # + +A short description of what this PR does. 
+ +### Checklist +- [ ] I acknowledge that all my contributions will be made under the project's license +- [ ] I have made a material change to the repo (functionality, testing, spelling, grammar) +- [ ] I have read the [Contribution Guidelines](CONTRIBUTING.md) and my PR follows them +- [ ] I have titled the PR appropriately +- [ ] I have updated my branch with the main branch +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] I have added necessary documentation about the functionality in the appropriate .md file +- [ ] I have added inline documentation to the code I modified + +If you have questions, please file a [support ticket](https://twilio.com/help/contact), or create a GitHub Issue in this repository. diff --git a/vendor/github.com/sendgrid/rest/README.md b/vendor/github.com/sendgrid/rest/README.md new file mode 100644 index 0000000..543e900 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/README.md @@ -0,0 +1,199 @@ +![SendGrid Logo](twilio_sendgrid_logo.png) + +[![Build Status](https://travis-ci.org/sendgrid/rest.svg?branch=main)](https://travis-ci.org/sendgrid/rest) +[![GoDoc](https://godoc.org/github.com/sendgrid/rest?status.png)](http://godoc.org/github.com/sendgrid/rest) +[![Go Report Card](https://goreportcard.com/badge/github.com/sendgrid/rest)](https://goreportcard.com/report/github.com/sendgrid/rest) +[![Email Notifications Badge](https://dx.sendgrid.com/badge/go)](https://dx.sendgrid.com/newsletter/go) +[![Twitter Follow](https://img.shields.io/twitter/follow/sendgrid.svg?style=social&label=Follow)](https://twitter.com/sendgrid) +[![GitHub contributors](https://img.shields.io/github/contributors/sendgrid/rest.svg)](https://github.com/sendgrid/rest/graphs/contributors) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE.md) + +**Quickly and easily access any RESTful or RESTful-like API.** + +If you are looking for the SendGrid API client library, please see [this repo](https://github.com/sendgrid/sendgrid-go). + +# Announcements +**The default branch name for this repository has been changed to `main` as of 07/27/2020.** + +All updates to this library is documented in our [CHANGELOG](CHANGELOG.md). + +# Table of Contents +- [Installation](#installation) +- [Quick Start](#quick-start) +- [Usage](#usage) +- [How to Contribute](#contribute) +- [About](#about) +- [License](#license) + + +# Installation + +## Prerequisites + +- Go version 1.6.X, 1.7.X, 1.8.X, 1.9.X or 1.10.X + +## Install Package + +```bash +go get github.com/sendgrid/rest +``` + +## Setup Environment Variables + +### Initial Setup + +```bash +cp .env_sample .env +``` + +### Environment Variable + +Update the development environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys), for example: + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + +## With Docker + +A Docker image has been created to allow you to get started with `rest` right away. 
+ +```bash +docker-compose up -d --build + +# Ensure the container is running with 'docker ps' +docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +40c8d984a620 rest_go "tail -f /dev/null" About a minute ago Up About a minute rest_go_1 +``` + +With the container running, you can execute your local `go` scripts using the following: + +```bash +# docker exec +docker exec rest_go_1 go run docker/example.go +200 +{ + "args": {}, + "headers": { + "Accept-Encoding": "gzip", + "Connection": "close", + "Host": "httpbin.org", + "User-Agent": "Go-http-client/1.1" + }, + "origin": "86.180.177.202", + "url": "https://httpbin.org/get" +} + +map[Access-Control-Allow-Origin:[*] Access-Control-Allow-Credentials:[true] Via:[1.1 vegur] Connection:[keep-alive] Server:[gunicorn/19.9.0] Date:[Tue, 02 Oct 2018 18:20:43 GMT] Content-Type:[application/json] Content-Length:[233]] + +# You can install libraries too, using the same command +# NOTE: Any libraries installed will be removed when the container is stopped. +docker exec rest_go_1 go get github.com/uniplaces/carbon +``` + +Your go files will be executed relative to the root of this directory. So in the example above, to execute the `example.go` file within the `docker` directory, we run `docker exec rest_go_1 go run docker/example.go`. If this file was in the root of this repository (next to README.exe, rest.go etc.), you would run `docker exec rest_go_1 go run my_go_script.go` + + +# Quick Start + +`GET /your/api/{param}/call` + +```go +package main + +import "github.com/sendgrid/rest" +import "fmt" + +func main() { + const host = "https://api.example.com" + param := "myparam" + endpoint := "/your/api/" + param + "/call" + baseURL := host + endpoint + method := rest.Get + request := rest.Request{ + Method: method, + BaseURL: baseURL, + } + response, err := rest.Send(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +`POST /your/api/{param}/call` with headers, query parameters and a request body. + +```go +package main + +import "github.com/sendgrid/rest" +import "fmt" + +func main() { + const host = "https://api.example.com" + param := "myparam" + endpoint := "/your/api/" + param + "/call" + baseURL := host + endpoint + Headers := make(map[string]string) + key := os.Getenv("API_KEY") + Headers["Authorization"] = "Bearer " + key + Headers["X-Test"] = "Test" + var Body = []byte(`{"some": 0, "awesome": 1, "data": 3}`) + queryParams := make(map[string]string) + queryParams["hello"] = "0" + queryParams["world"] = "1" + method := rest.Post + request = rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, + Body: Body, + } + response, err := rest.Send(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +# Usage + +- [Usage Examples](USAGE.md) + + +# How to Contribute + +We encourage contribution to our projects, please see our [CONTRIBUTING](CONTRIBUTING.md) guide for details. + +Quick links: + +- [Feature Request](CONTRIBUTING.md#feature-request) +- [Bug Reports](CONTRIBUTING.md#submit-a-bug-report) +- [Improvements to the Codebase](CONTRIBUTING.md#improvements-to-the-codebase) +- [Code Reviews](CONTRIBUTING.md#code-reviews) + + +# About + +rest is maintained and funded by Twilio SendGrid, Inc. The names and logos for rest are trademarks of Twilio SendGrid, Inc. 
+ +If you need help installing or using the library, please check the [Twilio SendGrid Support Help Center](https://support.sendgrid.com). + +If you've instead found a bug in the library or would like new features added, go ahead and open issues or pull requests against this repo! + + +# License +[The MIT License (MIT)](LICENSE.md) diff --git a/vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md b/vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md new file mode 100644 index 0000000..242d4a7 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md @@ -0,0 +1,62 @@ +## Table of Contents + +* [Viewing the Request Body](#request-body) + + + +## Viewing the Request Body + +When debugging or testing, it may be useful to examine the raw request body to compare against the [documented format](https://sendgrid.com/docs/API_Reference/api_v3.html). + +Example Code +```go +package main + +import "github.com/sendgrid/rest" +import "fmt" + +func main() { + const host = "https://api.example.com" + param := "myparam" + endpoint := "/your/api/" + param + "/call" + baseURL := host + endpoint + Headers := make(map[string]string) + key := os.Getenv("API_KEY") + Headers["Authorization"] = "Bearer " + key + Headers["X-Test"] = "Test" + var Body = []byte(`{"some": 0, "awesome": 1, "data": 3}`) + queryParams := make(map[string]string) + queryParams["hello"] = "0" + queryParams["world"] = "1" + method := rest.Post + request = rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, + Body: Body, + } + response, err := rest.API(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +You can do this right before you call +`response, err := rest.API(request)` like so: + +```go +fmt.Printf("Request Body: %v \n", string(request.Body)) + +req, e := BuildRequestObject(request) +requestDump, err := httputil.DumpRequest(req, true) +if err != nil { + t.Errorf("Error : %v", err) +} +fmt.Printf("Request : %v \n", string(requestDump)) +``` \ No newline at end of file diff --git a/vendor/github.com/sendgrid/rest/USAGE.md b/vendor/github.com/sendgrid/rest/USAGE.md new file mode 100644 index 0000000..40def71 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/USAGE.md @@ -0,0 +1,211 @@ +# Usage + +Usage examples for SendGrid REST library + +## Initialization + +```go +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/sendgrid/rest" +) + +// Build the URL +const host = "https://api.sendgrid.com" +endpoint := "/v3/api_keys" +baseURL := host + endpoint + +// Build the request headers +key := os.Getenv("SENDGRID_API_KEY") +Headers := make(map[string]string) +Headers["Authorization"] = "Bearer " + key +``` + +## Table of Contents + +- [GET](#get) +- [DELETE](#delete) +- [POST](#post) +- [PUT](#put) +- [PATCH](#patch) + + +## GET + +#### GET Single + +```go +method = rest.Get + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +#### GET Collection + +```go +method := rest.Get + +// Build the query parameters +queryParams := make(map[string]string) +queryParams["limit"] = "100" +queryParams["offset"] = "0" + +// Make the API call +request := rest.Request{ + Method: method, + BaseURL: baseURL, 
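+    // Attach the auth header and the limit/offset query parameters built above.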
+ Headers: Headers, + QueryParams: queryParams, +} +response, err := rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## DELETE + +```go +method = rest.Delete + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, + QueryParams: queryParams, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Headers) +} +``` + + +## POST + +```go +method = rest.Post + +// Build the request body +var Body = []byte(`{ + "name": "My API Key", + "scopes": [ + "mail.send", + "alerts.create", + "alerts.read" + ] +}`) + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, + Body: Body, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} + +// Get a particular return value. +// Note that you can unmarshall into a struct if +// you know the JSON structure in advance. +b := []byte(response.Body) +var f interface{} +err = json.Unmarshal(b, &f) +if err != nil { + fmt.Println(err) +} +m := f.(map[string]interface{}) +apiKey := m["api_key_id"].(string) +``` + +## PUT + +```go +method = rest.Put + +// Build the request body +Body = []byte(`{ + "name": "A New Hope", + "scopes": [ + "user.profile.read", + "user.profile.update" + ] +}`) + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, + Body: Body, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## PATCH + +```go +method = rest.Patch + +// Build the request body +Body = []byte(`{ + "name": "A New Hope" +}`) + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, + Body: Body, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` \ No newline at end of file diff --git a/vendor/github.com/sendgrid/rest/docker-compose.yml b/vendor/github.com/sendgrid/rest/docker-compose.yml new file mode 100644 index 0000000..d2f8bb0 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/docker-compose.yml @@ -0,0 +1,6 @@ +version: '3' +services: + go: + build: ./docker/ + volumes: + - ./:/app diff --git a/vendor/github.com/sendgrid/rest/rest.go b/vendor/github.com/sendgrid/rest/rest.go new file mode 100644 index 0000000..8ba80ac --- /dev/null +++ b/vendor/github.com/sendgrid/rest/rest.go @@ -0,0 +1,148 @@ +// Package rest allows for quick and easy access any REST or REST-like API. +package rest + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" +) + +// Version represents the current version of the rest library +const Version = "2.6.2" + +// Method contains the supported HTTP verbs. +type Method string + +// Supported HTTP verbs. +const ( + Get Method = "GET" + Post Method = "POST" + Put Method = "PUT" + Patch Method = "PATCH" + Delete Method = "DELETE" +) + +// Request holds the request to an API Call. +type Request struct { + Method Method + BaseURL string // e.g. 
https://api.sendgrid.com + Headers map[string]string + QueryParams map[string]string + Body []byte +} + +// RestError is a struct for an error handling. +type RestError struct { + Response *Response +} + +// Error is the implementation of the error interface. +func (e *RestError) Error() string { + return e.Response.Body +} + +// DefaultClient is used if no custom HTTP client is defined +var DefaultClient = &Client{HTTPClient: &http.Client{}} + +// Client allows modification of client headers, redirect policy +// and other settings +// See https://golang.org/pkg/net/http +type Client struct { + HTTPClient *http.Client +} + +// Response holds the response from an API call. +type Response struct { + StatusCode int // e.g. 200 + Body string // e.g. {"result: success"} + Headers map[string][]string // e.g. map[X-Ratelimit-Limit:[600]] +} + +// AddQueryParameters adds query parameters to the URL. +func AddQueryParameters(baseURL string, queryParams map[string]string) string { + baseURL += "?" + params := url.Values{} + for key, value := range queryParams { + params.Add(key, value) + } + return baseURL + params.Encode() +} + +// BuildRequestObject creates the HTTP request object. +func BuildRequestObject(request Request) (*http.Request, error) { + // Add any query parameters to the URL. + if len(request.QueryParams) != 0 { + request.BaseURL = AddQueryParameters(request.BaseURL, request.QueryParams) + } + req, err := http.NewRequest(string(request.Method), request.BaseURL, bytes.NewBuffer(request.Body)) + if err != nil { + return req, err + } + for key, value := range request.Headers { + req.Header.Set(key, value) + } + _, exists := req.Header["Content-Type"] + if len(request.Body) > 0 && !exists { + req.Header.Set("Content-Type", "application/json") + } + return req, err +} + +// MakeRequest makes the API call. +func MakeRequest(req *http.Request) (*http.Response, error) { + return DefaultClient.HTTPClient.Do(req) +} + +// BuildResponse builds the response struct. +func BuildResponse(res *http.Response) (*Response, error) { + body, err := ioutil.ReadAll(res.Body) + response := Response{ + StatusCode: res.StatusCode, + Body: string(body), + Headers: res.Header, + } + res.Body.Close() // nolint + return &response, err +} + +// Deprecated: API supports old implementation +func API(request Request) (*Response, error) { + return Send(request) +} + +// Send uses the DefaultClient to send your request +func Send(request Request) (*Response, error) { + return DefaultClient.Send(request) +} + +// The following functions enable the ability to define a +// custom HTTP Client + +// MakeRequest makes the API call. +func (c *Client) MakeRequest(req *http.Request) (*http.Response, error) { + return c.HTTPClient.Do(req) +} + +// Deprecated: API supports old implementation +func (c *Client) API(request Request) (*Response, error) { + return c.Send(request) +} + +// Send will build your request, make the request, and build your response. +func (c *Client) Send(request Request) (*Response, error) { + // Build the HTTP request object. + req, err := BuildRequestObject(request) + if err != nil { + return nil, err + } + + // Build the HTTP client and make the request. + res, err := c.MakeRequest(req) + if err != nil { + return nil, err + } + + // Build Response object. 
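+	// BuildResponse reads res.Body into memory and closes it, so callers never handle the body directly.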
+ return BuildResponse(res) +} diff --git a/vendor/github.com/sendgrid/rest/twilio_sendgrid_logo.png b/vendor/github.com/sendgrid/rest/twilio_sendgrid_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..a4c22239ac0cc70e10a51f828390d713edc8a29b GIT binary patch literal 14596 zcmY+rb8xRQyER$)VFC_ zP*x5W5D>7Im8zzzrkpIdiGv-zk*R~R8NH{S<3BbK5U(fqzowm;s}Z56ovpnKwSkk{GNoLgB`{C}qZ>+um= zy1F`YGcb5~c+h*W&^tI=Ffeg(aWOD5GcYsL{bSI%c-gxedD7Xtko>pE|ED8r=3?S( z<>+eVU{ClTT_a-$H&;Gl;{O=^@Auzvx>}k4|4#NU|C83g1R4G_!oWn&$nd|~|EBW( z2jy09wle$Y{2zUOCf@&I{y*6N`0z6P$Nc{a=D#!jFZ5rk{4l%>|GRAbFtURbT0lS? z=u)CWs-D2txzIVeYJPzH>z&NMFroF>gqd)n(Atcq;HII~wy9*k!yA>|8^$f_G}3-L z#|ObWRV}5h2UT_D3@!1h7&hY|9omGrB$N=Ovci&@p-4>>ECBw=kFVTQE{_{pj}v*Q z=3%bCxu;$;E-OE84yOU%r@XZ;YoWoXoME01Q{V({Ii7ik++BEMF1eiYx~tD96!&;H zAY*T5s5a?*rS=MWY(FBp^d5(=kN7qkw8uMc7z{7u4(re8HwsR~r-!%%+AlUYxha3> zIQgM+vU5qj+z&Un2+C?h&q~Zz23JU<>>YJ)TjQ$I{hL}^*eEe#_CwcU?r1Ef(b z43<>i=T3|pQl4rb?y5r5G9UZ0IPG1qxL5hNd!M}Y*H}-utxi6@dJjLb?;Xew{fW~l z8Xi;QF4OEvYkC(~Fw|ghd$GfTkOGkqQtCZ>B7w4GW?&!WV0Fj(CLTXW^yvdqFF!Oo z^&LAfU(>^CU4CCP4KFP*I$wE3UG~w$$jGmEKgQLb22ieY8Yf+L#n@>JIG(V^V!3r( za-|IOlDD?9pXs4Us8ANCX1hFh+iiM+E`~E=pSOSWM7|$}KV;}8fjv am?oYtaj-7XJQ`J66!_Sj=jUn5tuKna3~mXZX-2A`EbIC6u^K) zASzj)z$$(2SO#>I>dMPU7TujHat(5OcWJ;J| zum+Uuq4?~8OS)6gxPpy5 z58kqGT!z~$u0Qg0GA6xQ8p9*;%eI)|8v}&QMjS8nQo_ydwwteCD%f6d=eci#p<-Qo z-~X_ENq*=Vp|IIt```P31`S1&FA}+M@V8%$#J`Z?vZ%E#KJ$*n_0ZvN-1=J`R+i#l zuC@`~!-S8sS8KNd5BhDv2LJWJP2tEOPCk0zgzHM?^hfaGwwQ>0OIavNc{74Opc|`t z;&lgvs$M^%U@q$vwM7hz2FGv|T=NKN(j$tx0)v;RseV1<-SYV-rcgh3#Mz|PHe2&i zfA9X5(Mx#k_*{}=A?Oy=@?0&xsWc9E2tXuqY)5t@URDDuXb7$JjZ!ytOUcvG#*i|@ z04LMR!h-f806Xx^^9X+^Dw=iLI*NdL)p?L3$sKq3!=aGPv-0@G(Js~KuJRF0M_tw6 zv?#csA%*mDZblifnm;O=mq>b(X^B($qtAD{-ZYlW?dGHpa5d~0^X*s2S-j@aix^@A z0}C38E5|>{WD2iTjV2CXmQqy{(MKi6Q}jhN1_PQbEi&=VT7P(o@9?h$E^@kkNI8-> zJM{=cZMUGw!<13Y!x}uBFZTIe{Hjefe%#m`rY+TZxLHI)sYX~*3~b!1sFA2k3w|_i)L8SlE|8JdkH0pD}<91(kP}{q9tZ|O|s?L2s(V* zxtHE-w3NHaEF&*AFV@4O5Etzq6u=87nnMNYYZJ|#uG*&frTLB5_~B2x^M~>0dx$iO zfRWnWI`Ew;)Q#EHQ%jZJ;T+DTk8!UlOOTCLudVKgeNOIx4)Ak|Kz4nmxiBJjZavs@ zyx(JfA=n66YFr#{VDvz*wXi&J8)}sv8%U!|eTcW<-LeBOW~2DN@8~H)C{hq|b~KS1 z)MV6LGBYRgG1tV&Q#K->on5)?X5m2+sXD%!zE^-#F|Vz1axBg5oQGUg>ykjX=W__18f zGGojP)}Y1c+*DEfy1Txm07J+t-+f=vAV@;eVS{Skoe!Z*Fnt?4zld#gJMd2t(!cM? 
z%V)^oa>dH@YEmYqyoDX&G@ss}Az15^by{T9jH3AKSZV>)_;;3#TeD>BY(2bj6U)<{ zbrkj?{?=<(y_?j*>L;(N;|myW1XE=H+j4q!=mEG`yUoB^>WG!&19x+SJIi9Kk2Y$M zpq||?1vky!85bvG#|L4!mwD>=Th$C9!U*aqyng`oS)rBIvFM01vO2V+vO@l)JW=(q zwbOpl;eH?)zYU?|vN1SDL{sD-3^&hZ8?rP3L3jy;UVX4ExS=D{D3Li-c(Q2Z} zN@xp>AMF>Vfym<^YPNLWRtEb<`HAFu)!@w6SlVMp7uxpDj(rC9YW4HIU+EXmy|K*7 z#n=0@ir4N*><(^kvxDeEUis2`zizG>YuR;ty3F9KP-&A}SHnkP`DOpSK9)e8=O0DI z4->5ej+T}$(}O2iot)Y)jk#;8zyl!ffsksg-OSQw$=zZFa{t%aTQ;!RW-dyot(N1} znfAjnZs(ml2m|s|keq(u_?;_Gx!Ir=q!w6i;UW}|3g*i2HJ@7MJK_pFha*mkowuv& zS^tY+np_a;fI@n@fwXF6bZ>z_4I&?#U9sA;J%>K4%9nn#o?bD_uAp@NA>q)S9EK73 zPO+Sxgrr@$d}{&Hdd%O7@kBaArR7RCLg#*Dd7OA`$)HHA`g7W*%JIM68Z&+`X`#zY z_2S*`&SUX3bT{FhHmj#hZXte-Rck41HvgS0xfFcUtr((4fZj0C7V;%j>k`MNUd(2< z%i7)9n#$=rh^X;YDSOZsQ4ctV)9jsC?EGl8!=_ypra6v)<_;UZ+ucDr3!?2YKzQ|C zO=FWkjKr2d^{*%liMAGbNc?X9(|}iKNv_-nN!)ZZAyVgLo9KG;&7b_9kV`<`>$0o} zZ@wJ{zY8z2=NKXA=d%CfP8s&&1|9HV<>rO0B*u$bOr zN2lZhdzXDm%v-2-W^97B%)`5k?O+0OD6_?8dYM()ENeL^_-4UyOim?dsj$4Dp-KU= zISg;Gs!WAi{_A*$CY~T)Rc;bNPwq>0(ssWN`Pm2V0zkiXn}R6+l{O$%=rd#UVenWv z;57nKVqQ~sjyYI55LuDO3IU}yJU=w51R1J~E72*WNVL`M&V$ zCV4|L-dHDFC|1QH|EH7lZ|=1Blj~g_wocmmSx_pUaA-G>wEG@x=gecU+sFaR)O+$C zl1Jy$Gc}2%ALy$9X)4jm>TJ8A?q8Q#6AfK4;-(4+AB&s>z0)K)n=}C_A2KJJ+mB`e zlYaN9cr%%+a^%0*QU|%mWPj0QYwAq4nEOIPjq$tPx~KDS1qL4`7`Rtxy&#Iqs0cvK zYr}(@6j~w(gUcKx$DY7pnV|Y$O}V0(^9Kw+8EEDBhk4%h%C~Qc*bN`5y(|uUeR=CC zLuP{m$nZu*b3u@U;S@6YZ)1l&1R##}gEvnt5vkLPrQGgX{KeQ{c;-18U!E;@zCB|8=X!1_d2z%$0+bE7oMs- zXRki^6=u^ochCRfdfhWxo$Uby!?VEe0=T8z5 z3Bo1{V`}DNbw#RZOFt_(y?X&sD|?%I1;U<=QOs&T&y{eU=EqWNlwA!WnLoWFfmH;p z2O$P0?a|)DIQ$MguOL>hL{%y|OqI=J`f)&Ecw>`L(390NpJK(((&ph`HYxMLowe60 zmCt0ub(KG%tP05Gb@~39W;x9*YG`_VlHQKk8^yW1d+D0GLS`J3=hCiM`@t1+mApmv zPo!0Ls~I^c93()KCu#38${qMrR4aJ-PJod-+YI*oMp;6oA%Yvo5_h%+(HM>>?r8OY z*||7MyK}OnN#(q}l4XEdkt5>d?Z;g5dm6IE#tky~47h)khN5VTxlKeG-gWTJ&V zv>m4LgNKRA;kbl7HT$dWWQuUJlhtWoMSyV>f^PDjQ---)OOf43StEL7C4r6Bfu9DBdoH5IU> zBg>QpVxr;9m!87V-IchZff+ODpNKs-%&G4__fQVA&lzJd&$(T>1s4!Y(riWeGH7sd zT!kDQe2F9b%h}AsAr45rQZ;i13kAC!>z+3GFTvF3#dr}&>dIn0JlmpD@aL4w+|ZM4DgM<5`remb>jP&_mj(w+~A?enc) z4$EJM1FVrs`LtY50HU{Pt!@XLOP$3qS~?1(C2iWOH=#$wWqe!lL8f~#P3`)NPuv8? 
z;)N}rY^|Y5?(a~&2s^T3IKY991>?rjz-M+rL~Je7ZTmd-Vb{*Ji~kqco75e0OHdgZ z^kSQ@>J6>EZqwG)1E>V#nCa$2NH@gpTKDa=$Y0zJGmglRGlzfD6)wJLe7oZ%d2FEuHguMCUO?mRWMAQP7OI6PPVZ#ln0q7XTSD-}-)y&guNzd5 zR`@MPK(-zxEh*_fVAY*CQ4X{PhmYED#sj4C>TWcfs`jIxn1u+o|M&6X-I>*RcuK@U*d7m=~ULWyK~>A0{OjF6jYLy{V7R1urxmaVV1G zPeq^>W{gm+)ulG)vybeq!sb%LBx{0Z70DtM!g`H{Z`N&}$NTKiy%}73dj4fc-7PB) z+q7Nqu0>CmV#2dgYygPdsk0cspSKAkXp!y=jx1SBYIyqygm%7Hjk;+@*iTQOJ~ja^ zQkJAt`~$c?qHsIxe52R$9?2qcYSuK{$fWEjixrK=A~T4+Fa@6H_=wRYE*dKs+`3fr z-_Qf6y;pCHX)xqAU^96F!kNF*m}!xrF((W~L46)Op5u+8&f9Moib{^}iX6@^!z8tO z7Hse&E$%`W4Y~FXgme;{QQ|~=3Jp&n*n59$2CCeB)UxzVEI$?lUT9Z(j?a6>75p;8 zYvQ$&6Csg;N`>C@N3glc<2jxrRkrSUY}!&+uV~rWijI~@D&+i@j$#Kk1dJtzDYdT({P3$n{0=reg;cBC_WZBpOqY^*>cUIYu;+^V&_PYEBX;=I8<)zw(9>uwWd z-h#BgacKE2@{yEV197C}@~yj@<|`J(ubCHI3DZpwbgFe0%ex3r&1dbsAtVs}0{dM; zeg-|w93!KbznL$mJKA+S{SP;e4U|o?ENI)u0X_MK0HD-ZH=TJHA{3k`95+_)o2loj zv~PL1jHYeFa`Y23lotR6ewF%YEmGT*u46GSSbp>(16{!=t`5v|2gkiqsDcIKEz`aBFDM zVYm%)j+{YBPkB}K;v$XFI1a@fw0LR$-;fvSCl`Q@9f zqCrKXY(`JJm1z6nE6P}&I`sj$0&qjR4|Ll&Uu&GU^H2dN=r3Os5)hDf216`=X6lcs zJo*bBJ+ZOYhR*680DIi^*b&#GLd4cN^X0XPnft`NK69?{M&Y&RZVwwwP9DsYWxXNe?Y0RFBKq5O{;6W=TrvdmE z`B%jvJy+-C?Ewvy{$Qq>|CXA6)DX1?Qz>T>0}I$xA&uZ1r`yPPkSxvF_6gdoqY3@P zNr?+RH?u*s`CRoP$}Atkkbl3XJt(^ZgucC7!)r=2U60K9vu@4HlegNb2}9o^w+sG* zsj^PASdDMT>WHa)GB4>JkEzi6e$a`FS28jwtuTa#vKmvvzS z=n9TlgtyQ7+ah=&%ZhL1RQ6RyFsQ?7K{|TKxS+#cqxFt>CUWj|^o7KJ&0?3&>mvjU z2Mle}Zx9xc6Gc|6soJY=p!}EAbsJ1?IRfjNf)WM!<2xe9c~w;V%VWx}?+zM9pag(^d->v#EMq9`{^DQF>Jq&9VK}wTSF%g?QWoE9?P-}V7GyJ^y9bP; zi>{(^X^=KgLlW9OK~#4d|IWvbOf5TEHMoBwH}#sc@}y0nvY0H3c#+da1kxmw#0DQy za*zQGxL!#WTWVxW0ohEZfC|Wfch-(?!bz)^I(xZV=OWn!(PxDCn*KGB;t)Bf!b1}? ztJRYz=Yt(7fiWxHXZ8~2SjzNl3=$4!=Me4hnCmGn^L;qllyIb zv>OO0lfVijUX@$8V9+`wh+k7CiYH7+ec_gCyw^MTOIbetYGk>5NV+nn!rei^;N`uF zUZ_g|=uzAwN*jc7Z?at17P~!mSXO!xv~FS-2az7W2w?p+gkY?LWFbkS7Pqr_GHiLj zzrrW3ZhZN$))m_M%OAZa)PjL_g#T{7w?cCHTD0_fHWz>2EpM|ern+#JU)kS06l*6`R7h!qwQ2o{>)YOW-wmgbWQ)QcSX0ij7Etyi_##_{B~~T z5ck;uiQf|YdFWR(5C-c#n)}>+exObAn?yU|)?mr&T=tr8z18yC?j`JBud{TGn+a$v z@%SZ@GO#fv^}B7CDYoT14l4q92d*B$^jXbuu(^15>SfyIMtiQzm3QLC518%vmNN^m z2qYt^wQU6~J3}EzV73KNh@g~*^l}KLBoOl5LHC(YV6V}>ryM6cx9;%y8LpX~Auc2u zW%v;K=FUL~g&Rbu4kAC>IZ@m8UYSLS!sf+x!748% zOq-G;Sm+~EgB2}xuax}?6Ug*gbT_u$Mc)JhKZv+`Th0#9Nd8EtBIHk5r57Hb&cCJ)#XNn2nQeXX=752y`pm z>}Be_#o^ z5U#jTM_S?%Ml-_`HPZ`PQim8w(M&Kr@e3L-+<+KyySV;#kki#uJel2jtI+0we=Uwd z5tr~fyzY_&axsXqLbkH!jfozHvoYdCkH3w@gM6|-y2&?ucTe0#c7s7V%(&Q2da#XNCY<_7n#5YXRzB+p!Dxj54@IaE0?a^C zQIhD|^3;K6`U0cyAcpc2eh9+6`Im1<0BgxR63Ln6On_2!|5Vflk>&L_Fvky(TG)Qc zHq9OFG$S%OVZT2D&K4w$gPF+yl;Z7kNmn9OVE2w{zQ46h9=j{k+VtDQ&FlkhUE2W2 zrsy=voV#wiec3Z&`_#B0nL}uc0c=W!a9gP5MKQejjHJr0{IJJpZ-zf|Xy&d)pc*KO ziV9?vRk>TE^kQ~S$+^8@2SV2f#(P|e<9FcuZh zFtnn4&S}1u9aiQJ6uDnWFH3m+@o|UHP^5ZA4aB|Fx}L5_tF{voB!Tt#Iqnf3fB#(W z;#QJg_u$r|UW2s2pLaB9-tmCFY?VK)@q^EIeBJ88$j#1A8Z9Ki15YQk?FSN;U2EDL zIjOic^2&m;h@1lQd(K6s=`fKBto%J*-|0GOKh1w=Bn zATQKVJ<4HxoAjQPZ7vzXjM>OYRcUraLjhim2pP4i;GO z>8wI7KOmo@TBjijV7tf$Wzw`sGfFlqk-9Vf$9HCu|JY=A8y0xG`&m#vzuPXXz6uKA zgGx7_de*e!BE!A+v<4KqpmI_f=Qz(oUojqdPFczp&{e47>U{cq_(c`8 z&QUga#-N+*$6Q!!VZC;x3gQRC;KNY4Ho?fNI;jjeg74BkCk0$xh?g%Ke9%Lzfp1?V zvSt4TJZt2U`n1%ek)ja;m`gG@EK#~*$;=X|3aA`z*XlRKG@MXMI09x){BqdO*u!{2 zk<&iG{HO?h(cy%@dI$)Z!G)SHb?RwmGfknWj$rU+;7w*Xg6)-~pm4 zSmR9os5!zj%O)Kd^Kslbc28}46>#?c2DKY_+k;(5z!&jzdC!tVdtDA2j?jh&75trA zmp7zZ_;8pM*$f-*8~f_;%F1Ze`lsHo2tK4Hzc+!MS5C9A?=dhK4*d-t0BcHw!uhcX zbL7?$7#gZ{#x&A=B~EOv`i2zKty-Rgl#)a{#v)d=AG>XnY^i7A-1e1=*PMGJeBw;R zQaH(EveeOH@3)m`u@wk3v~@~t<`S)HkFx8oAS9AIh*gRA`_eLpL>-p(?_uyW&=ayu 
zr*NV5H!d4phq{K9?x+QU8l1VSZ?uCvE~?bWW=+QyBoct0Zh)swj>CRV;zt-GnfG>?da|PAKl5=4%li$=3>W+CAB@6ARJ?~K&_$@ zZ4R$LP}-qo?2^}&-`Hw-st@(SA9Z5JHi6TUE~(g93oaN=bxI-S?_g4hrUuP76>gLM zwGZrv3U-%?ERr;pwJSb<-(24RfLLb0uN4JRsXUONiQu1B$qTEgX>(xYOYyG_AQV?K zm=l`f{U$GYVm_;|M_~{L73j}U$}Ev|6V!7v5pXdV;#QWM(E2TzEUYwURqcs^SG7?{ z$_M%>l*U_%HU;rf(d`eKYRUssU2u^kE29D|dm+a7je^{a)wy9AP*N5dYCK?PRpX$$ld5j51t_vaJC zqWu~{NJC>7MVH#{989*%`n1t2yeS({Epgg=PHKoXh0TJAD#cP&7JVJaZ7&sbtW*-o;RC9tz-a2 zwM9jgv$4Tl`53rpd{Ny@l&T1|Z7|^3D+GZ#ID%ZV=F^Yjr@v~+fY{16&3b&i#MrOZ*q27`kMaFRK#)qzE56-3&VK>39EL_~SYa}1`$pzciMdl4r0s^W^~1GFc)CXLhG zbeB6$WaW?54NScX+VsJ*e!&18y;lW`R4uQC>q9L|e6-S9##1S^9t0bxP^#I(S=P?) zb!~3vv7MQ|pDTve0KbWbI0?S}x#odj_0pFsm>fdUR5D#PIwy!pU9Wu2gEUncfe{y} zr{Rr3m6CQbf!w&TvNvc|WD`x2S{&r&0YLML*)_{bz4~eim^thgo!a83P)HVX6_D9@w zV+Vyn@`02kiL)WLW%deYZG)doW!ql@c~0f{85L%Ss~qtCQXg5Q)2)kI)J@L7k6Wlm z$>`Bd1e#YmFigg-dkkFznytA%K(C%eS^5S=JKa@)(I@{HmZUpg3)YXq{7rlb3HFn2 zlLYID$ILZV%cYr7+>q{p@?vq$L-!7y92@!Vr_e)N97dTwhEj4SG!ba$V2Q8k#*C0< zA|O)e02;8JUuGzuxwIU-fp%w?nMcZc?)98~NZ^&aSt7bgyYtven*h;Ls}Z0lMGGXA zGd+msiKCo7T4uw5s>kJWdB!Sa%{~!3e&K}>m}QO0f>@{6vt7zR?h?zC1EL!4R>#4A zgqSP#j?}eii?Lji)>{>F-5B zpf)!==)k!!Mw3_L)(-I!&mPWSQAy(aB+Q-6w)R3LH;}RytkL2_z``-m;{6@F%&~;` zZR@V_Rp`F3tSGWL2=P|MRmaN03o$Ek$Q9$&05Q%?+f3L5Qjd?KfR5+&>jjSv8J9^4 zdfL}8*E_LQwQohXUsIuyq#`4_=%VL~Pk+_3Ktx^fS!zF1X-;zKLj`#+|IGpjSb6RT zIKWhD7grWDF!xZxAt9%#_5gBa&uvauuTNKmUM^q5`*`xuWUD_wGPllC!Y{J1 zFAbh-rr!V?Y4I0N`iq%fc{sczJUr=))H53GxzR$$Jrf)&F21Umf_R-(-~v|MxLZIP z_H8E@Ml76sSpbJk9z%$!IVX!4;(me!`xzx@S()vZ=JVS3-=dm`1M##+S!NTcwz|dG zcNyYoK$StpS;&`&kRP;C&xxXN?m=>z4pzNq6}pnTOHHUc%(AxuNpCPFu{s}~=-B$_ zD-76BsS}*ie6>;lxXGA=;y|sq*$41wJOmtnh_gO7OMLvYWiFE})P7qq zof1rjYj3%jW;l|#BnqcBLpK}Dfsi7j^XU%_9&C2zD4X;M^7`|G2<3YY)^~Ai?k>N z#85bO62s4^Tieqh^*$`5(fgs3VulnrAmgQNwHX(?&D2CN&V5?`3;umUAF;phQ~jqc zZLg@Y(7h$KSq?$&$K9PFcdTHc{HROv$|9ed{)++G&)qm8T(;Ok9O-$p{jLHM>5z;$ z`3Xk(ujCb@K2-f27s0LA-@ z4A(d(Q$z;tIA5#f(}S_+kCIBZ5Q>|&cKqj|S@lqr{eX^Bw-&UB?NtJiwwlIjZv11A zcBDCOxfOx|pH)g-kFJYU9dM&(q!6=dE`J8B5+MXex@$ad9{}tV<)t z)Hy}PeUqa3kls{$1AvL0qg2e|@vIqm)C4vKG9O6Wz0H=InSUVGy3uj-Cfy}021~na zzHbW0-bx3ij$gPR_}AaD^21*)!LtV^{(TTbMriQbwNn&bJMgjcMFGq8Rjl(MO(;xD zxS)v|Xryj!lt~k#D}(E12F8@Cq?j9k1{(eq%M*Q}%=!^IfDg&5bx{IfIo_n=?6K%x}ES|MrH($;C zLm@ZneN#L{Xl8>UYhHDYo}Ne~hHOeSam{dcwyy{gP&p%ScAM2kN?x|(>gYLo1MLCe zC#W_=S#k_xST`cFldCSv4~8pwX!l+-Onqs?d2Mx6+OwX%o`)NQ_{8dCwn-xj%@UM` zpRDGymJ{n12?)i=96}@nvY9c>dHR!Td(;X{@~4u^=dK2E%{{w4 zX?gI*ks`&cJA(xi_^M#J#5Lk;ZjX7{UAe4wQ@(v0^$xBd@6Tb$eD1ROVgr#uu=yVv z;<(rB)rxSS{ZSadJ9yx>(Ltt0p-s{hb~-Ot83PxqO5zYq5<@?*lFi57+3fNutfbCq z3PlX$Osp9#=8v4wz95ye;6as!yWiiP*SyeUPdlHhHitc@(2A08Q`_gk1c{u!@!9Tj zw+5Q1g)ZuK^%bIJ<~P#LA9tp$hy^C$Vp2u%5E#?kX0nyscBzhzMglqAY%9?CLqbcc z%_J}DOoU$_ZT}=Q6_|s|7*7U#n~pw=G5%F+e)7qoZgh62Fp4p9*AiaNKl-A&66xTC zO92e1CQKsWmpG__<#Ouz)`)%N28?ff=ou?!8^B{N%D+bL9oXu(TyZc~d?YoV&I?$H zVeH@`Ad@4#{8{VrQ{ChEx^>U!dQb&{lN%E()b~p_OG+}=P^?r_OJ2(chU_PQ&-ayE z=Hi-|?c*G0+Zuc)n-Iy@6Hn&E@+Nt6VKBewY8u;3yUZ<)K>W)+3A&D0o=mjvAqc;mARHw-ga5*ghqq}cX`3|`L@~EO^32v z=(?&x^@F1{2zLd0;NDvnc+>nA1UkI#h6utiCWVX_D24B!U9TQ#H6G3$?r0-v0=S_W z8YiM&(1R~<;pzyZueZ;vfU zvBX(Ix=3$oWkS6?;)cE7J*f4SklE9B=C@b+NC)COS`uJeyy^ZP53%3@Ajd`5u=3_9TCZ>&eHz-&D!!O%hG&UqXC0{yr-QJZdcaQG)gs zC1Y4Q_FOi^)r3kO|JA8!#W^5Gu6i2K@dBvB;u)2HX3)t#kI(j=w3?ssOH;;TlF56q zjeow!);}IKnqdvgVaJ81(3R!!B;99NCXZ)W_~!tp$hZ9Njo6rAi4e*7n(J~>(|bM2 zGV4+iQUpSrqK1~zPbFOQ%pnMXl(mBX(o+p%u@21O=;g7bto6XN|GTP9NJ)Z^o&!Jb zr7U1GxDg#-##0UyD3=SF`Zi};=^F3S$PWeV2OMa{P|3Zc@+^0WiY%4o%xU+|#8$ge zO$~NCy$^39*rGZEINux-j+|UZ= zt?gGmtH}w!kOfdi4EcomoVwj@_Fngq%lXFhepMfZ4QBKtnHJG^&ZrT%K7D+el)SSK 
zo-GLu!nDH#K9>-<(`Q-<03KJ=E8Py+HY`A)bu%OwnH4qb&Hs*}s`NANUKTpO92-@R z%kHhDnnk*f;CQA8Pq3+tCOp&Qmof-bq%28&o#_hN zhF$nYXd1)P3T#|7m{bolb3#+21)}A+#3A#@Zg8v(A1ZxBms=9w+r(x8gYXZ1SrJDhF6VBG69@IJvQVsy+Y$ z>UhmtM;q8P!6b9NA_Y_je$(6_I{PC^TrYBl-nzrn4^MZBO8 zc$T6I7yP=7Jy<|LDUqcMcsjTfmWEG8$T{8nVNv{ZqaGFQq#19p|UOcS1C`_kg$PY)!p zpNO&q$HKCY7e+JL`Qm#cO+;fX{KYq0JpAWqLvISf`q?S2cmlU`D2MS>x#4g|(m-Ux zdfkstpV86yDAPs`1_jBhjHg(RLaaqbV5`?NUv|=lMq_|EJIb9;?7Vwi8-(t7&c)Y}Q6yRIQ!# z)=FYj3qr5mDnccFh;oLtN*zhx5Cf&)nhbOdGbCJQo|7pOArtCKeNs+@CJ>Xq7WQ%?ZdlXOSmKQO;GKY9mY2F{zW5s(`;vIeIPa}3wP;ks=J zI0-8*)gw>kkY0u?=Y}Cm82wara3O7MAao8ZKS*_?#3Em<4IRdA$}7&>=Mh-! zd9$1jrQd?1HjLwc1~#6sOaM}*Zxyj)>2kY+ZRh_m~KMQ2#=62_Q=BxB0KSY|)Y0XL=lCNzh@ z2`Ee{9D*_kBF4GCoYd4q{rkK`DFJQmOuyVy0Y34~(7*4i}rDLG?+8h zSmEeT+?(_4;ZocY_yIxK(+yxk4`64>fCKpv#L4^C8FFD`gKC78<`BQ`*3%()N=vLz zwkx3mN$A>k-;FJ*9hZ5rL$OL1FpRc9Bj6;Q97>j*QeWQ(4fVlj}$5b zhF%7fu~?XbjSu^n8TRKu1rdlVhwkP|MzGxSA9J8+ah+Q>5IzS8ba>Bj!^YTNk&eh} z>H0*VjW%Ie=E!rTnOi}C9kECwnD;Bc?Q#exm&CjBdSe>qAM4Cb2Bh&XYI1v^5Ldfw4?XT2EpIRL;SnVdqQ6%(i9yIwL ntv~W6Y3X9dsR>Z;|A4AGw<(;8DtG?pt)Y~dylAbkVetO}CY4rB literal 0 HcmV?d00001 diff --git a/vendor/github.com/sendgrid/sendgrid-go/.env_sample b/vendor/github.com/sendgrid/sendgrid-go/.env_sample new file mode 100644 index 0000000..30857f4 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/.env_sample @@ -0,0 +1 @@ +export SENDGRID_API_KEY='' \ No newline at end of file diff --git a/vendor/github.com/sendgrid/sendgrid-go/.gitignore b/vendor/github.com/sendgrid/sendgrid-go/.gitignore new file mode 100644 index 0000000..05a34cd --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +temp.go +*.swp +.env +coverage.txt +sendgrid.env +.vscode +prism* diff --git a/vendor/github.com/sendgrid/sendgrid-go/.travis.yml b/vendor/github.com/sendgrid/sendgrid-go/.travis.yml new file mode 100644 index 0000000..0366894 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/.travis.yml @@ -0,0 +1,28 @@ +language: go + +env: + - version=1.7 + - version=1.8 + - version=1.9 + - version=1.10 + - version=1.11 + - version=1.12 + - version=1.13 + - version=1.14 + - version=1.15 + - version=latest + +script: + - make test-docker + +after_success: + - bash <(curl -s https://codecov.io/bash) + +notifications: + slack: + if: branch = main + on_pull_requests: false + on_success: never + on_failure: change + rooms: + - secure: GZFYycVurLTjSl+YLt1bJM5N0Gw1JJGD7/eye8HmdFbdPQD+cadUWAquM4DV84In+rvRqgYaMtLO62POj9PynzQm1Xvmk1jAoQVLQq+UUsS0hKpnRsxhv1Yp6k5Avi8RVxfeAq0inEopbwtIdbrcwNQYFFsyAnj3hTKSMOgz7Ks= diff --git a/vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md b/vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md new file mode 100644 index 0000000..633d69d --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md @@ -0,0 +1,270 @@ +# Change Log +All notable changes to this project will be documented in this file. + +[2020-11-18] Version 3.7.2 +-------------------------- +**Library - Docs** +- [PR #281](https://github.com/sendgrid/sendgrid-go/pull/281): Email activity API Documentation. Thanks to [@dhoeric](https://github.com/dhoeric)! + + +[2020-11-05] Version 3.7.1 +-------------------------- +**Library - Test** +- [PR #411](https://github.com/sendgrid/sendgrid-go/pull/411): ensure source files are properly formatted. Thanks to [@childish-sambino](https://github.com/childish-sambino)! + +**Library - Fix** +- [PR #415](https://github.com/sendgrid/sendgrid-go/pull/415): Rename LICENSE.md to LICENSE. 
Thanks to [@coolaj86](https://github.com/coolaj86)! + +**Library - Docs** +- [PR #282](https://github.com/sendgrid/sendgrid-go/pull/282): Update examples using inline attachment with ContentID. Thanks to [@anchepiece](https://github.com/anchepiece)! + + +[2020-10-14] Version 3.7.0 +-------------------------- +**Library - Feature** +- [PR #410](https://github.com/sendgrid/sendgrid-go/pull/410): allow personalization of From name and email for each recipient. Thanks to [@JenniferMah](https://github.com/JenniferMah)! + +**Library - Fix** +- [PR #272](https://github.com/sendgrid/sendgrid-go/pull/272): Accept empty html on Email helper NewSingleEmail(). Thanks to [@tjun](https://github.com/tjun)! + + +[2020-09-28] Version 3.6.4 +-------------------------- +**Library - Fix** +- [PR #408](https://github.com/sendgrid/sendgrid-go/pull/408): don't wrap names in double-quotes. Thanks to [@childish-sambino](https://github.com/childish-sambino)! + + +[2020-09-02] Version 3.6.3 +-------------------------- +**Library - Docs** +- [PR #287](https://github.com/sendgrid/sendgrid-go/pull/287): Correct *.md files using Grammarly. Thanks to [@vkartik97](https://github.com/vkartik97)! + + +[2020-08-19] Version 3.6.2 +-------------------------- +**Library - Chore** +- [PR #402](https://github.com/sendgrid/sendgrid-go/pull/402): update GitHub branch references to use HEAD. Thanks to [@thinkingserious](https://github.com/thinkingserious)! + + +[2020-08-05] Version 3.6.1 +-------------------------- +**Library - Docs** +- [PR #329](https://github.com/sendgrid/sendgrid-go/pull/329): Remove references to legacy "Whitelabel" Verbiage. Thanks to [@crweiner](https://github.com/crweiner)! + +**Library - Fix** +- [PR #401](https://github.com/sendgrid/sendgrid-go/pull/401): use the last version of testify that works for older go versions. Thanks to [@childish-sambino](https://github.com/childish-sambino)! + +**Library - Chore** +- [PR #400](https://github.com/sendgrid/sendgrid-go/pull/400): migrate to new default sendgrid-oai branch. Thanks to [@eshanholtz](https://github.com/eshanholtz)! + + +[2020-05-14] Version 3.6.0 +-------------------------- +**Library - Feature** +- [PR #392](https://github.com/sendgrid/sendgrid-go/pull/392): add support for Twilio Email. Thanks to [@childish-sambino](https://github.com/childish-sambino)! +- [PR #390](https://github.com/sendgrid/sendgrid-go/pull/390): add function for signature verification. Thanks to [@brpat07](https://github.com/brpat07)! +- [PR #389](https://github.com/sendgrid/sendgrid-go/pull/389): add support and example for secure webhook feature. Thanks to [@brpat07](https://github.com/brpat07)! + +**Library - Fix** +- [PR #388](https://github.com/sendgrid/sendgrid-go/pull/388): refactor and fix inbound email handling. Thanks to [@eshanholtz](https://github.com/eshanholtz)! +- [PR #391](https://github.com/sendgrid/sendgrid-go/pull/391): migrate to common prism setup. Thanks to [@childish-sambino](https://github.com/childish-sambino)! + + +[2020-04-01] Version 3.5.4 +-------------------------- +**Library - Docs** +- [PR #386](https://github.com/sendgrid/sendgrid-go/pull/386): support verbiage for login issues. Thanks to [@adamchasetaylor](https://github.com/adamchasetaylor)! + + +[2020-02-19] Version 3.5.3 +-------------------------- +**Library - Docs** +- [PR #295](https://github.com/sendgrid/sendgrid-go/pull/295): Update documentation for retrieving a list of all templates. Thanks to [@renshuki](https://github.com/renshuki)! 
+ + +[2020-02-05] Version 3.5.2 +-------------------------- +**Library - Docs** +- [PR #309](https://github.com/sendgrid/sendgrid-go/pull/309): Fixed link to bug report template. Thanks to [@alxshelepenok](https://github.com/alxshelepenok)! + +**Library - Chore** +- [PR #372](https://github.com/sendgrid/sendgrid-go/pull/372): Add current Go versions to Travis. Thanks to [@pangaunn](https://github.com/pangaunn)! + + +[2020-01-30] Version 3.5.1 +-------------------------- +**Library - Chore** +- [PR #382](https://github.com/sendgrid/sendgrid-go/pull/382): clean up prism installation. Thanks to [@childish-sambino](https://github.com/childish-sambino)! +- [PR #379](https://github.com/sendgrid/sendgrid-go/pull/379): prep repo for automation. Thanks to [@thinkingserious](https://github.com/thinkingserious)! + +**Library - Docs** +- [PR #380](https://github.com/sendgrid/sendgrid-go/pull/380): baseline all the templated markdown docs. Thanks to [@childish-sambino](https://github.com/childish-sambino)! +- [PR #348](https://github.com/sendgrid/sendgrid-go/pull/348): fix usage link in README. Thanks to [@BogdanHabic](https://github.com/BogdanHabic)! + +**Library - Fix** +- [PR #353](https://github.com/sendgrid/sendgrid-go/pull/353): double quote escape names with special characters. Thanks to [@haleyrc](https://github.com/haleyrc)! + + +[2019-06-13] Version 3.5.0 +-------------------------- +### Added +- [PR #117](https://github.com/sendgrid/sendgrid-go/pull/117): Add release notifications. Big thanks to [Gabriel Krell](https://github.com/gabrielkrell) for the PR! +- [PR #118](https://github.com/sendgrid/sendgrid-go/pull/118): Update USE_CASES.md formatting. Big thanks to [Kyle Roberts](https://github.com/kylearoberts) for the PR! +- [PR #123](https://github.com/sendgrid/sendgrid-go/pull/123): Update USE_CASES.md with substitutions and sections. Big thanks to [Kyle Roberts](https://github.com/kylearoberts) for the PR! +- [PR #111](https://github.com/sendgrid/sendgrid-go/pull/111): Add examples from "Personalizations Example Index" to USE_CASES.md. Big thanks to [Christopher Li](https://github.com/LiYChristopher) for the PR! +- [PR #127](https://github.com/sendgrid/sendgrid-go/pull/127): Update Travis YML to use newer go versions. Big thanks to [Tariq Ibrahim](https://github.com/tariq1890) for the PR! +- [PR #143](https://github.com/sendgrid/sendgrid-go/pull/143): Added a warning about the error return from sendgrid.API in TROUBLESHOOTING.md. Big thanks to [Leandro Lugaresi](https://github.com/leandro-lugaresi) for the PR! +- [PR #128](https://github.com/sendgrid/sendgrid-go/pull/128): Added a Mail Refactor proposal. Big thanks to [Suchit Parikh](https://github.com/suchitparikh) for the PR! +- [PR #153](https://github.com/sendgrid/sendgrid-go/pull/153): Added Code of Conduct. Big thanks to [Rubemlrm](https://github.com/Rubemlrm) for the PR! +- [PR #139](https://github.com/sendgrid/sendgrid-go/pull/139): Added attachment use case examples. Big thanks to [Christopher Li](https://github.com/LiYChristopher) for the PR! +- [PR #165](https://github.com/sendgrid/sendgrid-go/pull/165): Update USE_CASES.md with statistics and domain whitelabel examples. Big thanks to [Nexus Web Development](https://github.com/NexWeb) for the PR! +- [PR #186](https://github.com/sendgrid/sendgrid-go/pull/186): Moved logo to top and added more badges. Big thanks to [Alex](https://github.com/myzeprog) for the PR! +- [PR #187](https://github.com/sendgrid/sendgrid-go/pull/187): Made the README/Doc sections more SEO friendly. 
Big thanks to [Alex](https://github.com/myzeprog) for the PR! +- [PR #188](https://github.com/sendgrid/sendgrid-go/pull/188): Add Go specific badges to the README. Big thanks to [Thorsten Schifferdecker](https://github.com/curx) for the PR! +- [PR #181](https://github.com/sendgrid/sendgrid-go/pull/181): Add review request body section to TROUBLESHOOTING.md. Big thanks to [Alex](https://github.com/myzeprog) for the PR! +- [PR #363](https://github.com/sendgrid/sendgrid-go/pull/363): Twilio branding + CLA updates. +- [PR #217](https://github.com/sendgrid/sendgrid-go/pull/217): Initialize map on declaration (round 2). Big thanks to [Noah Santschi-Cooney](https://github.com/Strum355) for the PR! +- [PR #216](https://github.com/sendgrid/sendgrid-go/pull/216): Initialize map on declaration. Big thanks to [Noah Santschi-Cooney](https://github.com/Strum355) for the PR! +- [PR #210](https://github.com/sendgrid/sendgrid-go/pull/210): Add github PR template. Big thanks to [Alex](https://github.com/pushkyn) for the PR! +- [PR #225](https://github.com/sendgrid/sendgrid-go/pull/225): Add test for license date range. Big thanks to [Mansour Rahimi](https://github.com/m4ns0ur) for the PR! +- [PR #214](https://github.com/sendgrid/sendgrid-go/pull/214): Add a .env_sample file, update gitignore, update README.md. Big thanks to [thepriefy](https://github.com/thepriefy) for the PR! +- [PR #137](https://github.com/sendgrid/sendgrid-go/pull/137): Add Dockerize. Big thanks to [Eric Ho](https://github.com/dhoeric) for the PR! +- [PR #200](https://github.com/sendgrid/sendgrid-go/pull/200): Helping get golint to 100%. Big thanks to [Paul Lovato](https://github.com/Cleanse) for the PR! +- [PR #234](https://github.com/sendgrid/sendgrid-go/pull/234): Add announcement: Software Engineer role. Big thanks to [Marghodk](https://github.com/Marghodk) for the PR! +- [PR #228](https://github.com/sendgrid/sendgrid-go/pull/228): Include Gometalinter in Travis CI build. Big thanks to [Vasko Zdravevski](https://github.com/vaskoz) for the PR! +- [PR #229](https://github.com/sendgrid/sendgrid-go/pull/229): Add test for checking specific repo files. Big thanks to [Mansour Rahimi](https://github.com/m4ns0ur) for the PR! +- [PR #231](https://github.com/sendgrid/sendgrid-go/pull/231): Adds codecov. Big thanks to [Charlie Lewis](https://github.com/cglewis) for the PR! +- [PR #155](https://github.com/sendgrid/sendgrid-go/pull/155): Added optional rate limit handling. Big thanks to [Andy Trimble](https://github.com/andy-trimble) for the PR! +- [PR #250](https://github.com/sendgrid/sendgrid-go/pull/250): Exclude time.Until lint error until we stop supporting Go 1.7 and lower. Big thanks to [Dustin Mowcomber](https://github.com/dmowcomber) for the PR! +- [PR #264](https://github.com/sendgrid/sendgrid-go/pull/264): Readability update. Big thanks to [Anshul Singhal](https://github.com/af4ro) for the PR! +- [PR #263](https://github.com/sendgrid/sendgrid-go/pull/263): Dynamic template support. Big thanks to [Devin Chasanoff](https://github.com/devchas) for the PR! +- [PR #268](https://github.com/sendgrid/sendgrid-go/pull/268): mail: add test case on empty HTML to NewSingleEmail. Big thanks to [Arthur Silva](https://github.com/arxdsilva) for the PR! +- [PR #269](https://github.com/sendgrid/sendgrid-go/pull/269): use testify. Big thanks to [Arthur Silva](https://github.com/arxdsilva) for the PR! +- [PR #280](https://github.com/sendgrid/sendgrid-go/pull/280): helpers/mail: add testify to new test. 
Big thanks to [Arthur Silva](https://github.com/arxdsilva) for the PR! +- [PR #194](https://github.com/sendgrid/sendgrid-go/pull/194): Allows users to submit rfc822 formatted email addresses. Big thanks to [Tariq Ibrahim](https://github.com/tariq1890) for the PR! +- [PR #197](https://github.com/sendgrid/sendgrid-go/pull/197): Make Getenv("message") parameter more professional. Big thanks to [Nafis Faysal](https://github.com/nafisfaysal) for the PR! +- [PR #238](https://github.com/sendgrid/sendgrid-go/pull/238): Added Code Review to Contributing.md. Big thanks to [Manjiri Tapaswi](https://github.com/mptap) for the PR! +- [PR #293](https://github.com/sendgrid/sendgrid-go/pull/293): Use case directory structure update. Big thanks to [Arshad Kazmi](https://github.com/arshadkazmi42) for the PR! +- [PR #243](https://github.com/sendgrid/sendgrid-go/pull/243): Add the ability to impersonate a subuser. Big thanks to [Boris M](https://github.com/denwwer) for the PR! +- [PR #327](https://github.com/sendgrid/sendgrid-go/pull/327): Update prerequisites verbiage. Big thanks to [Rishabh](https://github.com/Rishabh04-02) for the PR! + +### Fixed +- [PR #141](https://github.com/sendgrid/sendgrid-go/pull/141): Fix TROUBLESHOOTING.md typo. Big thanks to [Cícero Pablo](https://github.com/ciceropablo) for the PR! +- [PR #149](https://github.com/sendgrid/sendgrid-go/pull/149): Various typo fixes. Big thanks to [Ivan](https://github.com/janczer) for the PR! +- [PR #146](https://github.com/sendgrid/sendgrid-go/pull/146): USAGE.MD - Various grammar fixes. Big thanks to [Necroforger](https://github.com/Necroforger) for the PR! +- [PR #121](https://github.com/sendgrid/sendgrid-go/pull/121): Go lint fixes. Big thanks to [Srinivas Iyengar](https://github.com/srini156) for the PR! +- [PR #163](https://github.com/sendgrid/sendgrid-go/pull/163): Go vet fixes. Big thanks to [Vasko Zdravevski](https://github.com/vaskoz) for the PR! +- [PR #191](https://github.com/sendgrid/sendgrid-go/pull/191): Spelling corrections in md and method names. Big thanks to [Brandon Smith](https://github.com/brandon93s) for the PR! +- [PR #202](https://github.com/sendgrid/sendgrid-go/pull/202): Typos. Big thanks to [Varun Dey](https://github.com/varundey) for the PR! +- [PR #148](https://github.com/sendgrid/sendgrid-go/pull/148): Fix golint and gofmt errors. Big thanks to [Prateek Pandey](https://github.com/prateekpandey14) for the PR! +- [PR #198](https://github.com/sendgrid/sendgrid-go/pull/198): Fix wrong mail helpers example directory in README. Big thanks to [Kher Yee](https://github.com/tkbky) for the PR! +- [PR #196](https://github.com/sendgrid/sendgrid-go/pull/196): Fix for gocyclo - reducing cyclomatic complexity. Big thanks to [Srinivas Iyengar](https://github.com/srini156) for the PR! +- [PR #223](https://github.com/sendgrid/sendgrid-go/pull/223): Update LICENSE - set correct year. Big thanks to [Alex](https://github.com/pushkyn) for the PR! +- [PR #215](https://github.com/sendgrid/sendgrid-go/pull/215): Megacheck found 2 small issues. Big thanks to [Vasko Zdravevski](https://github.com/vaskoz) for the PR! +- [PR #224](https://github.com/sendgrid/sendgrid-go/pull/224): Fix spelling and formatting of comments in mail_v3.go. Big thanks to [Catlinman](https://github.com/catlinman) for the PR! +- [PR #248](https://github.com/sendgrid/sendgrid-go/pull/248): Fix license and file tests. Big thanks to [Dustin Mowcomber](https://github.com/dmowcomber) for the PR! 
+- [PR #252](https://github.com/sendgrid/sendgrid-go/pull/252): Add coverage.txt to .gitignore. Big thanks to [Dustin Mowcomber](https://github.com/dmowcomber) for the PR! +- [PR #261](https://github.com/sendgrid/sendgrid-go/pull/261): README tag update and linter error fix. Big thanks to [Anshul Singhal](https://github.com/af4ro) for the PR! +- [PR #273](https://github.com/sendgrid/sendgrid-go/pull/273): Exclude examples from go tests, Travis Job. Big thanks to [Fares Rihani](https://github.com/anchepiece) for the PR! +- [PR #278](https://github.com/sendgrid/sendgrid-go/pull/278): GoReportCard fixes to reach 100%. Big thanks to [Vasko Zdravevski](https://github.com/vaskoz) for the PR! +- [PR #232](https://github.com/sendgrid/sendgrid-go/pull/232): Update CONTRIBUTING.md formatting. Big thanks to [thepriefy](https://github.com/thepriefy) for the PR! +- [PR #258](https://github.com/sendgrid/sendgrid-go/pull/258): gofmt fixes. Big thanks to [ia](https://github.com/whilei) for the PR! +- [PR #292](https://github.com/sendgrid/sendgrid-go/pull/292): Fix broken link. Big thanks to [pangaunn](https://github.com/pangaunn) for the PR! +- [PR #324](https://github.com/sendgrid/sendgrid-go/pull/324): inbound: Fix readme links. Big thanks to [Arthur Silva](https://github.com/arxdsilva) for the PR! +- [PR #339](https://github.com/sendgrid/sendgrid-go/pull/339): Fix Travis builds. Big thanks to [Kevin Gillette](https://github.com/extemporalgenome) for the PR! +- [PR #321](https://github.com/sendgrid/sendgrid-go/pull/321): Clean up Dockerfile. Big thanks to [gy741](https://github.com/gy741) for the PR! + +## [3.4.1] - 2017-07-03 +### Added +- [Pull #116](https://github.com/sendgrid/sendgrid-go/pull/116): Fixing mimetypes in the NewSingleEmail function +- Big thanks to [Depado](https://github.com/Depado) for the pull request! + +## [3.4.0] - 2017-06-14 +### Added +- [Pull #96](https://github.com/sendgrid/sendgrid-go/pull/96): Send a Single Email to a Single Recipient +- Big thanks to [Oranagwa Osmond](https://github.com/andela-ooranagwa) for the pull request! + +## [3.3.1] - 2016-10-18 +### Fixed +- [Pull #95](https://github.com/sendgrid/sendgrid-go/pull/95): Use log instead of fmt for printing errors +- Big thanks to [Gábor Lipták](https://github.com/gliptak) for the pull request! + +## [3.3.0] - 2016-10-10 +### Added +- [Pull #92](https://github.com/sendgrid/sendgrid-go/pull/92): Inbound Parse Webhook support +- Checkout the [README](helpers/inbound) for details. + +## [3.2.3] - 2016-10-10 +### Added +- [Pull #91](https://github.com/sendgrid/sendgrid-go/pull/91): Simplified code in mail helper +- Big thanks to [Roberto Ortega](https://github.com/berto) for the pull request! + +## [3.2.2] - 2016-09-08 +### Added +- Merged pull request: [update prismPath and update prism binary](https://github.com/sendgrid/sendgrid-go/pull/80) +- Special thanks to [Tom Pytleski](https://github.com/pytlesk4) for the pull request! + +## [3.2.1] - 2016-08-24 +### Added +- Table of Contents in the README +- Added a [USE_CASES.md](USE_CASES.md) section, with the first use case example for transactional templates + +## [3.2.0] - 2016-08-17 +### Added +- Merged pull request: [make contents var args in NewV3MailInit](https://github.com/sendgrid/sendgrid-go/pull/75) +- The `NewV3MailInit` [Mail Helper](helpers/mail) constructor can now take in multiple content objects. +- Thanks to [Adrien Delorme](https://github.com/azr) for the pull request! 
+ +## [3.1.0] - 2016-07-28 +- Dependency update to v2.2.0 of [sendGrid-rest](https://github.com/sendgrid/rest/releases/tag/v2.2.0) +- Pull [#9](https://github.com/sendgrid/rest/pull/9): Allow for setting a custom HTTP client +- [Here](https://github.com/sendgrid/rest/blob/HEAD/rest_test.go#L127) is an example of usage +- This enables usage of the [sendgrid-go library](https://github.com/sendgrid/sendgrid-go) on [Google App Engine (GAE)](https://cloud.google.com/appengine/) +- Special thanks to [Chris Broadfoot](https://github.com/broady) and [Sridhar Venkatakrishnan](https://github.com/sridharv) for providing code and feedback! + +## [3.0.6] - 2016-07-26 ## +### Added +- [Troubleshooting](TROUBLESHOOTING.md) section + +## [3.0.5] - 2016-07-20 +### Added +- README updates +- Update introduction blurb to include information regarding our forward path +- Update the v3 /mail/send example to include non-helper usage +- Update the generic v3 example to include non-fluent interface usage + +## [3.0.4] - 2016-07-12 +### Added +- Update docs, unit tests and examples to include Sender ID +### Fixed +- Missing example query params for the examples + +## [3.0.3] - 2016-07-08 +### Fixed +- [Can't disable subscription tracking #68](https://github.com/sendgrid/sendgrid-go/issues/68) + +## [3.0.2] - 2016-07-07 +### Added +- Tests now mocked automatically against [prism](https://stoplight.io/prism/) + +## [3.0.1] - 2016-07-05 +### Added +- Accept: application/json header per https://sendgrid.com/docs/API_Reference/Web_API_v3/How_To_Use_The_Web_API_v3/requests.html + +### Updated +- Content based on our updated [Swagger/OAI doc](https://github.com/sendgrid/sendgrid-oai) + +## [3.0.0] - 2016-06-14 +### Added +- Breaking change to support the v3 Web API +- New HTTP client +- v3 Mail Send helper + +## [2.0.0] - 2015-05-02 +### Changed +- Fixed a nasty bug with orphaned connections but drops support for Go versions < 1.3. Thanks [trinchan](https://github.com/sendgrid/sendgrid-go/pull/24) + +## [1.2.0] - 2015-04-27 +### Added +- Support for API keys + diff --git a/vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md b/vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..2f0727e --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at open-source@twilio.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md b/vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md new file mode 100644 index 0000000..2d5366d --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md @@ -0,0 +1,176 @@ +Hello! Thank you for choosing to help contribute to one of the Twilio SendGrid open-source libraries. There are many ways you can contribute and help is always welcome. We simply ask that you follow the following contribution policies. + +All third-party contributors acknowledge that any contributions they provide will be made under the same open-source license that the open-source project is provided under. 
+ +- [Feature Request](#feature-request) +- [Submit a Bug Report](#submit-a-bug-report) +- [Improvements to the Codebase](#improvements-to-the-codebase) +- [Understanding the Code Base](#understanding-the-codebase) +- [Testing](#testing) +- [Style Guidelines & Naming Conventions](#style-guidelines-and-naming-conventions) +- [Creating a Pull Request](#creating-a-pull-request) +- [Code Reviews](#code-reviews) + +There are a few ways to contribute, which we'll enumerate below: + + +## Feature Request + +If you'd like to make a feature request, please read this section. + +The GitHub issue tracker is the preferred channel for library feature requests, but please respect the following restrictions: + +- Please **search for existing issues** in order to ensure we don't have duplicate bugs/feature requests. +- Please be respectful and considerate of others when commenting on issues + + +## Submit a Bug Report + +Note: DO NOT include your credentials in ANY code examples, descriptions, or media you make public. + +A software bug is a demonstrable issue in the code base. In order for us to diagnose the issue and respond as quickly as possible, please add as much detail as possible into your bug report. + +Before you decide to create a new issue, please try the following: + +1. Check the Github issues tab if the identified issue has already been reported, if so, please add a +1 to the existing post. +2. Update to the latest version of this code and check if the issue has already been fixed +3. Copy and fill in the Bug Report Template we have provided below + +### Please use our Bug Report Template + +In order to make the process easier, we've included a [sample bug report template](ISSUE_TEMPLATE.md). + + +## Improvements to the Codebase + +We welcome direct contributions to the sendgrid-go code base. Thank you! + +### Development Environment ### + +#### Install and Run Locally #### + +##### Prerequisites ##### + +- Go 1.6 +- [rest](https://github.com/sendgrid/rest) + +##### Initial setup: ##### + +```bash +git clone https://github.com/sendgrid/sendgrid-go.git +cd sendgrid-go +``` + +### Environment Variables + +First, get your free Twilio SendGrid account [here](https://sendgrid.com/free?source=sendgrid-go). + +Next, update your environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys). + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + +##### Execute: ##### + +* Check out the documentation for [Web API v3 endpoints](https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html). +* Review the corresponding [example](examples). +* Update the file + +```bash +go run +``` + + +## Understanding the Code Base + +**/examples** + +Working examples that demonstrate usage. + +**sendgrid.go** + +The main function that does the heavy lifting (and external entry point) is `API`. + + +## Testing + +All PRs require passing tests before the PR will be reviewed. All test files are in [`sendgrid_test.go`](sendgrid_test.go). For the purposes of contributing to this repo, please update the [`sendgrid_test.go`](sendgrid_test.go) file with unit tests as you modify the code. + +The integration tests require a Twilio SendGrid mock API in order to execute. We've simplified setting this up using Docker to run the tests. You will just need [Docker Desktop](https://docs.docker.com/get-docker/) and `make`. + +Once these are available, simply execute the Docker test target to run all tests: `make test-docker`. 
This command can also be used to open an interactive shell into the container where this library is installed. To start a *bash* shell for example, use this command: `command=bash make test-docker`. + + +## Style Guidelines & Naming Conventions + +Generally, we follow the style guidelines as suggested by the official language. However, we ask that you conform to the styles that already exist in the library. If you wish to deviate, please explain your reasoning. + +- [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) + +Please run your code through: + +- [fmt](https://blog.golang.org/go-fmt-your-code) + + +## Creating a Pull Request + +1. [Fork](https://help.github.com/fork-a-repo/) the project, clone your fork, + and configure the remotes: + + ```bash + # Clone your fork of the repo into the current directory + git clone https://github.com/sendgrid/sendgrid-go + + # Navigate to the newly cloned directory + cd sendgrid-go + + # Assign the original repo to a remote called "upstream" + git remote add upstream https://github.com/sendgrid/sendgrid-go + ``` + +2. If you cloned a while ago, get the latest changes from upstream: + + ```bash + git checkout + git pull upstream + ``` + +3. Create a new topic branch (off the main project development branch) to + contain your feature, change, or fix: + + ```bash + git checkout -b + ``` + +4. Commit your changes in logical chunks. Please adhere to these [git commit + message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) + or your code is unlikely to be merged into the main project. Use Git's + [interactive rebase](https://help.github.com/articles/interactive-rebase) + feature to tidy up your commits before making them public. + +4a. Create tests. + +4b. Create or update the example code that demonstrates the functionality of this change to the code. + +5. Locally merge (or rebase) the upstream development branch into your topic branch: + + ```bash + git pull [--rebase] upstream main + ``` + +6. Push your topic branch up to your fork: + + ```bash + git push origin + ``` + +7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) + with a clear title and description against the `main` branch. All tests must be passing before we will review the PR. + + +## Code Reviews + +If you can, please look at open PRs and review them. Give feedback and help us merge these PRs much faster! If you don't know how, Github has some [great information on how to review a Pull Request](https://help.github.com/articles/about-pull-request-reviews/). diff --git a/vendor/github.com/sendgrid/sendgrid-go/Dockerfile b/vendor/github.com/sendgrid/sendgrid-go/Dockerfile new file mode 100644 index 0000000..221541e --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/Dockerfile @@ -0,0 +1,17 @@ +ARG version=latest +FROM golang:$version + +ENV GODEBUG 'x509ignoreCN=0' + +COPY prism/prism/nginx/cert.crt /usr/local/share/ca-certificates/cert.crt +RUN update-ca-certificates + +WORKDIR /go/src/github.com/sendgrid/sendgrid-go +COPY . . + +RUN make install + +# Use the last version of testify that works for older go versions, and then +# re-install to update dependencies. 
+RUN (cd /go/src/github.com/stretchr/testify && git checkout v1.6.0) +RUN make install diff --git a/vendor/github.com/sendgrid/sendgrid-go/FIRST_TIMERS.md b/vendor/github.com/sendgrid/sendgrid-go/FIRST_TIMERS.md new file mode 100644 index 0000000..e53415c --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/FIRST_TIMERS.md @@ -0,0 +1,79 @@ +# How To Contribute to Twilio SendGrid Repositories via GitHub +Contributing to the Twilio SendGrid repositories is easy! All you need to do is find an open issue (see the bottom of this page for a list of repositories containing open issues), fix it and submit a pull request. Once you have submitted your pull request, the team can easily review it before it is merged into the repository. + +To make a pull request, follow these steps: + +1. Log into GitHub. If you do not already have a GitHub account, you will have to create one in order to submit a change. Click the Sign up link in the upper right-hand corner to create an account. Enter your username, password, and email address. If you are an employee of Twilio SendGrid, please use your full name with your GitHub account and enter Twilio SendGrid as your company so we can easily identify you. + + + +2. __[Fork](https://help.github.com/fork-a-repo/)__ the [sendgrid-go](https://github.com/sendgrid/sendgrid-go) repository: + + + +3. __Clone__ your fork via the following commands: + +```bash +# Clone your fork of the repo into the current directory +git clone https://github.com/your_username/sendgrid-go +# Navigate to the newly cloned directory +cd sendgrid-go +# Assign the original repo to a remote called "upstream" +git remote add upstream https://github.com/sendgrid/sendgrid-go +``` + +> Don't forget to replace *your_username* in the URL by your real GitHub username. + +4. __Create a new topic branch__ (off the main project development branch) to contain your feature, change, or fix: + +```bash +git checkout -b +``` + +5. __Commit your changes__ in logical chunks. + +Please adhere to these [git commit message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) or your code is unlikely be merged into the main project. Use Git's [interactive rebase](https://help.github.com/articles/interactive-rebase) feature to tidy up your commits before making them public. Probably you will also have to create tests (if needed) or create or update the example code that demonstrates the functionality of this change to the code. + +6. __Locally merge (or rebase)__ the upstream development branch into your topic branch: + +```bash +git pull [--rebase] upstream main +``` + +7. __Push__ your topic branch up to your fork: + +```bash +git push origin +``` + +8. __[Open a Pull Request](https://help.github.com/articles/creating-a-pull-request/#changing-the-branch-range-and-destination-repository/)__ with a clear title and description against the `main` branch. All tests must be passing before we will review the PR. + +## Important notice + +Before creating a pull request, make sure that you respect the repository's constraints regarding contributions. You can find them in the [CONTRIBUTING.md](CONTRIBUTING.md) file. 
+ +## Repositories with Open, Easy, Help Wanted, Issue Filters + +* [Python SDK](https://github.com/sendgrid/sendgrid-python/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [PHP SDK](https://github.com/sendgrid/sendgrid-php/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [C# SDK](https://github.com/sendgrid/sendgrid-csharp/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Ruby SDK](https://github.com/sendgrid/sendgrid-ruby/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Node.js SDK](https://github.com/sendgrid/sendgrid-nodejs/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Java SDK](https://github.com/sendgrid/sendgrid-java/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Go SDK](https://github.com/sendgrid/sendgrid-go/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Python SMTPAPI Client](https://github.com/sendgrid/smtpapi-python/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [PHP SMTPAPI Client](https://github.com/sendgrid/smtpapi-php/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [C# SMTPAPI Client](https://github.com/sendgrid/smtpapi-csharp/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Ruby SMTPAPI Client](https://github.com/sendgrid/smtpapi-ruby/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Node.js SMTPAPI Client](https://github.com/sendgrid/smtpapi-nodejs/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Java SMTPAPI Client](https://github.com/sendgrid/smtpapi-java/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Go SMTPAPI Client](https://github.com/sendgrid/smtpapi-go/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Python HTTP Client](https://github.com/sendgrid/python-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [PHP HTTP Client](https://github.com/sendgrid/php-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [C# HTTP Client](https://github.com/sendgrid/csharp-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Java HTTP Client](https://github.com/sendgrid/java-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Ruby HTTP Client](https://github.com/sendgrid/ruby-http-client/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Go HTTP Client](https://github.com/sendgrid/rest/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Open API 
Definition](https://github.com/sendgrid/sendgrid-oai/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [DX Automator](https://github.com/sendgrid/dx-automator/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) +* [Documentation](https://github.com/sendgrid/docs/issues?utf8=%E2%9C%93&q=is%3Aopen+label%3A%22difficulty%3A+easy%22+label%3A%22status%3A+help+wanted%22) diff --git a/vendor/github.com/sendgrid/sendgrid-go/ISSUE_TEMPLATE.md b/vendor/github.com/sendgrid/sendgrid-go/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..347eee7 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/ISSUE_TEMPLATE.md @@ -0,0 +1,30 @@ + + +### Issue Summary +A summary of the issue and the environment in which it occurs. If suitable, include the steps required to reproduce the bug. Please feel free to include screenshots, screencasts, or code examples. + +### Steps to Reproduce +1. This is the first step +2. This is the second step +3. Further steps, etc. + +### Code Snippet +```go +# paste code here +``` + +### Exception/Log +``` +# paste exception/log here +``` + +### Technical details: +* sendgrid-go version: +* go version: + diff --git a/vendor/github.com/sendgrid/sendgrid-go/LICENSE b/vendor/github.com/sendgrid/sendgrid-go/LICENSE new file mode 100644 index 0000000..29aba59 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (C) 2020, Twilio SendGrid, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sendgrid/sendgrid-go/Makefile b/vendor/github.com/sendgrid/sendgrid-go/Makefile new file mode 100644 index 0000000..f180226 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/Makefile @@ -0,0 +1,15 @@ +.PHONY: test install test-integ test-docker + +install: + go get -t -v ./... + +test: + ./go.coverage.sh + bash -c 'diff -u <(echo -n) <(gofmt -d -s .)' + +test-integ: test + +version ?= latest +test-docker: + curl -s https://raw.githubusercontent.com/sendgrid/sendgrid-oai/HEAD/prism/prism.sh -o prism.sh + version=$(version) bash ./prism.sh diff --git a/vendor/github.com/sendgrid/sendgrid-go/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/sendgrid/sendgrid-go/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..567de30 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,31 @@ + + +# Fixes # + +A short description of what this PR does. 
+ +### Checklist +- [x] I acknowledge that all my contributions will be made under the project's license +- [ ] I have made a material change to the repo (functionality, testing, spelling, grammar) +- [ ] I have read the [Contribution Guidelines](https://github.com/sendgrid/sendgrid-go/blob/main/CONTRIBUTING.md) and my PR follows them +- [ ] I have titled the PR appropriately +- [ ] I have updated my branch with the main branch +- [ ] I have added tests that prove my fix is effective or that my feature works +- [ ] I have added the necessary documentation about the functionality in the appropriate .md file +- [ ] I have added inline documentation to the code I modified + +If you have questions, please file a [support ticket](https://support.sendgrid.com), or create a GitHub Issue in this repository. diff --git a/vendor/github.com/sendgrid/sendgrid-go/README.md b/vendor/github.com/sendgrid/sendgrid-go/README.md new file mode 100644 index 0000000..7f91607 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/README.md @@ -0,0 +1,254 @@ +![Twilio SendGrid Logo](twilio_sendgrid_logo.png) + +[![BuildStatus](https://travis-ci.com/sendgrid/sendgrid-go.svg?branch=main)](https://travis-ci.com/sendgrid/sendgrid-go) +[![Email Notifications Badge](https://dx.sendgrid.com/badge/go)](https://dx.sendgrid.com/newsletter/go) +[![GoDoc](https://godoc.org/github.com/sendgrid/sendgrid-go?status.svg)](https://godoc.org/github.com/sendgrid/sendgrid-go) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) +[![Twitter Follow](https://img.shields.io/twitter/follow/sendgrid.svg?style=social&label=Follow)](https://twitter.com/sendgrid) +[![GitHub contributors](https://img.shields.io/github/contributors/sendgrid/sendgrid-go.svg)](https://github.com/sendgrid/sendgrid-go/graphs/contributors) +[![Open Source Helpers](https://www.codetriage.com/sendgrid/sendgrid-go/badges/users.svg)](https://www.codetriage.com/sendgrid/sendgrid-go) + +**NEW:** Subscribe to email [notifications](https://dx.sendgrid.com/newsletter/go) for releases and breaking changes. + +**The default branch name for this repository has been changed to `main` as of 07/27/2020.** + +**This library allows you to quickly and easily use the Twilio SendGrid Web API v3 via Go.** + +Version 3.X.X of this library provides full support for all Twilio SendGrid [Web API v3](https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html) endpoints, including the new [v3 /mail/send](https://sendgrid.com/blog/introducing-v3mailsend-sendgrids-new-mail-endpoint). + +This library represents the beginning of a new path for Twilio SendGrid. We want this library to be community driven and Twilio SendGrid led. We need your help to realize this goal. To help make sure we are building the right things in the right order, we ask that you create [issues](https://github.com/sendgrid/sendgrid-go/issues) and [pull requests](CONTRIBUTING.md) or simply upvote or comment on existing issues or pull requests. + +Please browse the rest of this README for further detail. + +We appreciate your continued support, thank you! 
+ +# Table of Contents + +* [Installation](#installation) +* [Quick Start](#quick-start) +* [Processing Inbound Email](#inbound) +* [Usage](#usage) +* [Use Cases](#use-cases) +* [Announcements](#announcements) +* [How to Contribute](#contribute) +* [Troubleshooting](#troubleshooting) +* [About](#about) +* [License](#license) + + +# Installation + +## Prerequisites + +- Go version 1.7 +- The Twilio SendGrid service, starting at the [free level](https://sendgrid.com/free?source=sendgrid-go), to send up to 40,000 emails for the first 30 days, then send 100 emails/day free forever or check out [our pricing](https://sendgrid.com/pricing?source=sendgrid-go). + +## Setup Environment Variables + +Update the development environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys), for example: + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + +## Install Package + +`go get github.com/sendgrid/sendgrid-go` + +## Dependencies + +- [rest](https://github.com/sendgrid/rest) + +## Setup Environment Variables + +### Initial Setup + +```bash +cp .env_sample .env +``` + +### Environment Variable + +Update the development environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys), for example: + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + + +# Quick Start + +## Hello Email + +The following is the minimum needed code to send an email with the [/mail/send Helper](helpers/mail) ([here](examples/helpers/mail/example.go#L32) is a full example): + +### With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "Sending with Twilio SendGrid is Fun" + to := mail.NewEmail("Example User", "test@example.com") + plainTextContent := "and easy to do anywhere, even with Go" + htmlContent := "and easy to do anywhere, even with Go" + message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent) + client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY")) + response, err := client.Send(message) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +The `NewEmail` constructor creates a [personalization object](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/personalizations.html) for you. [Here](examples/helpers/mail/example.go#L28) is an example of how to add to it. 
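+
+The linked example is the authoritative reference; as a rough sketch of the same idea, the snippet below builds the personalization by hand with `mail.NewV3Mail`, `mail.NewPersonalization`, and `AddTos` instead of relying on `NewSingleEmail` (the recipient names and addresses here are placeholders):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/sendgrid/sendgrid-go"
+	"github.com/sendgrid/sendgrid-go/helpers/mail"
+)
+
+func main() {
+	// Build the message piece by piece rather than with NewSingleEmail.
+	m := mail.NewV3Mail()
+	m.SetFrom(mail.NewEmail("Example User", "test@example.com"))
+	m.Subject = "Sending with Twilio SendGrid is Fun"
+	m.AddContent(mail.NewContent("text/plain", "and easy to do anywhere, even with Go"))
+
+	// A personalization carries the recipients for one envelope.
+	p := mail.NewPersonalization()
+	p.AddTos(
+		mail.NewEmail("Recipient One", "test1@example.com"), // placeholder address
+		mail.NewEmail("Recipient Two", "test2@example.com"), // placeholder address
+	)
+	m.AddPersonalizations(p)
+
+	client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY"))
+	response, err := client.Send(m)
+	if err != nil {
+		log.Println(err)
+	} else {
+		fmt.Println(response.StatusCode)
+	}
+}
+```
+
+Each personalization is delivered as its own envelope, so fields such as the subject or substitutions can be overridden per personalization without affecting the rest of the message.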
+ +### Without Mail Helper Class + +The following is the minimum needed code to send an email without the /mail/send Helper ([here](examples/mail/mail.go#L47) is a full example): + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "log" + "os" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "subject": "Sending with Twilio SendGrid is Fun" + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/plain", + "value": "and easy to do anywhere, even with Go" + } + ] +}`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## General v3 Web API Usage + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "log" + "os" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/api_keys", "https://api.sendgrid.com") + request.Method = "GET" + + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + + +# Processing Inbound Email + +Please see [our helper](helpers/inbound) for utilizing our Inbound Parse webhook. + + +# Usage + +- [Twilio SendGrid Docs](https://sendgrid.com/docs/API_Reference/index.html) +- [Library Usage Docs](USAGE.md) +- [Example Code](examples) +- [How-to: Migration from v2 to v3](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/how_to_migrate_from_v2_to_v3_mail_send.html) +- [v3 Web API Mail Send Helper](helpers/mail/README.md) + + +# Use Cases + +[Examples of common API use cases](use-cases/README.md), such as how to send an email with a transactional template. + + +# Announcements + +Please see our announcement regarding [breaking changes](https://github.com/sendgrid/sendgrid-go/issues/81). Your support is appreciated! + +All updates to this library are documented in our [CHANGELOG](CHANGELOG.md) and [releases](https://github.com/sendgrid/sendgrid-go/releases). You may also subscribe to email [release notifications](https://dx.sendgrid.com/newsletter/go) for releases and breaking changes. + + +# How to Contribute + +We encourage contribution to our libraries (you might even score some nifty swag), please see our [CONTRIBUTING](CONTRIBUTING.md) guide for details. + +Quick links: + +- [Feature Request](CONTRIBUTING.md#feature-request) +- [Bug Reports](CONTRIBUTING.md#submit-a-bug-report) +- [Improvements to the Codebase](CONTRIBUTING.md#improvements-to-the-codebase) +- [Review Pull Requests](CONTRIBUTING.md#code-reviews) + + +# Troubleshooting + +Please see our [troubleshooting guide](TROUBLESHOOTING.md) for common library issues. + + +# About + +sendgrid-go is maintained and funded by Twilio SendGrid, Inc. The names and logos for sendgrid-go are trademarks of Twilio SendGrid, Inc. + +If you need help installing or using the library, please check the [Twilio SendGrid Support Help Center](https://support.sendgrid.com). + +If you've instead found a bug in the library or would like new features added, go ahead and open issues or pull requests against this repo! 
+ +# License +[The MIT License (MIT)](LICENSE) diff --git a/vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md b/vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md new file mode 100644 index 0000000..37cd892 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md @@ -0,0 +1,110 @@ +If you have an issue logging into your Twilio SendGrid account, please read this [document](https://sendgrid.com/docs/ui/account-and-settings/troubleshooting-login/). For any questions regarding login issues, please contact our [support team](https://support.sendgrid.com). + +If you have a non-library Twilio SendGrid issue, please contact our [support team](https://support.sendgrid.com). + +If you can't find a solution below, please open an [issue](https://github.com/sendgrid/sendgrid-go/issues). + + +## Table of Contents + +* [Migrating from v2 to v3](#migrating) +* [Continue Using v2](#v2) +* [Testing v3 /mail/send Calls Directly](#testing) +* [Error Messages](#error) +* [Versions](#versions) +* [Environment Variables and Your Twilio SendGrid API Key](#environment) +* [Viewing the Request Body](#request-body) + + +## Migrating from v2 to v3 + +Please review [our guide](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/how_to_migrate_from_v2_to_v3_mail_send.html) on how to migrate from v2 to v3. + + +## Continue Using v2 + +[Here](https://github.com/sendgrid/sendgrid-go/tree/0bf6332788d0230b7da84a1ae68d7531073200e1) is the last working version with v2 support. + +Download: + +Click the "Clone or download" green button in [GitHub](https://github.com/sendgrid/sendgrid-go/tree/0bf6332788d0230b7da84a1ae68d7531073200e1) and choose download. + + +## Testing v3 /mail/send Calls Directly + +[Here](https://sendgrid.com/docs/for-developers/sending-email/curl-examples) are some cURL examples for common use cases. + + +## Error Messages + +An error is returned if caused by client policy (such as CheckRedirect), or failure to speak HTTP (such as a network connectivity problem). + +To read the error message returned by Twilio SendGrid's API: + +```go +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "Hello World from the Twilio SendGrid Go Library" + to := mail.NewEmail("Example User", "test@example.com") + content := mail.NewContent("text/plain", "some text here") + m := mail.NewV3MailInit(from, subject, to, content) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KE"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +__CAUTION__: A non-2xx status code doesn't cause an error on sendgrid.API and the application has to verify the response: + +```golang +resp, err := sendgrid.API(request) +if err != nil { + return err +} +if resp.StatusCode >= 400 { + // something goes wrong and you have to handle (e.g. returning an error to the user or logging the problem) + log.Printf("api response: HTTP %d: %s", resp.StatusCode, resp.Body) + // OR + // return fmt.Errorf("api response: HTTP %d: %s", resp.StatusCode, resp.Body) +} +``` + + +## Versions + +We follow the MAJOR.MINOR.PATCH versioning scheme as described by [SemVer.org](http://semver.org). 
Therefore, we recommend that you always pin (or vendor) the particular version you are working with to your code and never auto-update to the latest version. Especially when there is a MAJOR point release since that is guaranteed to be a breaking change. Changes are documented in the [CHANGELOG](CHANGELOG.md) and [releases](https://github.com/sendgrid/sendgrid-go/releases) section. + + +## Environment Variables and Your Twilio SendGrid API Key + +All of our examples assume you are using [environment variables](https://github.com/sendgrid/sendgrid-go#setup-environment-variables) to hold your Twilio SendGrid API key. + +If you choose to add your Twilio SendGrid API key directly (not recommended): + +`os.Getenv("SENDGRID_API_KEY")` + +becomes + +`"SENDGRID_API_KEY"` + +In the first case, SENDGRID_API_KEY is in reference to the name of the environment variable, while the second case references the actual Twilio SendGrid API Key. + + +## Viewing the Request Body + +When debugging or testing, it may be useful to examine the raw request body to compare against the [documented format](https://sendgrid.com/docs/API_Reference/api_v3.html). + +You can do this right before you call `response, err := client.Send(message)` like so: + +```go +fmt.Println(string(mail.GetRequestBody(message))) +``` diff --git a/vendor/github.com/sendgrid/sendgrid-go/USAGE.md b/vendor/github.com/sendgrid/sendgrid-go/USAGE.md new file mode 100644 index 0000000..e8cac7e --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/USAGE.md @@ -0,0 +1,6661 @@ +This documentation is based on our [OAI specification](https://github.com/sendgrid/sendgrid-oai). + +# INITIALIZATION + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "log" + "os" +) + +apiKey := os.Getenv("SENDGRID_API_KEY") +host := "https://api.sendgrid.com" +``` + +# Table of Contents + +* [ACCESS SETTINGS](#access-settings) +* [ALERTS](#alerts) +* [API KEYS](#api-keys) +* [ASM](#asm) +* [BROWSERS](#browsers) +* [CAMPAIGNS](#campaigns) +* [CATEGORIES](#categories) +* [CLIENTS](#clients) +* [CONTACTDB](#contactdb) +* [DEVICES](#devices) +* [EMAIL ACTIVITY](#email-activity) +* [GEO](#geo) +* [IPS](#ips) +* [MAIL](#mail) +* [MAIL SETTINGS](#mail-settings) +* [MAILBOX PROVIDERS](#mailbox-providers) +* [PARTNER SETTINGS](#partner-settings) +* [SCOPES](#scopes) +* [SENDERS](#senders) +* [SENDER AUTHENTICATION](#sender-authentication) +* [STATS](#stats) +* [SUBUSERS](#subusers) +* [SUPPRESSION](#suppression) +* [TEMPLATES](#templates) +* [TRACKING SETTINGS](#tracking-settings) +* [ON-BEHALF OF SUBUSER](#on-behalf-of) +* [USER](#user) + + +# ACCESS SETTINGS + +## Retrieve all recent access attempts + +**This endpoint allows you to retrieve a list of all of the IP addresses that recently attempted to access your account either through the User Interface or the API.** + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). 
+ +### GET /access_settings/activity + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/activity", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add one or more IPs to the whitelist + +**This endpoint allows you to add one or more IP addresses to your IP whitelist.** + +When adding an IP to your whitelist, include the IP address in an array. You can whitelist one IP at a time, or you can whitelist multiple IPs at once. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### POST /access_settings/whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist", host) +request.Method = "POST" +request.Body = []byte(` { + "ips": [ + { + "ip": "192.168.1.1" + }, + { + "ip": "192.*.*.*" + }, + { + "ip": "192.168.1.3/32" + } + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a list of currently whitelisted IPs + +**This endpoint allows you to retrieve a list of IP addresses that are currently whitelisted.** + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### GET /access_settings/whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove one or more IPs from the whitelist + +**This endpoint allows you to remove one or more IPs from your IP whitelist.** + +You can remove one IP at a time, or you can remove multiple IP addresses. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). 
+ +### DELETE /access_settings/whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist", host) +request.Method = "DELETE" +request.Body = []byte(` { + "ids": [ + 1, + 2, + 3 + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific whitelisted IP + +**This endpoint allows you to retrieve a specific IP address that has been whitelisted.** + +You must include the ID for the specific IP address you want to retrieve in your call. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### GET /access_settings/whitelist/{rule_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist/{rule_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove a specific IP from the whitelist + +**This endpoint allows you to remove a specific IP address from your IP whitelist.** + +When removing a specific IP address from your whitelist, you must include the ID in your call. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### DELETE /access_settings/whitelist/{rule_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist/{rule_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# ALERTS + +## Create a new Alert + +**This endpoint allows you to create a new alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). 
+ +### POST /alerts + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts", host) +request.Method = "POST" +request.Body = []byte(` { + "email_to": "example@example.com", + "frequency": "daily", + "type": "stats_notification" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all alerts + +**This endpoint allows you to retrieve all of your alerts.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### GET /alerts + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update an alert + +**This endpoint allows you to update an alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### PATCH /alerts/{alert_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts/{alert_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email_to": "example@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific alert + +**This endpoint allows you to retrieve a specific alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### GET /alerts/{alert_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts/{alert_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete an alert + +**This endpoint allows you to delete an alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". 
+ +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### DELETE /alerts/{alert_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts/{alert_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# API KEYS + +## Create API keys + +**This endpoint allows you to create a new random API Key for the user.** + +A JSON request body containing a "name" property is required. If the number of maximum keys is reached, HTTP 403 will be returned. + +There is a limit of 100 API Keys on your account. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the Twilio SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +See the [API Key Permissions List](https://sendgrid.com/docs/API_Reference/Web_API_v3/API_Keys/api_key_permissions_list.html) for a list of all available scopes. + +### POST /api_keys + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "My API Key", + "sample": "data", + "scopes": [ + "mail.send", + "alerts.create", + "alerts.read" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all API Keys belonging to the authenticated user + +**This endpoint allows you to retrieve all API Keys that belong to the authenticated user.** + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the Twilio SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +### GET /api_keys + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update the name & scopes of an API Key + +**This endpoint allows you to update the name and scopes of a given API key.** + +A JSON request body with a "name" property is required. +Most provide the list of all the scopes an API key should have. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the Twilio SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + + +### PUT /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "PUT" +request.Body = []byte(` { + "name": "A New Hope", + "scopes": [ + "user.profile.read", + "user.profile.update" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update API keys + +**This endpoint allows you to update the name of an existing API Key.** + +A JSON request body with a "name" property is required. 
+ +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the Twilio SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +## URI Parameters + +| URI Parameter | Type | Required? | Description | +|---|---|---|---| +|api_key_id |string | required | The ID of the API Key you are updating.| + +### PATCH /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "name": "A New Hope" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve an existing API Key + +**This endpoint allows you to retrieve a single API key.** + +If the API Key ID does not exist an HTTP 404 will be returned. + +### GET /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete API keys + +**This endpoint allows you to revoke an existing API Key** + +Authentications using this API Key will fail after this request is made, with some small propagation delay. If the API Key ID does not exist an HTTP 404 will be returned. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the Twilio SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +## URI Parameters + +| URI Parameter | Type | Required? | Description | +|---|---|---|---| +|api_key_id |string | required | The ID of the API Key you are deleting.| + +### DELETE /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# ASM + +## Create a new suppression group + +**This endpoint allows you to create a new suppression group.** + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### POST /asm/groups + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups", host) +request.Method = "POST" +request.Body = []byte(` { + "description": "Suggestions for products our users might like.", + "is_default": true, + "name": "Product Suggestions" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve information about multiple suppression groups + +**This endpoint allows you to retrieve information about multiple suppression groups.** + +This endpoint will return information for each group ID that you include in your request. 
To add a group ID to your request, simply append `&id=` followed by the group ID. + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). + +Suppression groups, or [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html), allow you to label a category of content that you regularly send. This gives your recipients the ability to opt out of a specific set of your email. For example, you might define a group for your transactional email, and one for your marketing email so that your users can continue receiving your transactional email without having to receive your marketing content. + +### GET /asm/groups + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a suppression group. + +**This endpoint allows you to update or change a suppression group.** + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### PATCH /asm/groups/{group_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "description": "Suggestions for items our users might like.", + "id": 103, + "name": "Item Suggestions" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get information on a single suppression group. + +**This endpoint allows you to retrieve a single suppression group.** + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### GET /asm/groups/{group_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a suppression group. + +**This endpoint allows you to delete a suppression group.** + +You can only delete groups that have not been attached to sent mail in the last 60 days. If a recipient uses the "one-click unsubscribe" option on an email associated with a deleted group, that recipient will be added to the global suppression list. + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. 
For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### DELETE /asm/groups/{group_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add suppressions to a suppression group + +**This endpoint allows you to add email addresses to an unsubscribe group.** + +If you attempt to add suppressions to a group that has been deleted or does not exist, the suppressions will be added to the global suppressions list. + +Suppressions are recipient email addresses that are added to [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). Once a recipient's address is on the suppressions list for an unsubscribe group, they will not receive any emails that are tagged with that unsubscribe group. + +### POST /asm/groups/{group_id}/suppressions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions", host) +request.Method = "POST" +request.Body = []byte(` { + "recipient_emails": [ + "test1@example.com", + "test2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all suppressions for a suppression group + +**This endpoint allows you to retrieve all suppressed email addresses belonging to the given group.** + +Suppressions are recipient email addresses that are added to [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). Once a recipient's address is on the suppressions list for an unsubscribe group, they will not receive any emails that are tagged with that unsubscribe group. + +### GET /asm/groups/{group_id}/suppressions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Search for suppressions within a group + +**This endpoint allows you to search a suppression group for multiple suppressions.** + +When given a list of email addresses and a group ID, this endpoint will return only the email addresses that have been unsubscribed from the given group. + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). 
+ +### POST /asm/groups/{group_id}/suppressions/search + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions/search", host) +request.Method = "POST" +request.Body = []byte(` { + "recipient_emails": [ + "exists1@example.com", + "exists2@example.com", + "doesnotexists@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a suppression from a suppression group + +**This endpoint allows you to remove a suppressed email address from the given suppression group.** + +Suppressions are recipient email addresses that are added to [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). Once a recipient's address is on the suppressions list for an unsubscribe group, they will not receive any emails that are tagged with that unsubscribe group. + +### DELETE /asm/groups/{group_id}/suppressions/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all suppressions + +**This endpoint allows you to retrieve a list of all suppressions.** + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). + +### GET /asm/suppressions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add recipient addresses to the global suppression group. + +**This endpoint allows you to add one or more email addresses to the global suppressions group.** + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### POST /asm/suppressions/global + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/global", host) +request.Method = "POST" +request.Body = []byte(` { + "recipient_emails": [ + "test1@example.com", + "test2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Global Suppression + +**This endpoint allows you to retrieve a global suppression. You can also use this endpoint to confirm if an email address is already globally suppressed.** + +If the email address you include in the URL path parameter `{email}` is already globally suppressed, the response will include that email address. If the address you enter for `{email}` is not globally suppressed, an empty JSON object `{}` will be returned. + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. 
A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### GET /asm/suppressions/global/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/global/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Global Suppression + +**This endpoint allows you to remove an email address from the global suppressions group.** + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### DELETE /asm/suppressions/global/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/global/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all suppression groups for an email address + +**This endpoint returns the list of all groups that the given email address has been unsubscribed from.** + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). + +### GET /asm/suppressions/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# BROWSERS + +## Retrieve email statistics by browser. + +**This endpoint allows you to retrieve your email statistics segmented by browser type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /browsers/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/browsers/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["browsers"] = "test_string" +queryParams["limit"] = "test_string" +queryParams["offset"] = "test_string" +queryParams["start_date"] = "2016-01-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CAMPAIGNS + +## Create a Campaign + +**This endpoint allows you to create a campaign.** + +Our Marketing Campaigns API lets you create, manage, send, and schedule campaigns. 
+ +Note: In order to send or schedule the campaign, you will be required to provide a subject, sender ID, content (we suggest both HTML and plain text), and at least one list or segment ID. This information is not required when you create a campaign. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns", host) +request.Method = "POST" +request.Body = []byte(` { + "categories": [ + "spring line" + ], + "custom_unsubscribe_url": "", + "html_content": "

<html><head><title></title></head><body><p>Check out our spring line!</p></body></html>
", + "ip_pool": "marketing", + "list_ids": [ + 110, + 124 + ], + "plain_content": "Check out our spring line!", + "segment_ids": [ + 110 + ], + "sender_id": 124451, + "subject": "New Products for Spring!", + "suppression_group_id": 42, + "title": "March Newsletter" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all Campaigns + +**This endpoint allows you to retrieve a list of all of your campaigns.** + +Returns campaigns in reverse order they were created (newest first). + +Returns an empty array if no campaigns exist. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### GET /campaigns + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Campaign + +Update a campaign. This is especially useful if you only set up the campaign using POST /campaigns, but didn't set many of the parameters. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### PATCH /campaigns/{campaign_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "categories": [ + "summer line" + ], + "html_content": "

<html><head><title></title></head><body><p>Check out our summer line!</p></body></html>
", + "plain_content": "Check out our summer line!", + "subject": "New Products for Summer!", + "title": "May Newsletter" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single campaign + +**This endpoint allows you to retrieve a specific campaign.** + +Our Marketing Campaigns API lets you create, manage, send, and schedule campaigns. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### GET /campaigns/{campaign_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Campaign + +**This endpoint allows you to delete a specific campaign.** + +Our Marketing Campaigns API lets you create, manage, send, and schedule campaigns. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### DELETE /campaigns/{campaign_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Scheduled Campaign + +**This endpoint allows you to change the scheduled time and date for a campaign to be sent.** + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### PATCH /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "PATCH" +request.Body = []byte(` { + "send_at": 1489451436 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Schedule a Campaign + +**This endpoint allows you to schedule a specific date and time for your campaign to be sent.** + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "POST" +request.Body = []byte(` { + "send_at": 1489771528 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## View Scheduled Time of a Campaign + +**This endpoint allows you to retrieve the date and time that the given campaign has been scheduled to be sent.** + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### GET /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + 
fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Unschedule a Scheduled Campaign + +**This endpoint allows you to unschedule a campaign that has already been scheduled to be sent.** + +A successful unschedule will return a 204. +If the specified campaign is in the process of being sent, the only option is to cancel (a different method). + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### DELETE /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Send a Campaign + +**This endpoint allows you to immediately send a campaign at the time you make the API call.** + +Normally a POST would have a request body, but since this endpoint is telling us to send a resource that is already created, a request body is not needed. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns/{campaign_id}/schedules/now + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules/now", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Send a Test Campaign + +**This endpoint allows you to send a test campaign.** + +To send to multiple addresses, use an array for the JSON "to" value ["one@address","two@address"] + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns/{campaign_id}/schedules/test + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules/test", host) +request.Method = "POST" +request.Body = []byte(` { + "to": "your.email@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CATEGORIES + +## Retrieve all categories + +**This endpoint allows you to retrieve a list of all of your categories.** + +Categories can help organize your email analytics by enabling you to tag emails by type or broad topic. You can define your own custom categories. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/categories.html). + +### GET /categories + +```go +request := sendgrid.GetRequest(apiKey, "/v3/categories", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["category"] = "test_string" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Email Statistics for Categories + +**This endpoint allows you to retrieve all of your email statistics for each of your categories.** + +If you do not define any query parameters, this endpoint will return a sum for each category in groups of 10. 
+ +Categories allow you to group your emails together according to broad topics that you define. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/categories.html). + +### GET /categories/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/categories/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["categories"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve sums of email stats for each category [Needs: Stats object defined, has category ID?] + +**This endpoint allows you to retrieve the total sum of each email statistic for every category over the given date range.** + +If you do not define any query parameters, this endpoint will return a sum for each category in groups of 10. + +Categories allow you to group your emails together according to broad topics that you define. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/categories.html). + +### GET /categories/stats/sums + +```go +request := sendgrid.GetRequest(apiKey, "/v3/categories/stats/sums", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["sort_by_direction"] = "asc" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CLIENTS + +## Retrieve email statistics by client type. + +**This endpoint allows you to retrieve your email statistics segmented by client type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /clients/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/clients/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve stats by a specific client type. + +**This endpoint allows you to retrieve your email statistics segmented by a specific client type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. 
+ +## Available Client Types +- phone +- tablet +- webmail +- desktop + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /clients/{client_type}/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/clients/{client_type}/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CONTACTDB + +## Create a Custom Field + +**This endpoint allows you to create a custom field.** + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### POST /contactdb/custom_fields + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "pet", + "type": "text" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all custom fields + +**This endpoint allows you to retrieve all custom fields.** + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/custom_fields + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Custom Field + +**This endpoint allows you to retrieve a custom field by ID.** + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/custom_fields/{custom_field_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields/{custom_field_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Custom Field + +**This endpoint allows you to delete a custom field by ID.** + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). 
+ +### DELETE /contactdb/custom_fields/{custom_field_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields/{custom_field_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a List + +**This endpoint allows you to create a list for your recipients.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### POST /contactdb/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "your list name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all lists + +**This endpoint allows you to retrieve all of your recipient lists. If you don't have any lists, an empty array will be returned.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete Multiple lists + +**This endpoint allows you to delete multiple recipient lists.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists", host) +request.Method = "DELETE" +request.Body = []byte(` [ + 1, + 2, + 3, + 4 +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a List + +**This endpoint allows you to update the name of one of your recipient lists.** + + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### PATCH /contactdb/lists/{list_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "name": "newlistname" +}`) +queryParams := make(map[string]string) +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single list + +This endpoint allows you to retrieve a single recipient list. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. 
+ +### GET /contactdb/lists/{list_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a List + +**This endpoint allows you to delete a specific recipient list with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/lists/{list_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["delete_contacts"] = "true" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add Multiple Recipients to a List + +**This endpoint allows you to add multiple recipients to a list.** + +Adds existing recipients to a list, passing in the recipient IDs to add. Recipient IDs should be passed exactly as they are returned from recipient endpoints. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### POST /contactdb/lists/{list_id}/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients", host) +request.Method = "POST" +request.Body = []byte(` [ + "recipient_id1", + "recipient_id2" +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all recipients on a List + +**This endpoint allows you to retrieve all recipients on the list with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/lists/{list_id}/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["page"] = "1" +queryParams["page_size"] = "1" +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add a Single Recipient to a List + +**This endpoint allows you to add a single recipient to a list.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. 
+ +### POST /contactdb/lists/{list_id}/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients/{recipient_id}", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Single Recipient from a Single List + +**This endpoint allows you to delete a single recipient from a list.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/lists/{list_id}/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients/{recipient_id}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["recipient_id"] = "1" +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Recipient + +**This endpoint allows you to update one or more recipients.** + +The body of an API call to this endpoint must include an array of one or more recipient objects. + +Note that you can add custom field data as parameters on recipient objects. We have provided an example using some of the default custom fields Twilio SendGrid provides. + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### PATCH /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "PATCH" +request.Body = []byte(` [ + { + "email": "jones@example.com", + "first_name": "Guy", + "last_name": "Jones" + } +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add recipients + +**This endpoint allows you to add a Marketing Campaigns recipient.** + +Note that you can add custom field data as a parameter on this endpoint. We have provided an example using some of the default custom fields Twilio SendGrid provides. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### POST /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "POST" +request.Body = []byte(` [ + { + "age": 25, + "email": "example@example.com", + "first_name": "", + "last_name": "User" + }, + { + "age": 25, + "email": "example2@example.com", + "first_name": "Example", + "last_name": "User" + } +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve recipients + +**This endpoint allows you to retrieve all of your Marketing Campaigns recipients.** + +Batch deletion of a page makes it possible to receive an empty page of recipients before reaching the end of +the list of recipients. To avoid this issue, iterate over pages until a 404 is returned, as sketched below. 
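+
+The following is a minimal sketch of that iteration strategy, reusing the `apiKey` and `host` variables assumed by the other examples in this guide; the page size of 100 is illustrative, not a documented default:
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host)
+request.Method = "GET"
+
+// Walk pages until the API answers 404, which signals that there are no
+// more pages of recipients to fetch.
+for page := 1; ; page++ {
+	request.QueryParams = map[string]string{
+		"page":      fmt.Sprintf("%d", page),
+		"page_size": "100", // illustrative page size
+	}
+	response, err := sendgrid.API(request)
+	if err != nil {
+		log.Println(err)
+		break
+	}
+	if response.StatusCode == 404 {
+		break
+	}
+	fmt.Println(response.Body)
+}
+```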
+ +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["page"] = "1" +queryParams["page_size"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete Recipient + +**This endpoint allows you to delete one or more recipients.** + +The body of an API call to this endpoint must include an array of recipient IDs of the recipients you want to delete. + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### DELETE /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "DELETE" +request.Body = []byte(` [ + "recipient_id1", + "recipient_id2" +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the count of billable recipients + +**This endpoint allows you to retrieve the number of Marketing Campaigns recipients that you will be billed for.** + +You are billed for marketing campaigns based on the highest number of recipients you have had in your account at one time. This endpoint will allow you to know the current billable count value. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients/billable_count + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/billable_count", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Count of Recipients + +**This endpoint allows you to retrieve the total number of Marketing Campaigns recipients.** + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/recipients/count + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/count", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve recipients matching search criteria + +**This endpoint allows you to perform a search on all of your Marketing Campaigns recipients.** + +field_name: + +* is a variable that is substituted for your actual custom field name from your recipient. +* Text fields must be url-encoded. Date fields are searchable only by unix timestamp (e.g. 2/2/2015 becomes 1422835200) +* If field_name is a 'reserved' date field, such as created_at or updated_at, the system will internally convert +your epoch time to a date range encompassing the entire day. 
For example, an epoch time of 1422835600 converts to +Mon, 02 Feb 2015 00:06:40 GMT, but internally the system will search from Mon, 02 Feb 2015 00:00:00 GMT through +Mon, 02 Feb 2015 23:59:59 GMT. + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/recipients/search + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/search", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["{field_name}"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single recipient + +**This endpoint allows you to retrieve a single recipient by ID from your contact database.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/{recipient_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Recipient + +**This endpoint allows you to delete a single recipient with the given ID from your contact database.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/{recipient_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the lists that a recipient is on + +**This endpoint allows you to retrieve the lists that a given recipient belongs to.** + +Each recipient can be on many lists. This endpoint gives you all of the lists that any one recipient has been added to. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients/{recipient_id}/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/{recipient_id}/lists", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve reserved fields + +**This endpoint allows you to list all fields that are reserved and can't be used for custom field names.** + +The contactdb is a database of your contacts for [Twilio SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). 
+ +### GET /contactdb/reserved_fields + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/reserved_fields", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a Segment + +**This endpoint allows you to create a segment.** + +All recipients in your contactdb will be added or removed automatically depending on whether they match the criteria for this segment. + +List Id: + +* Send this to segment from an existing list +* Don't send this in order to segment from your entire contactdb. + +Valid operators for create and update depend on the type of the field you are segmenting: + +* **Dates:** "eq", "ne", "lt" (before), "gt" (after) +* **Text:** "contains", "eq" (is - matches the full field), "ne" (is not - matches any field where the entire field is not the condition value) +* **Numbers:** "eq", "lt", "gt" +* **Email Clicks and Opens:** "eq" (opened), "ne" (not opened) + +Segment conditions using "eq" or "ne" for email clicks and opens should provide a "field" of either *clicks.campaign_identifier* or *opens.campaign_identifier*. The condition value should be a string containing the id of a completed campaign. + +Segments may contain multiple conditions, joined by an "and" or "or" in the "and_or" field. The first condition in the conditions list must have an empty "and_or", and subsequent conditions must all specify an "and_or". + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### POST /contactdb/segments + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments", host) +request.Method = "POST" +request.Body = []byte(` { + "conditions": [ + { + "and_or": "", + "field": "last_name", + "operator": "eq", + "value": "Miller" + }, + { + "and_or": "and", + "field": "last_clicked", + "operator": "gt", + "value": "01/02/2015" + }, + { + "and_or": "or", + "field": "clicks.campaign_identifier", + "operator": "eq", + "value": "513" + } + ], + "list_id": 4, + "name": "Last Name Miller" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all segments + +**This endpoint allows you to retrieve all of your segments.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### GET /contactdb/segments + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a segment + +**This endpoint allows you to update a segment.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. 
+ +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### PATCH /contactdb/segments/{segment_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "conditions": [ + { + "and_or": "", + "field": "last_name", + "operator": "eq", + "value": "Miller" + } + ], + "list_id": 5, + "name": "The Millers" +}`) +queryParams := make(map[string]string) +queryParams["segment_id"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a segment + +**This endpoint allows you to retrieve a single segment with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### GET /contactdb/segments/{segment_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["segment_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a segment + +**This endpoint allows you to delete a segment from your recipient's database.** + +You also have the option to delete all the contacts from your Marketing Campaigns recipient database who were in this segment. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### DELETE /contactdb/segments/{segment_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["delete_contacts"] = "true" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve recipients on a segment + +**This endpoint allows you to retrieve all of the recipients in a segment with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). 
+ +### GET /contactdb/segments/{segment_id}/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}/recipients", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["page"] = "1" +queryParams["page_size"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# DEVICES + +## Retrieve email statistics by device type. + +**This endpoint allows you to retrieve your email statistics segmented by the device type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +## Available Device Types +| **Device** | **Description** | **Example** | +|---|---|---| +| Desktop | Email software on desktop computer. | I.E., Outlook, Sparrow, or Apple Mail. | +| Webmail | A web-based email client. | I.E., Yahoo, Google, AOL, or Outlook.com. | +| Phone | A smart phone. | iPhone, Android, Blackberry, etc. +| Tablet | A tablet computer. | iPad, android based tablet, etc. | +| Other | An unrecognized device. | + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /devices/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/devices/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# Email Activity + +## Filter all messages + +> In order to gain access to the Email Activity Feed API, you must purchase [additional email activity history](https://app.sendgrid.com/settings/billing/addons/email_activity). + +Filter all messages to search your Email Activity. + +Queries may need to be [URL encoded](https://meyerweb.com/eric/tools/dencoder/). URL encoding depends on how you're using the API - if you are trying it out here, or using one of the Libraries, we handle the encoding for you. If you are using cURL, or your own implementation, you probably need to encode it. + +Queries have this format: + +`query={query_type}="{query_content}"` + +encoded, this would look like this: + +`query=type%3D%22query_content%22` + +for example: + +Filter by a specific email - `query=to_email%3D%22example%40example.com%22` + +Filter by subject line - `query=subject%3d%22A%20Great%20Subject%22` + +You can filter by other operators besides `=`. We also accept `!=`, `<`, and `>`. + +For a tutorial on how to get started, check out [Getting Started with the Email Activity API](https://sendgrid.com/docs/API_Reference/Web_API_v3/Tutorials/getting_started_email_activity_api.html). 
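+
+As a rough sketch of a compound filter (the `AND` keyword and the manual URL encoding mirror the single-condition example below; adjust if your library version encodes query parameters for you):
+
+```go
+// Illustrative only: join two conditions with AND, then URL encode the whole
+// query string before passing it as the "query" parameter.
+rawQuery := `to_email="example@example.com" AND subject="A Great Subject"`
+
+request := sendgrid.GetRequest(apiKey, "/v3/messages", host)
+request.Method = "GET"
+queryParams := make(map[string]string)
+queryParams["query"] = url.QueryEscape(rawQuery)
+queryParams["limit"] = "10"
+request.QueryParams = queryParams
+
+response, err := sendgrid.API(request)
+if err != nil {
+	log.Println(err)
+} else {
+	fmt.Println(response.StatusCode)
+	fmt.Println(response.Body)
+}
+```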
+ +For information about building combined queries, see [Building compound Email Activity queries](https://sendgrid.com/docs/API_Reference/Web_API_v3/Tutorials/getting_started_email_activity_api.html#-Creating-compound-queries). + +### GET /messages + +```go +request := sendgrid.GetRequest(apiKey, "/v3/messages", host) +request.Method = "GET" + +filterKey := "to_email" +filterOperator := url.QueryEscape("=") +filterValue := "testing@sendgrid.net" +filterValue = url.QueryEscape(fmt.Sprintf("\"%s\"", filterValue)) + +queryParams := make(map[string]string) +queryParams["query"] = fmt.Sprintf("%s%s%s", filterKey, filterOperator, filterValue) +queryParams["limit"] = "1" +request.QueryParams = queryParams + +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Filter messages by message ID + +> In order to gain access to the Email Activity Feed API, you must purchase [additional email activity history](https://app.sendgrid.com/settings/billing/addons/email_activity). + +Get all of the details about the specified message. + +### GET /messages/{msg_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/messages/{msg_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Request a CSV + +### POST /messages/download + +> In order to gain access to the Email Activity Feed API, you must purchase [additional email activity history](https://app.sendgrid.com/settings/billing/addons/email_activity). + +This request kicks off a process to generate a CSV file. When the file is generated, the email address listed as the account owner receives an email with a link to download the file. The link expires in 3 days. + +The CSV file will contain the last 1 million messages. This endpoint will be rate limited to 1 request every 12 hours. + +```go +request := sendgrid.GetRequest(apiKey, "/v3/messages/download", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Download CSV + +### GET /messages/download/{download_uuid} + +> In order to gain access to the Email Activity Feed API, you must purchase [additional email activity history](https://app.sendgrid.com/settings/billing/addons/email_activity). + +Download the CSV that you requested with the Request a CSV POST above. + +When the file is generated, the email address listed as the account owner receives an email with a link to download the file. The link expires in 3 days. + +The CSV file will contain the last 1 million messages. This endpoint will be rate limited to 1 request every 12 hours. + +```go +request := sendgrid.GetRequest(apiKey, "/v3/messages/download/{download_uuid}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +# GEO + +## Retrieve email statistics by country and state/province. 
+ +**This endpoint allows you to retrieve your email statistics segmented by country and state/province.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /geo/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/geo/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["country"] = "US" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# IPS + +## Retrieve all IP addresses + +**This endpoint allows you to retrieve a list of all assigned and unassigned IPs.** + +The response includes warm up status, pools, assigned subusers, and authentication info. The start_date field corresponds to when warmup started for that IP. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### GET /ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["subuser"] = "test_string" +queryParams["ip"] = "test_string" +queryParams["limit"] = "1" +queryParams["exclude_whitelabels"] = "true" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all assigned IPs + +**This endpoint allows you to retrieve only assigned IP addresses.** + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### GET /ips/assigned + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/assigned", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create an IP pool. + +**This endpoint allows you to create an IP pool.** + +**Each user can create up to 10 different IP pools.** + +IP Pools allow you to group your dedicated Twilio SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with authenticated IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. 
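+
+Once a pool exists (see the create request below), it is referenced by name at send time. As a hedged illustration only, a v3 mail send body can name the pool via its `ip_pool_name` field; the addresses and subject here are placeholders:
+
+```go
+// Illustrative: route a v3 mail send through the "marketing" pool by name.
+request := sendgrid.GetRequest(apiKey, "/v3/mail/send", host)
+request.Method = "POST"
+request.Body = []byte(`{
+  "personalizations": [{"to": [{"email": "recipient@example.com"}]}],
+  "from": {"email": "sender@example.com"},
+  "subject": "Hello from the marketing pool",
+  "content": [{"type": "text/plain", "value": "Hello!"}],
+  "ip_pool_name": "marketing"
+}`)
+response, err := sendgrid.API(request)
+if err != nil {
+	log.Println(err)
+} else {
+	fmt.Println(response.StatusCode)
+}
+```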
+ +### POST /ips/pools + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "marketing" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IP pools. + +**This endpoint allows you to retrieve all of your IP pools.** + +IP Pools allow you to group your dedicated Twilio SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with authenticated IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### GET /ips/pools + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update an IP pools name. + +**This endpoint allows you to update the name of an IP pool.** + +IP Pools allow you to group your dedicated Twilio SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with authenticated IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### PUT /ips/pools/{pool_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}", host) +request.Method = "PUT" +request.Body = []byte(` { + "name": "new_pool_name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IPs in a specified pool. + +**This endpoint allows you to list all of the IP addresses that are in a specific IP pool.** + +IP Pools allow you to group your dedicated Twilio SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with authenticated IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### GET /ips/pools/{pool_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete an IP pool. + +**This endpoint allows you to delete an IP pool.** + +IP Pools allow you to group your dedicated Twilio SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. 
When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with authenticated IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### DELETE /ips/pools/{pool_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add an IP address to a pool + +**This endpoint allows you to add an IP address to an IP pool.** + +You can add the same IP address to multiple pools. It may take up to 60 seconds for your IP address to be added to a pool after your request is made. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### POST /ips/pools/{pool_name}/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}/ips", host) +request.Method = "POST" +request.Body = []byte(` { + "ip": "0.0.0.0" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove an IP address from a pool. + +**This endpoint allows you to remove an IP address from an IP pool.** + +The same IP address can be added to multiple IP pools. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### DELETE /ips/pools/{pool_name}/ips/{ip} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}/ips/{ip}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add an IP to warmup + +**This endpoint allows you to enter an IP address into warmup mode.** + +Twilio SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how Twilio SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). 
+ +### POST /ips/warmup + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup", host) +request.Method = "POST" +request.Body = []byte(` { + "ip": "0.0.0.0" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IPs currently in warmup + +**This endpoint allows you to retrieve all of your IP addresses that are currently warming up.** + +Twilio SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how Twilio SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). + +### GET /ips/warmup + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve warmup status for a specific IP address + +**This endpoint allows you to retrieve the warmup status for a specific IP address.** + +Twilio SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how Twilio SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). + +### GET /ips/warmup/{ip_address} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup/{ip_address}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove an IP from warmup + +**This endpoint allows you to remove an IP address from warmup mode.** + +Twilio SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how Twilio SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). 
+ +### DELETE /ips/warmup/{ip_address} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup/{ip_address}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IP pools an IP address belongs to + +**This endpoint allows you to see which IP pools a particular IP address has been added to.** + +The same IP address can be added to multiple IP pools. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### GET /ips/{ip_address} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/{ip_address}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# MAIL + +## Create a batch ID + +**This endpoint allows you to generate a new batch ID. This batch ID can be associated with scheduled sends via the mail/send endpoint.** + +If you set the SMTPAPI header `batch_id`, it allows you to then associate multiple scheduled mail/send requests together with the same ID. Then at any time up to 10 minutes before the scheduled date, you can cancel all of the mail/send requests that have this batch ID by calling the Cancel Scheduled Send endpoint. + +More Information: + +* [Scheduling Parameters > Batch ID](https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) + +### POST /mail/batch + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail/batch", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate batch ID + +**This endpoint allows you to validate a batch ID.** + +If you set the SMTPAPI header `batch_id`, it allows you to then associate multiple scheduled mail/send requests together with the same ID. Then at any time up to 10 minutes before the scheduled date, you can cancel all of the mail/send requests that have this batch ID by calling the Cancel Scheduled Send endpoint. + +More Information: + +* [Scheduling Parameters > Batch ID](https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) + +### GET /mail/batch/{batch_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail/batch/{batch_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## v3 Mail Send + +This endpoint allows you to send an email over Twilio SendGrid's v3 Web API, the most recent version of our API. If you are looking for documentation about the v2 Mail Send endpoint, please see our [v2 API Reference](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +* Top level parameters are referred to as "global". +* Individual fields within the personalizations array will override any other global, or message level, parameters that are defined outside of personalizations. 
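+
+For example (a minimal, hypothetical sketch of the override rule above): the first personalization below sets its own `subject`, which wins for that recipient, while the second personalization inherits the global `subject`.
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/mail/send", host)
+request.Method = "POST"
+// The subject inside the first personalization overrides the global subject
+// for john.doe@example.com only; jane.doe@example.com receives the global one.
+request.Body = []byte(` {
+  "from": {"email": "sam.smith@example.com", "name": "Sam Smith"},
+  "subject": "Global subject",
+  "content": [{"type": "text/plain", "value": "Hello, world!"}],
+  "personalizations": [
+    {"to": [{"email": "john.doe@example.com"}], "subject": "Subject just for John"},
+    {"to": [{"email": "jane.doe@example.com"}]}
+  ]
+}`)
+response, err := sendgrid.API(request)
+if err != nil {
+	log.Println(err)
+} else {
+	fmt.Println(response.StatusCode)
+}
+```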
+ +For an overview of the v3 Mail Send endpoint, please visit our [v3 API Reference](https://sendgrid.com/docs/API_Reference/Web_API_v3/Mail/index.html) + +For more detailed information about how to use the v3 Mail Send endpoint, please visit our [Classroom](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/index.html). + +### POST /mail/send +This endpoint has a helper, check it out [here](helpers/mail/README.md). + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail/send", host) +request.Method = "POST" +request.Body = []byte(` { + "asm": { + "group_id": 1, + "groups_to_display": [ + 1, + 2, + 3 + ] + }, + "attachments": [ + { + "content": "[BASE64 encoded content block here]", + "content_id": "ii_139db99fdb5c3704", + "disposition": "inline", + "filename": "file1.jpg", + "name": "file1", + "type": "jpg" + } + ], + "batch_id": "[YOUR BATCH ID GOES HERE]", + "categories": [ + "category1", + "category2" + ], + "content": [ + { + "type": "text/html", + "value": "
<html><p>Hello, world!</p></html>
" + } + ], + "custom_args": { + "New Argument 1": "New Value 1", + "activationAttempt": "1", + "customerAccountNumber": "[CUSTOMER ACCOUNT NUMBER GOES HERE]" + }, + "from": { + "email": "sam.smith@example.com", + "name": "Sam Smith" + }, + "headers": {}, + "ip_pool_name": "[YOUR POOL NAME GOES HERE]", + "mail_settings": { + "bcc": { + "email": "ben.doe@example.com", + "enable": true + }, + "bypass_list_management": { + "enable": true + }, + "footer": { + "enable": true, + "html": "
<p>Thanks<br>The Twilio SendGrid Team</p>
", + "text": "Thanks,/n The Twilio SendGrid Team" + }, + "sandbox_mode": { + "enable": false + }, + "spam_check": { + "enable": true, + "post_to_url": "http://example.com/compliance", + "threshold": 3 + } + }, + "personalizations": [ + { + "bcc": [ + { + "email": "sam.doe@example.com", + "name": "Sam Doe" + } + ], + "cc": [ + { + "email": "jane.doe@example.com", + "name": "Jane Doe" + } + ], + "custom_args": { + "New Argument 1": "New Value 1", + "activationAttempt": "1", + "customerAccountNumber": "[CUSTOMER ACCOUNT NUMBER GOES HERE]" + }, + "headers": { + "X-Accept-Language": "en", + "X-Mailer": "MyApp" + }, + "send_at": 1409348513, + "subject": "Hello, World!", + "substitutions": { + "id": "substitutions", + "type": "object" + }, + "to": [ + { + "email": "john.doe@example.com", + "name": "John Doe" + } + ] + } + ], + "reply_to": { + "email": "sam.smith@example.com", + "name": "Sam Smith" + }, + "sections": { + "section": { + ":sectionName1": "section 1 text", + ":sectionName2": "section 2 text" + } + }, + "send_at": 1409348513, + "subject": "Hello, World!", + "template_id": "[YOUR TEMPLATE ID GOES HERE]", + "tracking_settings": { + "click_tracking": { + "enable": true, + "enable_text": true + }, + "ganalytics": { + "enable": true, + "utm_campaign": "[NAME OF YOUR REFERRER SOURCE]", + "utm_content": "[USE THIS SPACE TO DIFFERENTIATE YOUR EMAIL FROM ADS]", + "utm_medium": "[NAME OF YOUR MARKETING MEDIUM e.g. email]", + "utm_name": "[NAME OF YOUR CAMPAIGN]", + "utm_term": "[IDENTIFY PAID KEYWORDS HERE]" + }, + "open_tracking": { + "enable": true, + "substitution_tag": "%opentrack" + }, + "subscription_tracking": { + "enable": true, + "html": "If you would like to unsubscribe and stop receiving these emails <% clickhere %>.", + "substitution_tag": "<%click here%>", + "text": "If you would like to unsubscribe and stop receiving these emails <% click here %>." + } + } +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# MAIL SETTINGS + +## Retrieve all mail settings + +**This endpoint allows you to retrieve a list of all mail settings.** + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update address whitelist mail settings + +**This endpoint allows you to update your current email address whitelist settings.** + +The address whitelist setting whitelists a specified email address or domain for which mail should never be suppressed. For example, you own the domain example.com, and one or more of your recipients use email@example.com addresses, by placing example.com in the address whitelist setting, all bounces, blocks, and unsubscribes logged for that domain will be ignored and sent as if under normal sending conditions. 
+ +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/address_whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/address_whitelist", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "list": [ + "email1@example.com", + "example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve address whitelist mail settings + +**This endpoint allows you to retrieve your current email address whitelist settings.** + +The address whitelist setting whitelists a specified email address or domain for which mail should never be suppressed. For example, you own the domain example.com, and one or more of your recipients use email@example.com addresses, by placing example.com in the address whitelist setting, all bounces, blocks, and unsubscribes logged for that domain will be ignored and sent as if under normal sending conditions. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/address_whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/address_whitelist", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update BCC mail settings + +**This endpoint allows you to update your current BCC mail settings.** + +When the BCC mail setting is enabled, Twilio SendGrid will automatically send a blind carbon copy (BCC) to an address for every email sent without adding that address to the header. Please note that only one email address may be entered in this field, if you wish to distribute BCCs to multiple addresses you will need to create a distribution group or use forwarding rules. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/bcc + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bcc", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email": "email@example.com", + "enabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all BCC mail settings + +**This endpoint allows you to retrieve your current BCC mail settings.** + +When the BCC mail setting is enabled, Twilio SendGrid will automatically send a blind carbon copy (BCC) to an address for every email sent without adding that address to the header. 
Please note that only one email address may be entered in this field, if you wish to distribute BCCs to multiple addresses you will need to create a distribution group or use forwarding rules. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/bcc + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bcc", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update bounce purge mail settings + +**This endpoint allows you to update your current bounce purge settings.** + +This setting allows you to set a schedule for Twilio SendGrid to automatically delete contacts from your soft and hard bounce suppression lists. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/bounce_purge + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bounce_purge", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "hard_bounces": 5, + "soft_bounces": 5 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve bounce purge mail settings + +**This endpoint allows you to retrieve your current bounce purge settings.** + +This setting allows you to set a schedule for Twilio SendGrid to automatically delete contacts from your soft and hard bounce suppression lists. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/bounce_purge + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bounce_purge", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update footer mail settings + +**This endpoint allows you to update your current Footer mail settings.** + +The footer setting will insert a custom footer at the bottom of the text and HTML bodies. Use the embedded HTML editor and plain text entry fields to create the content of the footers to be inserted into your emails. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). 
+ +### PATCH /mail_settings/footer + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/footer", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "html_content": "...", + "plain_content": "..." +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve footer mail settings + +**This endpoint allows you to retrieve your current Footer mail settings.** + +The footer setting will insert a custom footer at the bottom of the text and HTML bodies. Use the embedded HTML editor and plain text entry fields to create the content of the footers to be inserted into your emails. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/footer + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/footer", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update forward bounce mail settings + +**This endpoint allows you to update your current bounce forwarding mail settings.** + +Activating this setting allows you to specify an email address to which bounce reports are forwarded. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/forward_bounce + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_bounce", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email": "example@example.com", + "enabled": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve forward bounce mail settings + +**This endpoint allows you to retrieve your current bounce forwarding mail settings.** + +Activating this setting allows you to specify an email address to which bounce reports are forwarded. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/forward_bounce + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_bounce", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update forward spam mail settings + +**This endpoint allows you to update your current Forward Spam mail settings.** + +Enabling the forward spam setting allows you to specify an email address to which spam reports will be forwarded. 
+ +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/forward_spam + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_spam", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email": "", + "enabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve forward spam mail settings + +**This endpoint allows you to retrieve your current Forward Spam mail settings.** + +Enabling the forward spam setting allows you to specify an email address to which spam reports will be forwarded. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/forward_spam + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_spam", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update plain content mail settings + +**This endpoint allows you to update your current Plain Content mail settings.** + +The plain content setting will automatically convert any plain text emails that you send to HTML before sending. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/plain_content + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/plain_content", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve plain content mail settings + +**This endpoint allows you to retrieve your current Plain Content mail settings.** + +The plain content setting will automatically convert any plain text emails that you send to HTML before sending. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). 
+ +### GET /mail_settings/plain_content + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/plain_content", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update spam check mail settings + +**This endpoint allows you to update your current spam checker mail settings.** + +The spam checker filter notifies you when emails are detected that exceed a predefined spam threshold. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/spam_check + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/spam_check", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "max_score": 5, + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve spam check mail settings + +**This endpoint allows you to retrieve your current Spam Checker mail settings.** + +The spam checker filter notifies you when emails are detected that exceed a predefined spam threshold. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/spam_check + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/spam_check", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update template mail settings + +**This endpoint allows you to update your current legacy email template settings.** + +This setting refers to our original email templates. We currently support more fully featured [transactional templates](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +The legacy email template setting wraps an HTML template around your email content. This can be useful for sending out marketing email and/or other HTML formatted messages. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/template + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "html_content": "<% body %>" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve legacy template mail settings + +**This endpoint allows you to retrieve your current legacy email template settings.** + +This setting refers to our original email templates. 
We currently support more fully featured [transactional templates](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +The legacy email template setting wraps an HTML template around your email content. This can be useful for sending out marketing email and/or other HTML formatted messages. + +Mail settings allow you to tell Twilio SendGrid specific things to do to every email that you send to your recipients over Twilio SendGrid's [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/template + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# MAILBOX PROVIDERS + +## Retrieve email statistics by mailbox provider. + +**This endpoint allows you to retrieve your email statistics segmented by recipient mailbox provider.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /mailbox_providers/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mailbox_providers/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["mailbox_providers"] = "test_string" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# PARTNER SETTINGS + +## Returns a list of all partner settings. + +**This endpoint allows you to retrieve a list of all partner settings that you can enable.** + +Our partner settings allow you to integrate your Twilio SendGrid account with our partners to increase your Twilio SendGrid experience and functionality. For more information about our partners, and how you can begin integrating with them, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/partners.html). + +### GET /partner_settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/partner_settings", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Updates New Relic partner settings. + +**This endpoint allows you to update or change your New Relic partner settings.** + +Our partner settings allow you to integrate your Twilio SendGrid account with our partners to increase your Twilio SendGrid experience and functionality. 
For more information about our partners, and how you can begin integrating with them, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/partners.html). + +By integrating with New Relic, you can send your Twilio SendGrid email statistics to your New Relic Dashboard. If you enable this setting, your stats will be sent to New Relic every 5 minutes. You will need your New Relic License Key to enable this setting. For more information, please see our [Classroom](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/new_relic.html). + +### PATCH /partner_settings/new_relic + +```go +request := sendgrid.GetRequest(apiKey, "/v3/partner_settings/new_relic", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enable_subuser_statistics": true, + "enabled": true, + "license_key": "" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Returns all New Relic partner settings. + +**This endpoint allows you to retrieve your current New Relic partner settings.** + +Our partner settings allow you to integrate your Twilio SendGrid account with our partners to increase your Twilio SendGrid experience and functionality. For more information about our partners, and how you can begin integrating with them, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/partners.html). + +By integrating with New Relic, you can send your Twilio SendGrid email statistics to your New Relic Dashboard. If you enable this setting, your stats will be sent to New Relic every 5 minutes. You will need your New Relic License Key to enable this setting. For more information, please see our [Classroom](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/new_relic.html). + +### GET /partner_settings/new_relic + +```go +request := sendgrid.GetRequest(apiKey, "/v3/partner_settings/new_relic", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SCOPES + +## Retrieve a list of scopes for which this user has access. + +**This endpoint returns a list of all scopes that this user has access to.** + +API Keys can be used to authenticate the use of [Twilio SendGrid's v3 Web API](https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html), or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). API Keys may be assigned certain permissions, or scopes, that limit which API endpoints they are able to access. For a more detailed explanation of how you can use API Key permissions, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/api_keys.html#-API-Key-Permissions) or [Classroom](https://sendgrid.com/docs/Classroom/Basics/API/api_key_permissions.html). + +### GET /scopes + +```go +request := sendgrid.GetRequest(apiKey, "/v3/scopes", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SENDERS + +## Create a Sender Identity + +**This endpoint allows you to create a new sender identity.** + +*You may create up to 100 unique sender identities.* + +Sender Identities are required to be verified before use. 
If your domain has been authenticated, it will auto verify on creation. Otherwise, an email will be sent to the `from.email`. + +### POST /senders + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders", host) +request.Method = "POST" +request.Body = []byte(` { + "address": "123 Elm St.", + "address_2": "Apt. 456", + "city": "Denver", + "country": "United States", + "from": { + "email": "from@example.com", + "name": "Example INC" + }, + "nickname": "My Sender ID", + "reply_to": { + "email": "replyto@example.com", + "name": "Example INC" + }, + "state": "Colorado", + "zip": "80202" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get all Sender Identities + +**This endpoint allows you to retrieve a list of all sender identities that have been created for your account.** + +Sender Identities are required to be verified before use. If your domain has been authenticated, it will auto verify on creation. Otherwise, an email will be sent to the `from.email`. + +### GET /senders + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Sender Identity + +**This endpoint allows you to update a sender identity.** + +Updates to `from.email` require re-verification. If your domain has been authenticated, it will auto verify on creation. Otherwise, an email will be sent to the `from.email`. + +Partial updates are allowed, but fields that are marked as "required" in the POST (create) endpoint must not be nil if that field is included in the PATCH request. + +### PATCH /senders/{sender_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "address": "123 Elm St.", + "address_2": "Apt. 456", + "city": "Denver", + "country": "United States", + "from": { + "email": "from@example.com", + "name": "Example INC" + }, + "nickname": "My Sender ID", + "reply_to": { + "email": "replyto@example.com", + "name": "Example INC" + }, + "state": "Colorado", + "zip": "80202" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## View a Sender Identity + +**This endpoint allows you to retrieve a specific sender identity.** + +Sender Identities are required to be verified before use. If your domain has been authenticated, it will auto verify on creation. Otherwise, an email will be sent to the `from.email`. + +### GET /senders/{sender_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Sender Identity + +**This endpoint allows you to delete one of your sender identities.** + +Sender Identities are required to be verified before use. If your domain has been authenticated, it will auto verify on creation. Otherwise, an email will be sent to the `from.email`. 
+ +### DELETE /senders/{sender_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Resend Sender Identity Verification + +**This endpoint allows you to resend a sender identity verification email.** + +Sender Identities are required to be verified before use. If your domain has been authenticated, it will auto verify on creation. Otherwise, an email will be sent to the `from.email`. + +### POST /senders/{sender_id}/resend_verification + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}/resend_verification", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SENDER AUTHENTICATION + +## Create an authenticated domain. + +**This endpoint allows you to create a domain authentication for one of your domains.** + +If you are creating a domain authentication that you would like a subuser to use, you have two options: +1. Use the "username" parameter. This allows you to create am authenticated subuser. This means the subuser is able to see and modify the created authentication. +2. Use the Association workflow (see Associate Domain section). This allows you to assign a domain authentication created by the parent to a subuser. This means the subuser will default to the assigned domain authentication, but will not be able to see or modify that authentication. However, if the subuser creates their own domain authentication it will overwrite the assigned domain authentication. + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +### POST /whitelabel/domains + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host) +request.Method = "POST" +request.Body = []byte(` { + "automatic_security": false, + "custom_spf": true, + "default": true, + "domain": "example.com", + "ips": [ + "192.168.1.1", + "192.168.1.2" + ], + "subdomain": "news", + "username": "john@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## List all domain authentications. + +**This endpoint allows you to retrieve a list of all domain authentications you have created.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. 
You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + + +### GET /whitelabel/domains + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +queryParams["domain"] = "test_string" +queryParams["exclude_subusers"] = "true" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get the default domain authentication. + +**This endpoint allows you to retrieve the default authentication for a domain.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| domain | string |The domain to find a default domain whitelabel for. | + +### GET /whitelabel/domains/default + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/default", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## List the domain authentication associated with the given user. + +**This endpoint allows you to retrieve all of the domain authentications that have been assigned to a specific subuser.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +Domain authentications can be associated with (i.e. assigned to) subusers from a parent account. This functionality allows subusers to send mail using their parent's authenticated domains. To associate a domain authentication with a subuser, the parent account must first create the domain authentication and validate it. The parent may then associate the domain authentication via the subuser management tools. 
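+
+The same create, validate, and associate flow can also be driven through this library. A hedged sketch follows (the domain ID `12345` and the subuser username are placeholders; each endpoint used here is documented in the surrounding sections):
+
+```go
+// 1. The parent account creates the domain authentication (POST /whitelabel/domains).
+createReq := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host)
+createReq.Method = "POST"
+createReq.Body = []byte(`{"domain": "example.com", "subdomain": "news", "automatic_security": true}`)
+createResp, err := sendgrid.API(createReq)
+if err != nil {
+	log.Println(err)
+} else {
+	fmt.Println(createResp.Body) // the response body includes the new domain's numeric id
+}
+
+// 2. Once the DNS records are in place, validate it (POST /whitelabel/domains/{id}/validate).
+// 12345 is a placeholder for the id returned above.
+validateReq := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/12345/validate", host)
+validateReq.Method = "POST"
+if _, err := sendgrid.API(validateReq); err != nil {
+	log.Println(err)
+}
+
+// 3. Associate the validated domain with a subuser (POST /whitelabel/domains/{domain_id}/subuser).
+assocReq := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/12345/subuser", host)
+assocReq.Method = "POST"
+assocReq.Body = []byte(`{"username": "jane@example.com"}`)
+if _, err := sendgrid.API(assocReq); err != nil {
+	log.Println(err)
+}
+```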
+ +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| username | string | Username of the subuser to find associated whitelabels for. | + +### GET /whitelabel/domains/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/subuser", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Disassociate a domain authentication from a given user. + +**This endpoint allows you to disassociate a specific domain authentication from a subuser.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +Domain authentications can be associated with (i.e. assigned to) subusers from a parent account. This functionality allows subusers to send mail using their parent's authenticated domains. To associate a domain authentication with a subuser, the parent account must first create the domain authentication and validate it. The parent may then associate the domain authentication via the subuser management tools. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Required? | Description | +|---|---|---|---| +| username | string | required | Username for the subuser to find associated whitelabels for. | + +### DELETE /whitelabel/domains/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/subuser", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a domain authentication. + +**This endpoint allows you to update the settings for a domain authentication.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. 
+ +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +### PATCH /whitelabel/domains/{domain_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "custom_spf": true, + "default": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a domain authentication. + +**This endpoint allows you to retrieve a specific domain authentication.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + + +### GET /whitelabel/domains/{domain_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a domain authentication. + +**This endpoint allows you to delete a domain authentication.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +### DELETE /whitelabel/domains/{domain_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Associate a domain authentication with a given user. + +**This endpoint allows you to associate a specific domain authentication with a subuser.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. 
If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +Domain authentications can be associated with (i.e. assigned to) subusers from a parent account. This functionality allows subusers to send mail using their parent's authenticated domains. To associate a domain authentication with a subuser, the parent account must first create the domain authentication and validate it. The parent may then associate the domain authentication via the subuser management tools. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| domain_id | integer | ID of the domain whitelabel to associate with the subuser. | + +### POST /whitelabel/domains/{domain_id}/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}/subuser", host) +request.Method = "POST" +request.Body = []byte(` { + "username": "jane@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add an IP to a domain authentication. + +**This endpoint allows you to add an IP address to a domain authentication.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| id | integer | ID of the domain to which you are adding an IP | + +### POST /whitelabel/domains/{id}/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/ips", host) +request.Method = "POST" +request.Body = []byte(` { + "ip": "192.168.0.1" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove an IP from a domain authentication. + +**This endpoint allows you to remove a domain's IP address from that domain's authentication.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. 
+ +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| id | integer | ID of the domain whitelabel to delete the IP from. | +| ip | string | IP to remove from the domain whitelabel. | + +### DELETE /whitelabel/domains/{id}/ips/{ip} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/ips/{ip}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate a domain authentication. + +**This endpoint allows you to validate a domain authentication. If it fails, it will return an error message describing why the domain could not be validated.** + +A domain authentication allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Authenticating a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on domain authentication, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-domain-authentication/) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| id | integer |ID of the domain whitelabel to validate. | + +### POST /whitelabel/domains/{id}/validate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/validate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create reverse DNS record + +**This endpoint allows you to create a reverse DNS record.** + +When creating a reverse DNS record, you should use the same subdomain that you used when you created a domain authentication. + +Reverse DNS consists of a subdomain and domain that will be used to generate a record for a given IP. Once Twilio SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-reverse-dns/). + +### POST /whitelabel/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips", host) +request.Method = "POST" +request.Body = []byte(` { + "domain": "example.com", + "ip": "192.168.1.1", + "subdomain": "email" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all reverse DNS records + +**This endpoint allows you to retrieve all of the reverse DNS records that have been created by this account.** + +You may include a search key by using the "ip" parameter. This enables you to perform a prefix search for a given IP segment (e.g. "192."). 
+ +Reverse DNS consists of a subdomain and domain that will be used to generate a record for a given IP. Once Twilio SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-reverse-dns/). + +### GET /whitelabel/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["ip"] = "test_string" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve an reverse DNS record + +**This endpoint allows you to retrieve a reverse DNS record.** + +Reverse DNS consists of a subdomain and domain that will be used to generate a record for a given IP. Once Twilio SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-reverse-dns/). + +### GET /whitelabel/ips/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete an reverse DNS record + +**This endpoint allows you to delete a reverse DNS record.** + +Reverse DNS consists of a subdomain and domain that will be used to generate a record for a given IP. Once Twilio SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-reverse-dns/). + +### DELETE /whitelabel/ips/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate a reverse DNS + +**This endpoint allows you to validate a reverse DNS record.** + +Reverse DNS consists of a subdomain and domain that will be used to generate a record for a given IP. Once Twilio SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-reverse-dns/). 
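+
+The response `Body` is returned as a JSON string. As a hedged sketch (the `valid` field name is an assumption about the validation response shape, not something this snippet verifies), you can decode it with `encoding/json` to inspect the outcome:
+
+```go
+// import "encoding/json"
+request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}/validate", host)
+request.Method = "POST"
+response, err := sendgrid.API(request)
+if err != nil {
+	log.Println(err)
+} else {
+	var result map[string]interface{}
+	if err := json.Unmarshal([]byte(response.Body), &result); err != nil {
+		log.Println(err)
+	} else {
+		fmt.Println(result["valid"]) // assumed field; see the API reference for the exact schema
+	}
+}
+```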
+ +### POST /whitelabel/ips/{id}/validate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}/validate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a Branded Link + +**This endpoint allows you to create a new link branding.** + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### POST /whitelabel/links + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host) +request.Method = "POST" +request.Body = []byte(` { + "default": true, + "domain": "example.com", + "subdomain": "mail" +}`) +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all link brandings + +**This endpoint allows you to retrieve all link brandings.** + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### GET /whitelabel/links + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Default Link Branding + +**This endpoint allows you to retrieve the default link branding.** + +Default link branding is the actual link branding to be used when sending messages. If there are multiple link brandings, the default is determined by the following order: +
+* Validated link branding marked as "default"
+* Legacy link brands (migrated from the whitelabel wizard)
+* Default SendGrid link whitelabel (i.e. 100.ct.sendgrid.net)
+ +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### GET /whitelabel/links/default + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/default", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["domain"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Associated Link Branding + +**This endpoint allows you to retrieve the associated link branding for a subuser.** + +Link branding can be associated with subusers from the parent account. This functionality allows +subusers to send mail using their parent's link brands. To associate a link branding, the parent account +must first create a branded link and validate it. The parent may then associate that branded link with a subuser via the API or the Subuser Management page in the user interface. + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### GET /whitelabel/links/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/subuser", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Disassociate a Link Branding + +**This endpoint allows you to disassociate a link branding from a subuser.** + +Link branding can be associated with subusers from the parent account. This functionality allows +subusers to send mail using their parent's link brands. To associate a link branding, the parent account +must first create a branded link and validate it. The parent may then associate that branded link with a subuser via the API or the Subuser Management page in the user interface. + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### DELETE /whitelabel/links/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/subuser", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Link Branding + +**This endpoint allows you to update a specific link branding. You can use this endpoint to change a branded link's default status.** + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. 
+ +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### PATCH /whitelabel/links/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "default": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Link Branding + +**This endpoint allows you to retrieve a specific link branding.** + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### GET /whitelabel/links/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Link Branding + +**This endpoint allows you to delete a link branding.** + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### DELETE /whitelabel/links/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate a Link Branding + +**This endpoint allows you to validate a link branding.** + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). + +### POST /whitelabel/links/{id}/validate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}/validate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Associate a Link Branding + +**This endpoint allows you to associate a link branding with a subuser account.** + +Link branding can be associated with subusers from the parent account. This functionality allows +subusers to send mail using their parent's link brands. To associate a link branding, the parent account +must first create a branded link and validate it. The parent may then associate that branded link with a subuser via the API or the Subuser Management page in the user interface. + +Email link branding allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-link-branding/). 
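+
+Note that the `{link_id}` placeholder in the endpoint below, like the other `{id}`-style placeholders throughout this document, must be substituted with a real value before the request is sent. A minimal sketch, assuming a hypothetical `linkID` obtained from an earlier call such as GET /v3/whitelabel/links (uses the `fmt` package already imported by the other examples):
+
+```go
+// linkID is a hypothetical branded link ID; substitute the value returned by the API.
+linkID := 1234
+path := fmt.Sprintf("/v3/whitelabel/links/%d/subuser", linkID)
+request := sendgrid.GetRequest(apiKey, path, host)
+request.Method = "POST"
+request.Body = []byte(`{"username": "jane@example.com"}`)
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.StatusCode)
+}
+```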
+
+### POST /whitelabel/links/{link_id}/subuser
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{link_id}/subuser", host)
+request.Method = "POST"
+request.Body = []byte(` {
+ "username": "jane@example.com"
+}`)
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.StatusCode)
+ fmt.Println(response.Body)
+ fmt.Println(response.Headers)
+}
+```
+
+
+# STATS
+
+## Retrieve global email statistics
+
+**This endpoint allows you to retrieve all of your global email statistics over a given date range.**
+
+Parent accounts will see aggregated stats for their account and all subuser accounts. Subuser accounts will only see their own stats.
+
+### GET /stats
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/stats", host)
+request.Method = "GET"
+queryParams := make(map[string]string)
+queryParams["aggregated_by"] = "day"
+queryParams["limit"] = "1"
+queryParams["start_date"] = "2016-01-01"
+queryParams["end_date"] = "2016-04-01"
+queryParams["offset"] = "1"
+request.QueryParams = queryParams
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.StatusCode)
+ fmt.Println(response.Body)
+ fmt.Println(response.Headers)
+}
+```
+
+
+# SUBUSERS
+
+## Create Subuser
+
+This endpoint allows you to create a new subuser.
+
+For more information about Subusers:
+
+* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html)
+* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html)
+
+### POST /subusers
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/subusers", host)
+request.Method = "POST"
+request.Body = []byte(` {
+ "email": "John@example.com",
+ "ips": [
+ "1.1.1.1",
+ "2.2.2.2"
+ ],
+ "password": "johns_password",
+ "username": "John@example.com"
+}`)
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.StatusCode)
+ fmt.Println(response.Body)
+ fmt.Println(response.Headers)
+}
+```
+
+## List all Subusers
+
+This endpoint allows you to retrieve a list of all of your subusers. You can choose to retrieve specific subusers as well as limit the results that come back from the API.
+
+For more information about Subusers:
+
+* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html)
+* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html)
+
+### GET /subusers
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/subusers", host)
+request.Method = "GET"
+queryParams := make(map[string]string)
+queryParams["username"] = "test_string"
+queryParams["limit"] = "1"
+queryParams["offset"] = "1"
+request.QueryParams = queryParams
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.StatusCode)
+ fmt.Println(response.Body)
+ fmt.Println(response.Headers)
+}
+```
+
+## Retrieve Subuser Reputations
+
+Subuser sender reputations give a good idea of how well a sender is doing with regards to how recipients and recipient servers react to the mail that is being received. When a bounce, spam report, or other negative action happens on a sent email, it will affect your sender rating.
+ +This endpoint allows you to request the reputations for your subusers. + +### GET /subusers/reputations + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/reputations", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["usernames"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve email statistics for your subusers. + +**This endpoint allows you to retrieve the email statistics for the given subusers.** + +You may retrieve statistics for up to 10 different subusers by including an additional _subusers_ parameter for each additional subuser. + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["subusers"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve monthly stats for all subusers + +**This endpoint allows you to retrieve the monthly email statistics for all subusers over the given date range.** + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats for your subusers. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. + +When using the `sort_by_metric` to sort your stats by a specific metric, you can not sort by the following metrics: +`bounce_drops`, `deferred`, `invalid_emails`, `processed`, `spam_report_drops`, `spam_reports`, or `unsubscribe_drops`. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/stats/monthly + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/stats/monthly", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["subuser"] = "test_string" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +queryParams["date"] = "test_string" +queryParams["sort_by_direction"] = "asc" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the totals for each email statistic metric for all subusers. 
+ +**This endpoint allows you to retrieve the total sums of each email statistic metric for all subusers over the given date range.** + + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/stats/sums + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/stats/sums", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["sort_by_direction"] = "asc" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Enable/disable a subuser + +This endpoint allows you to enable or disable a subuser. + +For more information about Subusers: + +* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html) +* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html) + +### PATCH /subusers/{subuser_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "disabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a subuser + +This endpoint allows you to delete a subuser. This is a permanent action, once deleted a subuser cannot be retrieved. + +For more information about Subusers: + +* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html) +* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html) + +### DELETE /subusers/{subuser_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update IPs assigned to a subuser + +Each subuser should be assigned to an IP address, from which all of this subuser's mail will be sent. Often, this is the same IP as the parent account, but each subuser can have their own, or multiple, IP addresses as well. 
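+
+Rather than hand-writing the JSON array used in the PUT example below, the request body can be marshalled from a Go slice. A minimal sketch, assuming the `encoding/json` package is imported (the IP address and the `{subuser_name}` placeholder are illustrative):
+
+```go
+ips := []string{"127.0.0.1"}
+body, err := json.Marshal(ips) // produces ["127.0.0.1"]
+if err != nil {
+ log.Println(err)
+}
+request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/ips", host)
+request.Method = "PUT"
+request.Body = body
+```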
+ +More information: + +* [How to request more IPs](https://sendgrid.com/docs/Classroom/Basics/Account/adding_an_additional_dedicated_ip_to_your_account.html) +* [Setup Reverse DNS](https://sendgrid.com/docs/ui/account-and-settings/how-to-set-up-reverse-dns/) + +### PUT /subusers/{subuser_name}/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/ips", host) +request.Method = "PUT" +request.Body = []byte(` [ + "127.0.0.1" +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Monitor Settings for a subuser + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### PUT /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "PUT" +request.Body = []byte(` { + "email": "example@example.com", + "frequency": 500 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create monitor settings + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### POST /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "POST" +request.Body = []byte(` { + "email": "example@example.com", + "frequency": 50000 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve monitor settings for a subuser + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### GET /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete monitor settings + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### DELETE /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the monthly email statistics for a single subuser + +**This endpoint allows you to retrieve the monthly email statistics for a specific subuser.** + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats for your subusers. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. 
+ +When using the `sort_by_metric` to sort your stats by a specific metric, you can not sort by the following metrics: +`bounce_drops`, `deferred`, `invalid_emails`, `processed`, `spam_report_drops`, `spam_reports`, or `unsubscribe_drops`. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/{subuser_name}/stats/monthly + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/stats/monthly", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["date"] = "test_string" +queryParams["sort_by_direction"] = "asc" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SUPPRESSION + +## Retrieve all blocks + +**This endpoint allows you to retrieve a list of all email addresses that are currently on your blocks list.** + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### GET /suppression/blocks + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete blocks + +**This endpoint allows you to delete all email addresses on your blocks list.** + +There are two options for deleting blocked emails: + +1. You can delete all blocked emails by setting `delete_all` to true in the request body. +2. You can delete some blocked emails by specifying the email addresses in an array in the request body. + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). 
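+
+The DELETE example below demonstrates the second option (removing specific addresses, with `delete_all` set to false). For the first option, the body only needs `delete_all`; a minimal sketch:
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks", host)
+request.Method = "DELETE"
+// Option 1: remove every address currently on the blocks list.
+request.Body = []byte(`{
+ "delete_all": true
+}`)
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.StatusCode)
+}
+```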
+ +### DELETE /suppression/blocks + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": false, + "emails": [ + "example1@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific block + +**This endpoint allows you to retrieve a specific email address from your blocks list.** + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### GET /suppression/blocks/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a specific block + +**This endpoint allows you to delete a specific email address from your blocks list.** + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### DELETE /suppression/blocks/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all bounces + +**This endpoint allows you to retrieve all of your bounces.** + +Bounces are messages that are returned to the server that sent it. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) + +### GET /suppression/bounces + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["end_time"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete bounces + +**This endpoint allows you to delete all of your bounces. You can also use this endpoint to remove a specific email address from your bounce list.** + +Bounces are messages that are returned to the server that sent it. 
+ +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) +* [Classroom > List Scrubbing Guide](https://sendgrid.com/docs/Classroom/Deliver/list_scrubbing.html) + +Note: the `delete_all` and `emails` parameters should be used independently of each other as they have different purposes. + +### DELETE /suppression/bounces + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": true, + "emails": [ + "example@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Bounce + +**This endpoint allows you to retrieve a specific bounce for a given email address.** + +Bounces are messages that are returned to the server that sent it. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) +* [Classroom > List Scrubbing Guide](https://sendgrid.com/docs/Classroom/Deliver/list_scrubbing.html) + +### GET /suppression/bounces/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a bounce + +**This endpoint allows you to remove an email address from your bounce list.** + +Bounces are messages that are returned to the server that sent it. This endpoint allows you to delete a single email addresses from your bounce list. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) +* [Classroom > List Scrubbing Guide](https://sendgrid.com/docs/Classroom/Deliver/list_scrubbing.html) + +### DELETE /suppression/bounces/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces/{email}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["email_address"] = "example@example.com" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all invalid emails + +**This endpoint allows you to retrieve a list of all invalid email addresses.** + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipient's mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). 
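+
+The `start_time` and `end_time` query parameters used by the suppression retrieval endpoints take Unix timestamps; the `"1"` values in the examples are placeholders only. A minimal sketch of requesting the last 30 days of invalid emails, assuming the `time` and `strconv` packages are imported:
+
+```go
+now := time.Now()
+queryParams := make(map[string]string)
+// Restrict the results to entries created within the last 30 days.
+queryParams["start_time"] = strconv.FormatInt(now.AddDate(0, 0, -30).Unix(), 10)
+queryParams["end_time"] = strconv.FormatInt(now.Unix(), 10)
+request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails", host)
+request.Method = "GET"
+request.QueryParams = queryParams
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ fmt.Println(response.Body)
+}
+```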
+ +### GET /suppression/invalid_emails + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete invalid emails + +**This endpoint allows you to remove email addresses from your invalid email address list.** + +There are two options for deleting invalid email addresses: + +1) You can delete all invalid email addresses by setting `delete_all` to true in the request body. +2) You can delete some invalid email addresses by specifying certain addresses in an array in the request body. + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipient's mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### DELETE /suppression/invalid_emails + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": false, + "emails": [ + "example1@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific invalid email + +**This endpoint allows you to retrieve a specific invalid email addresses.** + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipient's mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### GET /suppression/invalid_emails/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a specific invalid email + +**This endpoint allows you to remove a specific email address from the invalid email address list.** + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipient's mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. 
+ +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### DELETE /suppression/invalid_emails/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific spam report + +**This endpoint allows you to retrieve a specific spam report.** + +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to Twilio SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### GET /suppression/spam_report/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_report/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a specific spam report + +**This endpoint allows you to delete a specific spam report.** + +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to Twilio SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### DELETE /suppression/spam_report/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_report/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all spam reports + +**This endpoint allows you to retrieve all spam reports.** + +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to Twilio SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### GET /suppression/spam_reports + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_reports", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete spam reports + +**This endpoint allows you to delete your spam reports.** + +There are two options for deleting spam reports: + +1) You can delete all spam reports by setting "delete_all" to true in the request body. +2) You can delete some spam reports by specifying the email addresses in an array in the request body. 
+ +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to Twilio SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### DELETE /suppression/spam_reports + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_reports", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": false, + "emails": [ + "example1@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all global suppressions + +**This endpoint allows you to retrieve a list of all email addresses that are globally suppressed.** + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### GET /suppression/unsubscribes + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/unsubscribes", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# TEMPLATES + +## Create a transactional template. + +**This endpoint allows you to create a transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +### POST /templates + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "example_name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all transactional templates (legacy & dynamic). + +**This endpoint allows you to retrieve all transactional templates.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). 
For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +### GET /templates + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["generations"] = "legacy,dynamic" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Edit a transactional template. + +**This endpoint allows you to edit a transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + + +### PATCH /templates/{template_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "name": "new_example_name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single transactional template. + +**This endpoint allows you to retrieve a single transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + + +### GET /templates/{template_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a template. + +**This endpoint allows you to delete a transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). 
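+
+Since a successful delete usually carries no meaningful response body, it can be more useful to branch on the status code than to print the body. A minimal sketch for the endpoint shown below; treating any 2xx code as success is an assumption on top of what this document specifies:
+
+```go
+request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host)
+request.Method = "DELETE"
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else if response.StatusCode >= 200 && response.StatusCode < 300 {
+ log.Println("template deleted")
+} else {
+ log.Printf("unexpected status %d: %s", response.StatusCode, response.Body)
+}
+```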
+ + +### DELETE /templates/{template_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a new transactional template version. + +**This endpoint allows you to create a new version of a template.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + + +### POST /templates/{template_id}/versions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions", host) +request.Method = "POST" +request.Body = []byte(` { + "active": 1, + "html_content": "<%body%>", + "name": "example_version_name", + "plain_content": "<%body%>", + "subject": "<%subject%>", + "template_id": "ddb96bbc-9b92-425e-8979-99464621b543" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Edit a transactional template version. + +**This endpoint allows you to edit a version of one of your transactional templates.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### PATCH /templates/{template_id}/versions/{version_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "active": 1, + "html_content": "<%body%>", + "name": "updated_example_name", + "plain_content": "<%body%>", + "subject": "<%subject%>" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific transactional template version. + +**This endpoint allows you to retrieve a specific version of a template.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). 
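+
+The GET request shown after the parameter table returns the version as JSON. A hedged sketch of decoding it into a struct built from the fields used in the create/edit examples above (this is an assumption, not a complete schema; the real response may contain additional fields, and `encoding/json` must be imported):
+
+```go
+// Struct fields mirror the POST/PATCH version bodies shown earlier in this document.
+type templateVersion struct {
+ Active       int    `json:"active"`
+ Name         string `json:"name"`
+ HTMLContent  string `json:"html_content"`
+ PlainContent string `json:"plain_content"`
+ Subject      string `json:"subject"`
+ TemplateID   string `json:"template_id"`
+}
+
+request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host)
+request.Method = "GET"
+response, err := sendgrid.API(request)
+if err != nil {
+ log.Println(err)
+} else {
+ var v templateVersion
+ if err := json.Unmarshal([]byte(response.Body), &v); err != nil {
+  log.Println(err)
+ } else {
+  fmt.Println(v.Name, v.Subject, v.Active)
+ }
+}
+```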
+ +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### GET /templates/{template_id}/versions/{version_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a transactional template version. + +**This endpoint allows you to delete one of your transactional template versions.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### DELETE /templates/{template_id}/versions/{version_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Activate a transactional template version. + +**This endpoint allows you to activate a version of one of your templates.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across across all templates. + + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### POST /templates/{template_id}/versions/{version_id}/activate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}/activate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# TRACKING SETTINGS + +## Retrieve Tracking Settings + +**This endpoint allows you to retrieve a list of all tracking settings that you can enable on your account.** + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). 
+ +### GET /tracking_settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Click Tracking Settings + +**This endpoint allows you to change your current click tracking setting. You can enable, or disable, click tracking using this endpoint.** + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### PATCH /tracking_settings/click + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/click", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Click Track Settings + +**This endpoint allows you to retrieve your current click tracking setting.** + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/click + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/click", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Google Analytics Settings + +**This endpoint allows you to update your current setting for Google Analytics.** + +For more information about using Google Analytics, please refer to [Googles URL Builder](https://support.google.com/analytics/answer/1033867?hl=en) and their article on ["Best Practices for Campaign Building"](https://support.google.com/analytics/answer/1037445). + +We default the settings to Googles recommendations. For more information, see [Google Analytics Demystified](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/google_analytics_demystified_ga_statistics_vs_sg_statistics.html). + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). 
+ +### PATCH /tracking_settings/google_analytics + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/google_analytics", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "utm_campaign": "website", + "utm_content": "", + "utm_medium": "email", + "utm_source": "sendgrid.com", + "utm_term": "" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Google Analytics Settings + +**This endpoint allows you to retrieve your current setting for Google Analytics.** + +For more information about using Google Analytics, please refer to [Googles URL Builder](https://support.google.com/analytics/answer/1033867?hl=en) and their article on ["Best Practices for Campaign Building"](https://support.google.com/analytics/answer/1037445). + +We default the settings to Googles recommendations. For more information, see [Google Analytics Demystified](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/google_analytics_demystified_ga_statistics_vs_sg_statistics.html). + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/google_analytics + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/google_analytics", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Open Tracking Settings + +**This endpoint allows you to update your current settings for open tracking.** + +Open Tracking adds an invisible image at the end of the email which can track email opens. If the email recipient has images enabled on their email client, a request to Twilio SendGrid's server for the invisible image is executed and an open event is logged. These events are logged in the Statistics portal, Email Activity interface, and are reported by the Event Webhook. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### PATCH /tracking_settings/open + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/open", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get Open Tracking Settings + +**This endpoint allows you to retrieve your current settings for open tracking.** + +Open Tracking adds an invisible image at the end of the email which can track email opens. If the email recipient has images enabled on their email client, a request to Twilio SendGrid's server for the invisible image is executed and an open event is logged. 
These events are logged in the Statistics portal, Email Activity interface, and are reported by the Event Webhook. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/open + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/open", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Subscription Tracking Settings + +**This endpoint allows you to update your current settings for subscription tracking.** + +Subscription tracking adds links to the bottom of your emails that allows your recipients to subscribe to, or unsubscribe from, your emails. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### PATCH /tracking_settings/subscription + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/subscription", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "html_content": "html content", + "landing": "landing page html", + "plain_content": "text content", + "replace": "replacement tag", + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Subscription Tracking Settings + +**This endpoint allows you to retrieve your current settings for subscription tracking.** + +Subscription tracking adds links to the bottom of your emails that allows your recipients to subscribe to, or unsubscribe from, your emails. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/subscription + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/subscription", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# On-Behalf of Subuser + +The on-behalf-of header allows you to make calls for a particular subuser through the parent account; this can be useful for automating bulk updates or administering a subuser without changing authentication in your code. 
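+
+As a point of reference, a complete send on behalf of a subuser could look like the sketch below (it assumes `SENDGRID_API_KEY` is set in the environment and that `"SUBUSER_USERNAME"` is replaced with a real subuser); the two sections that follow show just the client and request construction for each approach.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/sendgrid/sendgrid-go"
+	"github.com/sendgrid/sendgrid-go/helpers/mail"
+)
+
+func main() {
+	from := mail.NewEmail("Example User", "parent@example.com")
+	to := mail.NewEmail("Example Recipient", "recipient@example.com")
+	message := mail.NewSingleEmail(from, "Sending on behalf of a subuser", to, "Hello!", "<strong>Hello!</strong>")
+
+	// The subuser client sends the On-Behalf-Of header with every request.
+	client := sendgrid.NewSendClientSubuser(os.Getenv("SENDGRID_API_KEY"), "SUBUSER_USERNAME")
+	response, err := client.Send(message)
+	if err != nil {
+		log.Println(err)
+	} else {
+		fmt.Println(response.StatusCode)
+	}
+}
+```
+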
+## With Mail Helper Class + +```go + +client := sendgrid.NewSendClientSubuser(os.Getenv("SENDGRID_API_KEY"), "SUBUSER_USERNAME") + +``` + +## Without Mail Helper Class + +```go + +request := sendgrid.GetRequestSubuser(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com", "SUBUSER_USERNAME") + +``` + + +# USER + +## Get a user's account information. + +**This endpoint allows you to retrieve your user account details.** + +Your user's account information includes the user's account type and reputation. + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/account + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/account", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve your credit balance + +**This endpoint allows you to retrieve the current credit balance for your account.** + +Your monthly credit allotment limits the number of emails you may send before incurring overage charges. For more information about credits and billing, please visit our [Classroom](https://sendgrid.com/docs/Classroom/Basics/Billing/billing_info_and_faqs.html). + +### GET /user/credits + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/credits", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update your account email address + +**This endpoint allows you to update the email address currently on file for your account.** + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### PUT /user/email + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/email", host) +request.Method = "PUT" +request.Body = []byte(` { + "email": "example@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve your account email address + +**This endpoint allows you to retrieve the email address currently on file for your account.** + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/email + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/email", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update your password + +**This endpoint allows you to update your password.** + +Keeping your user profile up to date is important. 
This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### PUT /user/password + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/password", host) +request.Method = "PUT" +request.Body = []byte(` { + "new_password": "new_password", + "old_password": "old_password" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a user's profile + +**This endpoint allows you to update your current profile details.** + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +It should be noted that any one or more of the parameters can be updated via the PATCH /user/profile endpoint. The only requirement is that you include at least one when you PATCH. + +### PATCH /user/profile + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/profile", host) +request.Method = "PATCH" +request.Body = []byte(` { + "city": "Orange", + "first_name": "Example", + "last_name": "User" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get a user's profile + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/profile + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/profile", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Cancel or pause a scheduled send + +**This endpoint allows you to cancel or pause an email that has been scheduled to be sent.** + +If the maximum number of cancellations/pauses are added, HTTP 400 will +be returned. + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends canceled less than 10 minutes before the scheduled time are not guaranteed to be canceled. + +### POST /user/scheduled_sends + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends", host) +request.Method = "POST" +request.Body = []byte(` { + "batch_id": "YOUR_BATCH_ID", + "status": "pause" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all scheduled sends + +**This endpoint allows you to retrieve all cancel/paused scheduled send information.** + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends canceled less than 10 minutes before the scheduled time are not guaranteed to be canceled. 
+ +### GET /user/scheduled_sends + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update user scheduled send information + +**This endpoint allows you to update the status of a scheduled send for the given `batch_id`.** + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends canceled less than 10 minutes before the scheduled time are not guaranteed to be canceled. + +### PATCH /user/scheduled_sends/{batch_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "status": "pause" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve scheduled send + +**This endpoint allows you to retrieve the cancel/paused scheduled send information for a specific `batch_id`.** + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends canceled less than 10 minutes before the scheduled time are not guaranteed to be canceled. + +### GET /user/scheduled_sends/{batch_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a cancellation or pause of a scheduled send + +**This endpoint allows you to delete the cancellation/pause of a scheduled send.** + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends canceled less than 10 minutes before the scheduled time are not guaranteed to be canceled. + +### DELETE /user/scheduled_sends/{batch_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Enforced TLS settings + +**This endpoint allows you to update your current Enforced TLS settings.** + +The Enforced TLS settings specify whether or not the recipient is required to support TLS or have a valid certificate. See the [SMTP Ports User Guide](https://sendgrid.com/docs/Classroom/Basics/Email_Infrastructure/smtp_ports.html) for more information on opportunistic TLS. + +**Note:** If either setting is enabled and the recipient does not support TLS or have a valid certificate, we drop the message and send a block event with TLS required but not supported as the description. 
+ +### PATCH /user/settings/enforced_tls + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/settings/enforced_tls", host) +request.Method = "PATCH" +request.Body = []byte(` { + "require_tls": true, + "require_valid_cert": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve current Enforced TLS settings. + +**This endpoint allows you to retrieve your current Enforced TLS settings.** + +The Enforced TLS settings specify whether or not the recipient is required to support TLS or have a valid certificate. See the [SMTP Ports User Guide](https://sendgrid.com/docs/Classroom/Basics/Email_Infrastructure/smtp_ports.html) for more information on opportunistic TLS. + +**Note:** If either setting is enabled and the recipient does not support TLS or have a valid certificate, we drop the message and send a block event with TLS required but not supported as the description. + +### GET /user/settings/enforced_tls + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/settings/enforced_tls", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update your username + +**This endpoint allows you to update the username for your account.** + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### PUT /user/username + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/username", host) +request.Method = "PUT" +request.Body = []byte(` { + "username": "test_username" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve your username + +**This endpoint allows you to retrieve your current account username.** + +Keeping your user profile up to date is important. This will help Twilio SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [Twilio SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/username + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/username", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Event Notification Settings + +**This endpoint allows you to update your current event webhook settings.** + +If an event type is marked as `true`, then the event webhook will include information about that event. + +Twilio SendGrid's Event Webhook will notify a URL of your choice via HTTP POST with information about events that occur as Twilio SendGrid processes your email. + +Common uses of this data are to remove unsubscribes, react to spam reports, determine unengaged recipients, identify bounced email addresses, or create advanced analytics of your email program. 
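+
+The PATCH endpoint below selects which event types are delivered. For orientation, the webhook itself delivers events to your URL as an HTTP POST whose body is a JSON array of event objects; a minimal, hypothetical receiver might look like the following sketch (the `/sendgrid/events` path and the port are placeholders):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+)
+
+func main() {
+	http.HandleFunc("/sendgrid/events", func(w http.ResponseWriter, r *http.Request) {
+		// The Event Webhook posts a JSON array of event objects.
+		var events []map[string]interface{}
+		if err := json.NewDecoder(r.Body).Decode(&events); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		for _, e := range events {
+			// "event" (the event type) and "email" are common fields on each object.
+			log.Printf("event=%v email=%v", e["event"], e["email"])
+		}
+		w.WriteHeader(http.StatusOK)
+	})
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```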
+ +### PATCH /user/webhooks/event/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/settings", host) +request.Method = "PATCH" +request.Body = []byte(` { + "bounce": true, + "click": true, + "deferred": true, + "delivered": true, + "dropped": true, + "enabled": true, + "group_resubscribe": true, + "group_unsubscribe": true, + "open": true, + "processed": true, + "spam_report": true, + "unsubscribe": true, + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Event Webhook settings + +**This endpoint allows you to retrieve your current event webhook settings.** + +If an event type is marked as `true`, then the event webhook will include information about that event. + +Twilio SendGrid's Event Webhook will notify a URL of your choice via HTTP POST with information about events that occur as Twilio SendGrid processes your email. + +Common uses of this data are to remove unsubscribes, react to spam reports, determine unengaged recipients, identify bounced email addresses, or create advanced analytics of your email program. + +### GET /user/webhooks/event/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/settings", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Test Event Notification Settings + +**This endpoint allows you to test your event webhook by sending a fake event notification post to the provided URL.** + +Twilio SendGrid's Event Webhook will notify a URL of your choice via HTTP POST with information about events that occur as Twilio SendGrid processes your email. + +Common uses of this data are to remove unsubscribes, react to spam reports, determine unengaged recipients, identify bounced email addresses, or create advanced analytics of your email program. + +### POST /user/webhooks/event/test + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/test", host) +request.Method = "POST" +request.Body = []byte(` { + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a parse setting + +**This endpoint allows you to create a new inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by Twilio SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). 
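+
+The POST endpoint below registers the hostname and destination URL for inbound parsing. The destination itself receives each parsed message as a multipart/form-data POST; a minimal, hypothetical receiver could look like the following sketch (the `/parse` path, the port, and the fields read are illustrative):
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+)
+
+func main() {
+	http.HandleFunc("/parse", func(w http.ResponseWriter, r *http.Request) {
+		// Each inbound message arrives as a multipart/form-data POST.
+		if err := r.ParseMultipartForm(32 << 20); err != nil {
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+		// "from", "subject" and "text" are among the posted form fields.
+		log.Printf("from=%q subject=%q", r.FormValue("from"), r.FormValue("subject"))
+		w.WriteHeader(http.StatusOK)
+	})
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```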
+ +### POST /user/webhooks/parse/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings", host) +request.Method = "POST" +request.Body = []byte(` { + "hostname": "myhostname.com", + "send_raw": false, + "spam_check": true, + "url": "http://email.myhosthame.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all parse settings + +**This endpoint allows you to retrieve all of your current inbound parse settings.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by Twilio SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). + +### GET /user/webhooks/parse/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a parse setting + +**This endpoint allows you to update a specific inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by Twilio SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). + +### PATCH /user/webhooks/parse/settings/{hostname} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "send_raw": true, + "spam_check": false, + "url": "http://newdomain.com/parse" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific parse setting + +**This endpoint allows you to retrieve a specific inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by Twilio SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). + +### GET /user/webhooks/parse/settings/{hostname} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a parse setting + +**This endpoint allows you to delete a specific inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by Twilio SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). 
+ +### DELETE /user/webhooks/parse/settings/{hostname} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieves Inbound Parse Webhook statistics. + +**This endpoint allows you to retrieve the statistics for your Parse Webhook usage.** + +Twilio SendGrid's Inbound Parse Webhook allows you to parse the contents and attachments of incoming emails. The Parse API can then POST the parsed emails to a URL that you specify. The Inbound Parse Webhook cannot parse messages greater than 20MB in size, including all attachments. + +There are a number of pre-made integrations for the Twilio SendGrid Parse Webhook which make processing events easy. You can find these integrations in the [Library Index](https://sendgrid.com/docs/Integrate/libraries.html#-Webhook-Libraries). + +### GET /user/webhooks/parse/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "test_string" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +queryParams["offset"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` diff --git a/vendor/github.com/sendgrid/sendgrid-go/base_interface.go b/vendor/github.com/sendgrid/sendgrid-go/base_interface.go new file mode 100644 index 0000000..6f60f6b --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/base_interface.go @@ -0,0 +1,130 @@ +package sendgrid + +import ( + "errors" + "net/http" + "strconv" + "time" + + "github.com/sendgrid/rest" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +// Version is this client library's current version +const ( + Version = "3.7.2" + rateLimitRetry = 5 + rateLimitSleep = 1100 +) + +type options struct { + Auth string + Endpoint string + Host string + Subuser string +} + +// Client is the Twilio SendGrid Go client +type Client struct { + rest.Request +} + +func (o *options) baseURL() string { + return o.Host + o.Endpoint +} + +// requestNew create Request +// @return [Request] a default request object +func requestNew(options options) rest.Request { + requestHeaders := map[string]string{ + "Authorization": options.Auth, + "User-Agent": "sendgrid/" + Version + ";go", + "Accept": "application/json", + } + + if len(options.Subuser) != 0 { + requestHeaders["On-Behalf-Of"] = options.Subuser + } + + return rest.Request{ + BaseURL: options.baseURL(), + Headers: requestHeaders, + } +} + +// Send sends an email through Twilio SendGrid +func (cl *Client) Send(email *mail.SGMailV3) (*rest.Response, error) { + cl.Body = mail.GetRequestBody(email) + return MakeRequest(cl.Request) +} + +// DefaultClient is used if no custom HTTP client is defined +var DefaultClient = rest.DefaultClient + +// API sets up the request to the Twilio SendGrid API, this is main interface. +// Please use the MakeRequest or MakeRequestAsync functions instead. 
+// (deprecated) +func API(request rest.Request) (*rest.Response, error) { + return MakeRequest(request) +} + +// MakeRequest attempts a Twilio SendGrid request synchronously. +func MakeRequest(request rest.Request) (*rest.Response, error) { + return DefaultClient.Send(request) +} + +// MakeRequestRetry a synchronous request, but retry in the event of a rate +// limited response. +func MakeRequestRetry(request rest.Request) (*rest.Response, error) { + retry := 0 + var response *rest.Response + var err error + + for { + response, err = MakeRequest(request) + if err != nil { + return nil, err + } + + if response.StatusCode != http.StatusTooManyRequests { + return response, nil + } + + if retry > rateLimitRetry { + return nil, errors.New("rate limit retry exceeded") + } + retry++ + + resetTime := time.Now().Add(rateLimitSleep * time.Millisecond) + + reset, ok := response.Headers["X-RateLimit-Reset"] + if ok && len(reset) > 0 { + t, err := strconv.Atoi(reset[0]) + if err == nil { + resetTime = time.Unix(int64(t), 0) + } + } + time.Sleep(resetTime.Sub(time.Now())) + } +} + +// MakeRequestAsync attempts a request asynchronously in a new go +// routine. This function returns two channels: responses +// and errors. This function will retry in the case of a +// rate limit. +func MakeRequestAsync(request rest.Request) (chan *rest.Response, chan error) { + r := make(chan *rest.Response) + e := make(chan error) + + go func() { + response, err := MakeRequestRetry(request) + if err != nil { + e <- err + } + if response != nil { + r <- response + } + }() + + return r, e +} diff --git a/vendor/github.com/sendgrid/sendgrid-go/go.coverage.sh b/vendor/github.com/sendgrid/sendgrid-go/go.coverage.sh new file mode 100644 index 0000000..fadb3cc --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/go.coverage.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo > coverage.txt + +for d in $(go list ./... | grep -v -E '/vendor|/examples|/docker'); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md new file mode 100644 index 0000000..4cee382 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md @@ -0,0 +1,31 @@ +**This helper allows you to quickly and easily build a Mail object for sending email through Twilio SendGrid.** + +## Dependencies + +- [rest](https://github.com/sendgrid/rest) + +# Quick Start + +Run the [example](../../examples/helpers/mail/example.go) (make sure you have set your environment variable to include your SENDGRID_API_KEY). + +```bash +go run examples/helpers/mail/example.go +``` + +## Usage + +- See the [example](../../examples/helpers/mail/example.go) for a complete working example. +- [Documentation](https://sendgrid.com/docs/API_Reference/Web_API_v3/Mail/index.html) + +## Test + +```bash +go test ./... 
-v +``` + +or + +```bash +cd helpers/mail +go test -v +``` diff --git a/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go new file mode 100644 index 0000000..16d7cd2 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go @@ -0,0 +1,702 @@ +package mail + +import ( + "encoding/json" + "log" + "net/mail" +) + +// SGMailV3 contains mail struct +type SGMailV3 struct { + From *Email `json:"from,omitempty"` + Subject string `json:"subject,omitempty"` + Personalizations []*Personalization `json:"personalizations,omitempty"` + Content []*Content `json:"content,omitempty"` + Attachments []*Attachment `json:"attachments,omitempty"` + TemplateID string `json:"template_id,omitempty"` + Sections map[string]string `json:"sections,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Categories []string `json:"categories,omitempty"` + CustomArgs map[string]string `json:"custom_args,omitempty"` + SendAt int `json:"send_at,omitempty"` + BatchID string `json:"batch_id,omitempty"` + Asm *Asm `json:"asm,omitempty"` + IPPoolID string `json:"ip_pool_name,omitempty"` + MailSettings *MailSettings `json:"mail_settings,omitempty"` + TrackingSettings *TrackingSettings `json:"tracking_settings,omitempty"` + ReplyTo *Email `json:"reply_to,omitempty"` +} + +// Personalization holds mail body struct +type Personalization struct { + To []*Email `json:"to,omitempty"` + From *Email `json:"from,omitempty"` + CC []*Email `json:"cc,omitempty"` + BCC []*Email `json:"bcc,omitempty"` + Subject string `json:"subject,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Substitutions map[string]string `json:"substitutions,omitempty"` + CustomArgs map[string]string `json:"custom_args,omitempty"` + DynamicTemplateData map[string]interface{} `json:"dynamic_template_data,omitempty"` + Categories []string `json:"categories,omitempty"` + SendAt int `json:"send_at,omitempty"` +} + +// Email holds email name and address info +type Email struct { + Name string `json:"name,omitempty"` + Address string `json:"email,omitempty"` +} + +// Content defines content of the mail body +type Content struct { + Type string `json:"type,omitempty"` + Value string `json:"value,omitempty"` +} + +// Attachment holds attachement information +type Attachment struct { + Content string `json:"content,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Filename string `json:"filename,omitempty"` + Disposition string `json:"disposition,omitempty"` + ContentID string `json:"content_id,omitempty"` +} + +// Asm contains Grpip Id and int array of groups ID +type Asm struct { + GroupID int `json:"group_id,omitempty"` + GroupsToDisplay []int `json:"groups_to_display,omitempty"` +} + +// MailSettings defines mail and spamCheck settings +type MailSettings struct { + BCC *BccSetting `json:"bcc,omitempty"` + BypassListManagement *Setting `json:"bypass_list_management,omitempty"` + Footer *FooterSetting `json:"footer,omitempty"` + SandboxMode *Setting `json:"sandbox_mode,omitempty"` + SpamCheckSetting *SpamCheckSetting `json:"spam_check,omitempty"` +} + +// TrackingSettings holds tracking settings and mail settings +type TrackingSettings struct { + ClickTracking *ClickTrackingSetting `json:"click_tracking,omitempty"` + OpenTracking *OpenTrackingSetting `json:"open_tracking,omitempty"` + SubscriptionTracking *SubscriptionTrackingSetting `json:"subscription_tracking,omitempty"` + GoogleAnalytics 
*GaSetting `json:"ganalytics,omitempty"` + BCC *BccSetting `json:"bcc,omitempty"` + BypassListManagement *Setting `json:"bypass_list_management,omitempty"` + Footer *FooterSetting `json:"footer,omitempty"` + SandboxMode *SandboxModeSetting `json:"sandbox_mode,omitempty"` +} + +// BccSetting holds email bcc setings to enable of disable +// default is false +type BccSetting struct { + Enable *bool `json:"enable,omitempty"` + Email string `json:"email,omitempty"` +} + +// FooterSetting holds enaable/disable settings +// and the format of footer i.e HTML/Text +type FooterSetting struct { + Enable *bool `json:"enable,omitempty"` + Text string `json:"text,omitempty"` + Html string `json:"html,omitempty"` +} + +// ClickTrackingSetting ... +type ClickTrackingSetting struct { + Enable *bool `json:"enable,omitempty"` + EnableText *bool `json:"enable_text,omitempty"` +} + +// OpenTrackingSetting ... +type OpenTrackingSetting struct { + Enable *bool `json:"enable,omitempty"` + SubstitutionTag string `json:"substitution_tag,omitempty"` +} + +// SandboxModeSetting ... +type SandboxModeSetting struct { + Enable *bool `json:"enable,omitempty"` + ForwardSpam *bool `json:"forward_spam,omitempty"` + SpamCheck *SpamCheckSetting `json:"spam_check,omitempty"` +} + +// SpamCheckSetting holds spam settings and +// which can be enable or disable and +// contains spamThreshold value +type SpamCheckSetting struct { + Enable *bool `json:"enable,omitempty"` + SpamThreshold int `json:"threshold,omitempty"` + PostToURL string `json:"post_to_url,omitempty"` +} + +// SubscriptionTrackingSetting ... +type SubscriptionTrackingSetting struct { + Enable *bool `json:"enable,omitempty"` + Text string `json:"text,omitempty"` + Html string `json:"html,omitempty"` + SubstitutionTag string `json:"substitution_tag,omitempty"` +} + +// GaSetting ... +type GaSetting struct { + Enable *bool `json:"enable,omitempty"` + CampaignSource string `json:"utm_source,omitempty"` + CampaignTerm string `json:"utm_term,omitempty"` + CampaignContent string `json:"utm_content,omitempty"` + CampaignName string `json:"utm_campaign,omitempty"` + CampaignMedium string `json:"utm_medium,omitempty"` +} + +// Setting enables the mail settings +type Setting struct { + Enable *bool `json:"enable,omitempty"` +} + +// NewV3Mail ... +func NewV3Mail() *SGMailV3 { + return &SGMailV3{ + Personalizations: make([]*Personalization, 0), + Content: make([]*Content, 0), + Attachments: make([]*Attachment, 0), + } +} + +// NewV3MailInit ... +func NewV3MailInit(from *Email, subject string, to *Email, content ...*Content) *SGMailV3 { + m := new(SGMailV3) + m.SetFrom(from) + m.Subject = subject + p := NewPersonalization() + p.AddTos(to) + m.AddPersonalizations(p) + m.AddContent(content...) + return m +} + +// GetRequestBody ... +func GetRequestBody(m *SGMailV3) []byte { + b, err := json.Marshal(m) + if err != nil { + log.Println(err) + } + return b +} + +// AddPersonalizations ... +func (s *SGMailV3) AddPersonalizations(p ...*Personalization) *SGMailV3 { + s.Personalizations = append(s.Personalizations, p...) + return s +} + +// AddContent ... +func (s *SGMailV3) AddContent(c ...*Content) *SGMailV3 { + s.Content = append(s.Content, c...) + return s +} + +// AddAttachment ... +func (s *SGMailV3) AddAttachment(a ...*Attachment) *SGMailV3 { + s.Attachments = append(s.Attachments, a...) + return s +} + +// SetFrom ... +func (s *SGMailV3) SetFrom(e *Email) *SGMailV3 { + s.From = e + return s +} + +// SetReplyTo ... 
+func (s *SGMailV3) SetReplyTo(e *Email) *SGMailV3 { + s.ReplyTo = e + return s +} + +// SetTemplateID ... +func (s *SGMailV3) SetTemplateID(templateID string) *SGMailV3 { + s.TemplateID = templateID + return s +} + +// AddSection ... +func (s *SGMailV3) AddSection(key string, value string) *SGMailV3 { + if s.Sections == nil { + s.Sections = make(map[string]string) + } + + s.Sections[key] = value + return s +} + +// SetHeader ... +func (s *SGMailV3) SetHeader(key string, value string) *SGMailV3 { + if s.Headers == nil { + s.Headers = make(map[string]string) + } + + s.Headers[key] = value + return s +} + +// AddCategories ... +func (s *SGMailV3) AddCategories(category ...string) *SGMailV3 { + s.Categories = append(s.Categories, category...) + return s +} + +// SetCustomArg ... +func (s *SGMailV3) SetCustomArg(key string, value string) *SGMailV3 { + if s.CustomArgs == nil { + s.CustomArgs = make(map[string]string) + } + + s.CustomArgs[key] = value + return s +} + +// SetSendAt ... +func (s *SGMailV3) SetSendAt(sendAt int) *SGMailV3 { + s.SendAt = sendAt + return s +} + +// SetBatchID ... +func (s *SGMailV3) SetBatchID(batchID string) *SGMailV3 { + s.BatchID = batchID + return s +} + +// SetASM ... +func (s *SGMailV3) SetASM(asm *Asm) *SGMailV3 { + s.Asm = asm + return s +} + +// SetIPPoolID ... +func (s *SGMailV3) SetIPPoolID(ipPoolID string) *SGMailV3 { + s.IPPoolID = ipPoolID + return s +} + +// SetMailSettings ... +func (s *SGMailV3) SetMailSettings(mailSettings *MailSettings) *SGMailV3 { + s.MailSettings = mailSettings + return s +} + +// SetTrackingSettings ... +func (s *SGMailV3) SetTrackingSettings(trackingSettings *TrackingSettings) *SGMailV3 { + s.TrackingSettings = trackingSettings + return s +} + +// NewPersonalization ... +func NewPersonalization() *Personalization { + return &Personalization{ + To: make([]*Email, 0), + CC: make([]*Email, 0), + BCC: make([]*Email, 0), + Headers: make(map[string]string), + Substitutions: make(map[string]string), + CustomArgs: make(map[string]string), + DynamicTemplateData: make(map[string]interface{}), + Categories: make([]string, 0), + } +} + +// AddTos ... +func (p *Personalization) AddTos(to ...*Email) { + p.To = append(p.To, to...) +} + +//AddFrom ... +func (p *Personalization) AddFrom(from *Email) { + p.From = from +} + +// AddCCs ... +func (p *Personalization) AddCCs(cc ...*Email) { + p.CC = append(p.CC, cc...) +} + +// AddBCCs ... +func (p *Personalization) AddBCCs(bcc ...*Email) { + p.BCC = append(p.BCC, bcc...) +} + +// SetHeader ... +func (p *Personalization) SetHeader(key string, value string) { + p.Headers[key] = value +} + +// SetSubstitution ... +func (p *Personalization) SetSubstitution(key string, value string) { + p.Substitutions[key] = value +} + +// SetCustomArg ... +func (p *Personalization) SetCustomArg(key string, value string) { + p.CustomArgs[key] = value +} + +// SetDynamicTemplateData ... +func (p *Personalization) SetDynamicTemplateData(key string, value interface{}) { + p.DynamicTemplateData[key] = value +} + +// SetSendAt ... +func (p *Personalization) SetSendAt(sendAt int) { + p.SendAt = sendAt +} + +// NewAttachment ... +func NewAttachment() *Attachment { + return &Attachment{} +} + +// SetContent ... +func (a *Attachment) SetContent(content string) *Attachment { + a.Content = content + return a +} + +// SetType ... +func (a *Attachment) SetType(contentType string) *Attachment { + a.Type = contentType + return a +} + +// SetFilename ... 
+func (a *Attachment) SetFilename(filename string) *Attachment { + a.Filename = filename + return a +} + +// SetDisposition ... +func (a *Attachment) SetDisposition(disposition string) *Attachment { + a.Disposition = disposition + return a +} + +// SetContentID ... +func (a *Attachment) SetContentID(contentID string) *Attachment { + a.ContentID = contentID + return a +} + +// NewASM ... +func NewASM() *Asm { + return &Asm{} +} + +// SetGroupID ... +func (a *Asm) SetGroupID(groupID int) *Asm { + a.GroupID = groupID + return a +} + +// AddGroupsToDisplay ... +func (a *Asm) AddGroupsToDisplay(groupsToDisplay ...int) *Asm { + a.GroupsToDisplay = append(a.GroupsToDisplay, groupsToDisplay...) + return a +} + +// NewMailSettings ... +func NewMailSettings() *MailSettings { + return &MailSettings{} +} + +// SetBCC ... +func (m *MailSettings) SetBCC(bcc *BccSetting) *MailSettings { + m.BCC = bcc + return m +} + +// SetBypassListManagement ... +func (m *MailSettings) SetBypassListManagement(bypassListManagement *Setting) *MailSettings { + m.BypassListManagement = bypassListManagement + return m +} + +// SetFooter ... +func (m *MailSettings) SetFooter(footerSetting *FooterSetting) *MailSettings { + m.Footer = footerSetting + return m +} + +// SetSandboxMode ... +func (m *MailSettings) SetSandboxMode(sandboxMode *Setting) *MailSettings { + m.SandboxMode = sandboxMode + return m +} + +// SetSpamCheckSettings ... +func (m *MailSettings) SetSpamCheckSettings(spamCheckSetting *SpamCheckSetting) *MailSettings { + m.SpamCheckSetting = spamCheckSetting + return m +} + +// NewTrackingSettings ... +func NewTrackingSettings() *TrackingSettings { + return &TrackingSettings{} +} + +// SetClickTracking ... +func (t *TrackingSettings) SetClickTracking(clickTracking *ClickTrackingSetting) *TrackingSettings { + t.ClickTracking = clickTracking + return t + +} + +// SetOpenTracking ... +func (t *TrackingSettings) SetOpenTracking(openTracking *OpenTrackingSetting) *TrackingSettings { + t.OpenTracking = openTracking + return t +} + +// SetSubscriptionTracking ... +func (t *TrackingSettings) SetSubscriptionTracking(subscriptionTracking *SubscriptionTrackingSetting) *TrackingSettings { + t.SubscriptionTracking = subscriptionTracking + return t +} + +// SetGoogleAnalytics ... +func (t *TrackingSettings) SetGoogleAnalytics(googleAnalytics *GaSetting) *TrackingSettings { + t.GoogleAnalytics = googleAnalytics + return t +} + +// NewBCCSetting ... +func NewBCCSetting() *BccSetting { + return &BccSetting{} +} + +// SetEnable ... +func (b *BccSetting) SetEnable(enable bool) *BccSetting { + setEnable := enable + b.Enable = &setEnable + return b +} + +// SetEmail ... +func (b *BccSetting) SetEmail(email string) *BccSetting { + b.Email = email + return b +} + +// NewFooterSetting ... +func NewFooterSetting() *FooterSetting { + return &FooterSetting{} +} + +// SetEnable ... +func (f *FooterSetting) SetEnable(enable bool) *FooterSetting { + setEnable := enable + f.Enable = &setEnable + return f +} + +// SetText ... +func (f *FooterSetting) SetText(text string) *FooterSetting { + f.Text = text + return f +} + +// SetHTML ... +func (f *FooterSetting) SetHTML(html string) *FooterSetting { + f.Html = html + return f +} + +// NewOpenTrackingSetting ... +func NewOpenTrackingSetting() *OpenTrackingSetting { + return &OpenTrackingSetting{} +} + +// SetEnable ... +func (o *OpenTrackingSetting) SetEnable(enable bool) *OpenTrackingSetting { + setEnable := enable + o.Enable = &setEnable + return o +} + +// SetSubstitutionTag ... 
+func (o *OpenTrackingSetting) SetSubstitutionTag(subTag string) *OpenTrackingSetting { + o.SubstitutionTag = subTag + return o +} + +// NewSubscriptionTrackingSetting ... +func NewSubscriptionTrackingSetting() *SubscriptionTrackingSetting { + return &SubscriptionTrackingSetting{} +} + +// SetEnable ... +func (s *SubscriptionTrackingSetting) SetEnable(enable bool) *SubscriptionTrackingSetting { + setEnable := enable + s.Enable = &setEnable + return s +} + +// SetText ... +func (s *SubscriptionTrackingSetting) SetText(text string) *SubscriptionTrackingSetting { + s.Text = text + return s +} + +// SetHTML ... +func (s *SubscriptionTrackingSetting) SetHTML(html string) *SubscriptionTrackingSetting { + s.Html = html + return s +} + +// SetSubstitutionTag ... +func (s *SubscriptionTrackingSetting) SetSubstitutionTag(subTag string) *SubscriptionTrackingSetting { + s.SubstitutionTag = subTag + return s +} + +// NewGaSetting ... +func NewGaSetting() *GaSetting { + return &GaSetting{} +} + +// SetEnable ... +func (g *GaSetting) SetEnable(enable bool) *GaSetting { + setEnable := enable + g.Enable = &setEnable + return g +} + +// SetCampaignSource ... +func (g *GaSetting) SetCampaignSource(campaignSource string) *GaSetting { + g.CampaignSource = campaignSource + return g +} + +// SetCampaignContent ... +func (g *GaSetting) SetCampaignContent(campaignContent string) *GaSetting { + g.CampaignContent = campaignContent + return g +} + +// SetCampaignTerm ... +func (g *GaSetting) SetCampaignTerm(campaignTerm string) *GaSetting { + g.CampaignTerm = campaignTerm + return g +} + +// SetCampaignName ... +func (g *GaSetting) SetCampaignName(campaignName string) *GaSetting { + g.CampaignName = campaignName + return g +} + +// SetCampaignMedium ... +func (g *GaSetting) SetCampaignMedium(campaignMedium string) *GaSetting { + g.CampaignMedium = campaignMedium + return g +} + +// NewSetting ... +func NewSetting(enable bool) *Setting { + setEnable := enable + return &Setting{Enable: &setEnable} +} + +// NewEmail ... +func NewEmail(name string, address string) *Email { + return &Email{ + Name: name, + Address: address, + } +} + +// NewSingleEmail ... +func NewSingleEmail(from *Email, subject string, to *Email, plainTextContent string, htmlContent string) *SGMailV3 { + var contents []*Content + if plainTextContent != "" { + contents = append(contents, NewContent("text/plain", plainTextContent)) + } + if htmlContent != "" { + contents = append(contents, NewContent("text/html", htmlContent)) + } + return NewV3MailInit(from, subject, to, contents...) +} + +// NewContent ... +func NewContent(contentType string, value string) *Content { + return &Content{ + Type: contentType, + Value: value, + } +} + +// NewClickTrackingSetting ... +func NewClickTrackingSetting() *ClickTrackingSetting { + return &ClickTrackingSetting{} +} + +// SetEnable ... +func (c *ClickTrackingSetting) SetEnable(enable bool) *ClickTrackingSetting { + setEnable := enable + c.Enable = &setEnable + return c +} + +// SetEnableText ... +func (c *ClickTrackingSetting) SetEnableText(enableText bool) *ClickTrackingSetting { + setEnable := enableText + c.EnableText = &setEnable + return c +} + +// NewSpamCheckSetting ... +func NewSpamCheckSetting() *SpamCheckSetting { + return &SpamCheckSetting{} +} + +// SetEnable ... +func (s *SpamCheckSetting) SetEnable(enable bool) *SpamCheckSetting { + setEnable := enable + s.Enable = &setEnable + return s +} + +// SetSpamThreshold ... 
+func (s *SpamCheckSetting) SetSpamThreshold(spamThreshold int) *SpamCheckSetting { + s.SpamThreshold = spamThreshold + return s +} + +// SetPostToURL ... +func (s *SpamCheckSetting) SetPostToURL(postToURL string) *SpamCheckSetting { + s.PostToURL = postToURL + return s +} + +// NewSandboxModeSetting ... +func NewSandboxModeSetting(enable bool, forwardSpam bool, spamCheck *SpamCheckSetting) *SandboxModeSetting { + setEnable := enable + setForwardSpam := forwardSpam + return &SandboxModeSetting{ + Enable: &setEnable, + ForwardSpam: &setForwardSpam, + SpamCheck: spamCheck, + } +} + +// ParseEmail parses a string that contains an rfc822 formatted email address +// and returns an instance of *Email. +func ParseEmail(emailInfo string) (*Email, error) { + e, err := mail.ParseAddress(emailInfo) + if err != nil { + return nil, err + } + return NewEmail(e.Name, e.Address), nil +} diff --git a/vendor/github.com/sendgrid/sendgrid-go/sendgrid.go b/vendor/github.com/sendgrid/sendgrid-go/sendgrid.go new file mode 100644 index 0000000..77102a0 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/sendgrid.go @@ -0,0 +1,57 @@ +package sendgrid + +import ( + "github.com/sendgrid/rest" +) + +// sendGridOptions for CreateRequest +type sendGridOptions struct { + Key string + Endpoint string + Host string + Subuser string +} + +// GetRequest +// @return [Request] a default request object +func GetRequest(key, endpoint, host string) rest.Request { + return createSendGridRequest(sendGridOptions{key, endpoint, host, ""}) +} + +// GetRequestSubuser like GetRequest but with On-Behalf of Subuser +// @return [Request] a default request object +func GetRequestSubuser(key, endpoint, host, subuser string) rest.Request { + return createSendGridRequest(sendGridOptions{key, endpoint, host, subuser}) +} + +// createSendGridRequest create Request +// @return [Request] a default request object +func createSendGridRequest(sgOptions sendGridOptions) rest.Request { + options := options{ + "Bearer " + sgOptions.Key, + sgOptions.Endpoint, + sgOptions.Host, + sgOptions.Subuser, + } + + if options.Host == "" { + options.Host = "https://api.sendgrid.com" + } + + return requestNew(options) +} + +// NewSendClient constructs a new Twilio SendGrid client given an API key +func NewSendClient(key string) *Client { + request := GetRequest(key, "/v3/mail/send", "") + request.Method = "POST" + return &Client{request} +} + +// GetRequestSubuser like NewSendClient but with On-Behalf of Subuser +// @return [Client] +func NewSendClientSubuser(key, subuser string) *Client { + request := GetRequestSubuser(key, "/v3/mail/send", "", subuser) + request.Method = "POST" + return &Client{request} +} diff --git a/vendor/github.com/sendgrid/sendgrid-go/twilio_email.go b/vendor/github.com/sendgrid/sendgrid-go/twilio_email.go new file mode 100644 index 0000000..52981a4 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/twilio_email.go @@ -0,0 +1,41 @@ +package sendgrid + +import ( + "encoding/base64" + + "github.com/sendgrid/rest" +) + +// TwilioEmailOptions for GetTwilioEmailRequest +type TwilioEmailOptions struct { + Username string + Password string + Endpoint string + Host string +} + +// NewTwilioEmailSendClient constructs a new Twilio Email client given a username and password +func NewTwilioEmailSendClient(username, password string) *Client { + request := GetTwilioEmailRequest(TwilioEmailOptions{Username: username, Password: password, Endpoint: "/v3/mail/send"}) + request.Method = "POST" + return &Client{request} +} + +// 
GetTwilioEmailRequest creates a Request
+// @return [Request] a default request object
+func GetTwilioEmailRequest(twilioEmailOptions TwilioEmailOptions) rest.Request {
+	credentials := twilioEmailOptions.Username + ":" + twilioEmailOptions.Password
+	encodedCreds := base64.StdEncoding.EncodeToString([]byte(credentials))
+
+	options := options{
+		Auth:     "Basic " + encodedCreds,
+		Endpoint: twilioEmailOptions.Endpoint,
+		Host:     twilioEmailOptions.Host,
+	}
+
+	if options.Host == "" {
+		options.Host = "https://email.twilio.com"
+	}
+
+	return requestNew(options)
+}
diff --git a/vendor/github.com/sendgrid/sendgrid-go/twilio_sendgrid_logo.png b/vendor/github.com/sendgrid/sendgrid-go/twilio_sendgrid_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..a4c22239ac0cc70e10a51f828390d713edc8a29b
GIT binary patch
literal 14596
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
new file mode 100644
index 0000000..2b00ddb
--- /dev/null
+++ b/vendor/golang.org/x/crypto/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
new file mode 100644
index 0000000..1fbd3e9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go new file mode 100644 index 0000000..6e6c9d1 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -0,0 +1,1098 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec. +// The intial implementation was based on ACME draft-02 and +// is now being extended to comply with RFC 8555. +// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 +// and https://tools.ietf.org/html/rfc8555 for details. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package acme + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strings" + "sync" + "time" +) + +const ( + // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. 
+ LetsEncryptURL = "https://acme-v02.api.letsencrypt.org/directory" + + // ALPNProto is the ALPN protocol name used by a CA server when validating + // tls-alpn-01 challenges. + // + // Package users must ensure their servers can negotiate the ACME ALPN in + // order for tls-alpn-01 challenge verifications to succeed. + // See the crypto/tls package's Config.NextProtos field. + ALPNProto = "acme-tls/1" +) + +// idPeACMEIdentifier is the OID for the ACME extension for the TLS-ALPN challenge. +// https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05#section-5.1 +var idPeACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in DER bytes + // Used for decoding certs from application/pem-certificate-chain response, + // the default when in RFC mode. + maxCertChainSize = maxCertSize * maxChainLen + + // Max number of collected nonces kept in memory. + // Expect usual peak of 1 or 2. + maxNonces = 100 +) + +// Client is an ACME client. +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +// +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + // + // The following algorithms are supported: + // RS256, ES256, ES384 and ES512. + // See RFC7518 for more details about the algorithms. + Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + // RetryBackoff computes the duration after which the nth retry of a failed request + // should occur. The value of n for the first call on failure is 1. + // The values of r and resp are the request and response of the last failed attempt. + // If the returned value is negative or zero, no more retries are done and an error + // is returned to the caller of the original method. + // + // Requests which result in a 4xx client error are not retried, + // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. + // + // If RetryBackoff is nil, a truncated exponential backoff algorithm + // with the ceiling of 10 seconds is used, where each subsequent retry n + // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), + // preferring the former if "Retry-After" header is found in the resp. + // The jitter is a random value up to 1 second. + RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration + + // UserAgent is prepended to the User-Agent header sent to the ACME server, + // which by default is this package's name and version. + // + // Reusable libraries and tools in particular should set this value to be + // identifiable by the server, in case they are causing issues. 
+ UserAgent string + + cacheMu sync.Mutex + dir *Directory // cached result of Client's Discover method + kid keyID // cached Account.URI obtained from registerRFC or getAccountRFC + + noncesMu sync.Mutex + nonces map[string]struct{} // nonces collected from previous responses +} + +// accountKID returns a key ID associated with c.Key, the account identity +// provided by the CA during RFC based registration. +// It assumes c.Discover has already been called. +// +// accountKID requires at most one network roundtrip. +// It caches only successful result. +// +// When in pre-RFC mode or when c.getRegRFC responds with an error, accountKID +// returns noKeyID. +func (c *Client) accountKID(ctx context.Context) keyID { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + if !c.dir.rfcCompliant() { + return noKeyID + } + if c.kid != noKeyID { + return c.kid + } + a, err := c.getRegRFC(ctx) + if err != nil { + return noKeyID + } + c.kid = keyID(a.URI) + return c.kid +} + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. +func (c *Client) Discover(ctx context.Context) (Directory, error) { + c.cacheMu.Lock() + defer c.cacheMu.Unlock() + if c.dir != nil { + return *c.dir, nil + } + + res, err := c.get(ctx, c.directoryURL(), wantStatus(http.StatusOK)) + if err != nil { + return Directory{}, err + } + defer res.Body.Close() + c.addNonce(res.Header) + + var v struct { + Reg string `json:"new-reg"` + RegRFC string `json:"newAccount"` + Authz string `json:"new-authz"` + AuthzRFC string `json:"newAuthz"` + OrderRFC string `json:"newOrder"` + Cert string `json:"new-cert"` + Revoke string `json:"revoke-cert"` + RevokeRFC string `json:"revokeCert"` + NonceRFC string `json:"newNonce"` + KeyChangeRFC string `json:"keyChange"` + Meta struct { + Terms string `json:"terms-of-service"` + TermsRFC string `json:"termsOfService"` + WebsiteRFC string `json:"website"` + CAA []string `json:"caa-identities"` + CAARFC []string `json:"caaIdentities"` + ExternalAcctRFC bool `json:"externalAccountRequired"` + } + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return Directory{}, err + } + if v.OrderRFC == "" { + // Non-RFC compliant ACME CA. + c.dir = &Directory{ + RegURL: v.Reg, + AuthzURL: v.Authz, + CertURL: v.Cert, + RevokeURL: v.Revoke, + Terms: v.Meta.Terms, + Website: v.Meta.WebsiteRFC, + CAA: v.Meta.CAA, + } + return *c.dir, nil + } + // RFC compliant ACME CA. + c.dir = &Directory{ + RegURL: v.RegRFC, + AuthzURL: v.AuthzRFC, + OrderURL: v.OrderRFC, + RevokeURL: v.RevokeRFC, + NonceURL: v.NonceRFC, + KeyChangeURL: v.KeyChangeRFC, + Terms: v.Meta.TermsRFC, + Website: v.Meta.WebsiteRFC, + CAA: v.Meta.CAARFC, + ExternalAccountRequired: v.Meta.ExternalAcctRFC, + } + return *c.dir, nil +} + +func (c *Client) directoryURL() string { + if c.DirectoryURL != "" { + return c.DirectoryURL + } + return LetsEncryptURL +} + +// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. +// It is incompatible with RFC 8555. Callers should use CreateOrderCert when interfacing +// with an RFC-compliant CA. +// +// The exp argument indicates the desired certificate validity duration. CA may issue a certificate +// with a different duration. +// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. 
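// Illustrative sketch, not part of the vendored upstream file: constructing a
// Client with a freshly generated ECDSA account key and discovering the CA's
// endpoints. The staging DirectoryURL below is an assumed example value; leave
// DirectoryURL empty to use LetsEncryptURL.
//
//  package main
//
//  import (
//      "context"
//      "crypto/ecdsa"
//      "crypto/elliptic"
//      "crypto/rand"
//      "fmt"
//      "log"
//
//      "golang.org/x/crypto/acme"
//  )
//
//  func main() {
//      // The account key identifies the ACME account; RSA keys are also accepted.
//      key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//      if err != nil {
//          log.Fatal(err)
//      }
//      client := &acme.Client{
//          Key:          key,
//          DirectoryURL: "https://acme-staging-v02.api.letsencrypt.org/directory", // assumed example endpoint
//      }
//      dir, err := client.Discover(context.Background())
//      if err != nil {
//          log.Fatal(err)
//      }
//      fmt.Println("terms of service:", dir.Terms)
//  }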
+// +// In the case where CA server does not provide the issued certificate in the response, +// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. +// In such a scenario, the caller can cancel the polling with ctx. +// +// CreateCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. +func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { + return nil, "", err + } + + req := struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{ + Resource: "new-cert", + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + now := timeNow() + req.NotBefore = now.Format(time.RFC3339) + if exp > 0 { + req.NotAfter = now.Add(exp).Format(time.RFC3339) + } + + res, err := c.post(ctx, nil, c.dir.CertURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + + curl := res.Header.Get("Location") // cert permanent URL + if res.ContentLength == 0 { + // no cert in the body; poll until we get it + cert, err := c.FetchCert(ctx, curl, bundle) + return cert, curl, err + } + // slurp issued cert and CA chain, if requested + cert, err := c.responseCert(ctx, res, bundle) + return cert, curl, err +} + +// FetchCert retrieves already issued certificate from the given url, in DER format. +// It retries the request until the certificate is successfully retrieved, +// context is cancelled by the caller or an error response is received. +// +// If the bundle argument is true, the returned value also contains the CA (issuer) +// certificate chain. +// +// FetchCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid +// and has expected features. +func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + if dir.rfcCompliant() { + return c.fetchCertRFC(ctx, url, bundle) + } + + // Legacy non-authenticated GET request. + res, err := c.get(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + return c.responseCert(ctx, res, bundle) +} + +// RevokeCert revokes a previously issued certificate cert, provided in DER format. +// +// The key argument, used to sign the request, must be authorized +// to revoke the certificate. It's up to the CA to decide which keys are authorized. +// For instance, the key pair of the certificate may be authorized. +// If the key is nil, c.Key is used instead. +func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + dir, err := c.Discover(ctx) + if err != nil { + return err + } + if dir.rfcCompliant() { + return c.revokeCertRFC(ctx, key, cert, reason) + } + + // Legacy CA. 
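// Illustrative sketch, not part of the vendored upstream file: the legacy
// (pre-RFC) issuance call, building a DER-encoded CSR and passing it to
// CreateCert. The issueLegacy helper, package name and the domain parameter
// are assumptions; the domain must already be authorized, and RFC 8555 CAs
// use CreateOrderCert instead.
//
//  package example
//
//  import (
//      "context"
//      "crypto/ecdsa"
//      "crypto/elliptic"
//      "crypto/rand"
//      "crypto/x509"
//      "crypto/x509/pkix"
//
//      "golang.org/x/crypto/acme"
//  )
//
//  // issueLegacy requests a certificate for domain from a pre-RFC CA.
//  // client must hold a registered account key and the domain must be authorized.
//  func issueLegacy(ctx context.Context, client *acme.Client, domain string) (der [][]byte, certURL string, err error) {
//      // The certificate key is distinct from the ACME account key.
//      certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//      if err != nil {
//          return nil, "", err
//      }
//      csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
//          Subject:  pkix.Name{CommonName: domain},
//          DNSNames: []string{domain},
//      }, certKey)
//      if err != nil {
//          return nil, "", err
//      }
//      // exp=0 lets the CA choose the validity period; bundle=true appends the issuer chain.
//      return client.CreateCert(ctx, csr, 0, true)
//  }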
+ body := &struct { + Resource string `json:"resource"` + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Resource: "revoke-cert", + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + res, err := c.post(ctx, key, dir.RevokeURL, body, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service +// during account registration. See Register method of Client for more details. +func AcceptTOS(tosURL string) bool { return true } + +// Register creates a new account with the CA using c.Key. +// It returns the registered account. The account acct is not modified. +// +// The registration may require the caller to agree to the CA's Terms of Service (TOS). +// If so, and the account has not indicated the acceptance of the terms (see Account for details), +// Register calls prompt with a TOS URL provided by the CA. Prompt should report +// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. +// +// When interfacing with an RFC-compliant CA, non-RFC 8555 fields of acct are ignored +// and prompt is called if Directory's Terms field is non-zero. +// Also see Error's Instance field for when a CA requires already registered accounts to agree +// to an updated Terms of Service. +func (c *Client) Register(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + if dir.rfcCompliant() { + return c.registerRFC(ctx, acct, prompt) + } + + // Legacy ACME draft registration flow. + a, err := c.doReg(ctx, dir.RegURL, "new-reg", acct) + if err != nil { + return nil, err + } + var accept bool + if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { + accept = prompt(a.CurrentTerms) + } + if accept { + a.AgreedTerms = a.CurrentTerms + a, err = c.UpdateReg(ctx, a) + } + return a, err +} + +// GetReg retrieves an existing account associated with c.Key. +// +// The url argument is an Account URI used with pre-RFC 8555 CAs. +// It is ignored when interfacing with an RFC-compliant CA. +func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + if dir.rfcCompliant() { + return c.getRegRFC(ctx) + } + + // Legacy CA. + a, err := c.doReg(ctx, url, "reg", nil) + if err != nil { + return nil, err + } + a.URI = url + return a, nil +} + +// UpdateReg updates an existing registration. +// It returns an updated account copy. The provided account is not modified. +// +// When interfacing with RFC-compliant CAs, a.URI is ignored and the account URL +// associated with c.Key is used instead. +func (c *Client) UpdateReg(ctx context.Context, acct *Account) (*Account, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + if dir.rfcCompliant() { + return c.updateRegRFC(ctx, acct) + } + + // Legacy CA. + uri := acct.URI + a, err := c.doReg(ctx, uri, "reg", acct) + if err != nil { + return nil, err + } + a.URI = uri + return a, nil +} + +// Authorize performs the initial step in the pre-authorization flow, +// as opposed to order-based flow. +// The caller will then need to choose from and perform a set of returned +// challenges using c.Accept in order to successfully complete authorization. 
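// Illustrative sketch, not part of the vendored upstream file: registering the
// account key with the CA and auto-accepting the Terms of Service. The mailto
// contact address and the register helper are assumed placeholders.
//
//  package example
//
//  import (
//      "context"
//
//      "golang.org/x/crypto/acme"
//  )
//
//  func register(ctx context.Context, client *acme.Client) (*acme.Account, error) {
//      acct := &acme.Account{
//          Contact: []string{"mailto:admin@example.com"}, // assumed contact address
//      }
//      // acme.AcceptTOS reports agreement with whatever TOS URL the CA presents.
//      return client.Register(ctx, acct, acme.AcceptTOS)
//  }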
+// +// Once complete, the caller can use AuthorizeOrder which the CA +// should provision with the already satisfied authorization. +// For pre-RFC CAs, the caller can proceed directly to requesting a certificate +// using CreateCert method. +// +// If an authorization has been previously granted, the CA may return +// a valid authorization which has its Status field set to StatusValid. +// +// More about pre-authorization can be found at +// https://tools.ietf.org/html/rfc8555#section-7.4.1. +func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { + return c.authorize(ctx, "dns", domain) +} + +// AuthorizeIP is the same as Authorize but requests IP address authorization. +// Clients which successfully obtain such authorization may request to issue +// a certificate for IP addresses. +// +// See the ACME spec extension for more details about IP address identifiers: +// https://tools.ietf.org/html/draft-ietf-acme-ip. +func (c *Client) AuthorizeIP(ctx context.Context, ipaddr string) (*Authorization, error) { + return c.authorize(ctx, "ip", ipaddr) +} + +func (c *Client) authorize(ctx context.Context, typ, val string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + type authzID struct { + Type string `json:"type"` + Value string `json:"value"` + } + req := struct { + Resource string `json:"resource"` + Identifier authzID `json:"identifier"` + }{ + Resource: "new-authz", + Identifier: authzID{Type: typ, Value: val}, + } + res, err := c.post(ctx, nil, c.dir.AuthzURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + if v.Status != StatusPending && v.Status != StatusValid { + return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) + } + return v.authorization(res.Header.Get("Location")), nil +} + +// GetAuthorization retrieves an authorization identified by the given URL. +// +// If a caller needs to poll an authorization until its status is final, +// see the WaitAuthorization method. +func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + + var res *http.Response + if dir.rfcCompliant() { + res, err = c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + } else { + res, err = c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + } + if err != nil { + return nil, err + } + defer res.Body.Close() + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.authorization(url), nil +} + +// RevokeAuthorization relinquishes an existing authorization identified +// by the given URL. +// The url argument is an Authorization.URI value. +// +// If successful, the caller will be required to obtain a new authorization +// using the Authorize or AuthorizeOrder methods before being able to request +// a new certificate for the domain associated with the authorization. +// +// It does not revoke existing certificates. +func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { + // Required for c.accountKID() when in RFC mode. 
+ if _, err := c.Discover(ctx); err != nil { + return err + } + + req := struct { + Resource string `json:"resource"` + Status string `json:"status"` + Delete bool `json:"delete"` + }{ + Resource: "authz", + Status: "deactivated", + Delete: true, + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// WaitAuthorization polls an authorization at the given URL +// until it is in one of the final states, StatusValid or StatusInvalid, +// the ACME CA responded with a 4xx error code, or the context is done. +// +// It returns a non-nil Authorization only if its Status is StatusValid. +// In all other cases WaitAuthorization returns an error. +// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. +func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { + // Required for c.accountKID() when in RFC mode. + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + getfn := c.postAsGet + if !dir.rfcCompliant() { + getfn = c.get + } + + for { + res, err := getfn(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + + var raw wireAuthz + err = json.NewDecoder(res.Body).Decode(&raw) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case raw.Status == StatusValid: + return raw.authorization(url), nil + case raw.Status == StatusInvalid: + return nil, raw.error(url) + } + + // Exponential backoff is implemented in c.get above. + // This is just to prevent continuously hitting the CA + // while waiting for a final authorization status. + d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Given that the fastest challenges TLS-SNI and HTTP-01 + // require a CA to make at least 1 network round trip + // and most likely persist a challenge state, + // this default delay seems reasonable. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. + } + } +} + +// GetChallenge retrieves the current status of an challenge. +// +// A client typically polls a challenge status using this method. +func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { + // Required for c.accountKID() when in RFC mode. + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + + getfn := c.postAsGet + if !dir.rfcCompliant() { + getfn = c.get + } + res, err := getfn(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + + defer res.Body.Close() + v := wireChallenge{URI: url} + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// Accept informs the server that the client accepts one of its challenges +// previously obtained with c.Authorize. +// +// The server will then perform the validation asynchronously. +func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { + // Required for c.accountKID() when in RFC mode. 
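// Illustrative sketch, not part of the vendored upstream file: the
// pre-authorization flow described above, choosing the "http-01" challenge.
// It assumes the returned Authorization lists its offered challenges in a
// Challenges field, and that the caller provisions the challenge response
// (see the HTTP01ChallengeResponse/HTTP01ChallengePath helpers below) via the
// supplied provision callback before calling Accept.
//
//  package example
//
//  import (
//      "context"
//      "errors"
//
//      "golang.org/x/crypto/acme"
//  )
//
//  func authorizeHTTP01(ctx context.Context, client *acme.Client, domain string, provision func(token string) error) error {
//      authz, err := client.Authorize(ctx, domain)
//      if err != nil {
//          return err
//      }
//      if authz.Status == acme.StatusValid {
//          return nil // already authorized
//      }
//      var chal *acme.Challenge
//      for _, c := range authz.Challenges {
//          if c.Type == "http-01" {
//              chal = c
//              break
//          }
//      }
//      if chal == nil {
//          return errors.New("no http-01 challenge offered")
//      }
//      // Make the key authorization reachable at the well-known path first.
//      if err := provision(chal.Token); err != nil {
//          return err
//      }
//      if _, err := client.Accept(ctx, chal); err != nil {
//          return err
//      }
//      // Block until the CA reaches a final status, or ctx is done.
//      _, err = client.WaitAuthorization(ctx, authz.URI)
//      return err
//  }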
+ dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + + var req interface{} = json.RawMessage("{}") // RFC-compliant CA + if !dir.rfcCompliant() { + auth, err := keyAuth(c.Key.Public(), chal.Token) + if err != nil { + return nil, err + } + req = struct { + Resource string `json:"resource"` + Type string `json:"type"` + Auth string `json:"keyAuthorization"` + }{ + Resource: "challenge", + Type: chal.Type, + Auth: auth, + } + } + res, err := c.post(ctx, nil, chal.URI, req, wantStatus( + http.StatusOK, // according to the spec + http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireChallenge + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. +// A TXT record containing the returned value must be provisioned under +// "_acme-challenge" name of the domain being validated. +// +// The token argument is a Challenge.Token value. +func (c *Client) DNS01ChallengeRecord(token string) (string, error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(ka)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} + +// HTTP01ChallengeResponse returns the response for an http-01 challenge. +// Servers should respond with the value to HTTP requests at the URL path +// provided by HTTP01ChallengePath to validate the challenge and prove control +// over a domain name. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { + return keyAuth(c.Key.Public(), token) +} + +// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge +// should be provided by the servers. +// The response value can be obtained with HTTP01ChallengeResponse. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// +// Deprecated: This challenge type is unused in both draft-02 and RFC versions of ACME spec. +func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b := sha256.Sum256([]byte(ka)) + h := hex.EncodeToString(b[:]) + name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) + cert, err = tlsChallengeCert([]string{name}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, name, nil +} + +// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// +// Deprecated: This challenge type is unused in both draft-02 and RFC versions of ACME spec. 
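// Illustrative sketch, not part of the vendored upstream file: publishing
// challenge responses with the helpers above. The mux wiring, helper name and
// log output are assumptions about the surrounding server, not part of this API.
//
//  package example
//
//  import (
//      "io"
//      "log"
//      "net/http"
//
//      "golang.org/x/crypto/acme"
//  )
//
//  func publishChallenges(mux *http.ServeMux, client *acme.Client, httpToken, dnsToken string) error {
//      // http-01: serve the key authorization at the well-known path on port 80.
//      body, err := client.HTTP01ChallengeResponse(httpToken)
//      if err != nil {
//          return err
//      }
//      mux.HandleFunc(client.HTTP01ChallengePath(httpToken), func(w http.ResponseWriter, r *http.Request) {
//          io.WriteString(w, body)
//      })
//
//      // dns-01: this value goes into a TXT record under _acme-challenge.<domain>.
//      txt, err := client.DNS01ChallengeRecord(dnsToken)
//      if err != nil {
//          return err
//      }
//      log.Printf("provision TXT record value: %s", txt)
//      return nil
//  }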
+func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + b := sha256.Sum256([]byte(token)) + h := hex.EncodeToString(b[:]) + sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) + + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b = sha256.Sum256([]byte(ka)) + h = hex.EncodeToString(b[:]) + sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) + + cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, sanA, nil +} + +// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-ALPN-01 see +// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol +// has been specified. +func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, err + } + shasum := sha256.Sum256([]byte(ka)) + extValue, err := asn1.Marshal(shasum[:]) + if err != nil { + return tls.Certificate{}, err + } + acmeExtension := pkix.Extension{ + Id: idPeACMEIdentifier, + Critical: true, + Value: extValue, + } + + tmpl := defaultTLSChallengeCertTemplate() + + var newOpt []CertOption + for _, o := range opt { + switch o := o.(type) { + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + newOpt = append(newOpt, o) + } + } + tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) + newOpt = append(newOpt, WithTemplate(tmpl)) + return tlsChallengeCert([]string{domain}, newOpt) +} + +// doReg sends all types of registration requests the old way (pre-RFC world). +// The type of request is identified by typ argument, which is a "resource" +// in the ACME spec terms. +// +// A non-nil acct argument indicates whether the intention is to mutate data +// of the Account. Only Contact and Agreement of its fields are used +// in such cases. 
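// Illustrative sketch, not part of the vendored upstream file: answering a
// tls-alpn-01 validation from a TLS server's GetCertificate callback. The
// tokenFor lookup and the fallback certificate are assumptions; only
// TLSALPN01ChallengeCert and ALPNProto come from this package.
//
//  package example
//
//  import (
//      "crypto/tls"
//
//      "golang.org/x/crypto/acme"
//  )
//
//  func alpnConfig(client *acme.Client, tokenFor func(domain string) string, fallback *tls.Certificate) *tls.Config {
//      return &tls.Config{
//          // The ACME ALPN protocol must be advertised for validation to work.
//          NextProtos: []string{"h2", "http/1.1", acme.ALPNProto},
//          GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
//              if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto {
//                  cert, err := client.TLSALPN01ChallengeCert(tokenFor(hello.ServerName), hello.ServerName)
//                  if err != nil {
//                      return nil, err
//                  }
//                  return &cert, nil
//              }
//              return fallback, nil
//          },
//      }
//  }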
+func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { + req := struct { + Resource string `json:"resource"` + Contact []string `json:"contact,omitempty"` + Agreement string `json:"agreement,omitempty"` + }{ + Resource: typ, + } + if acct != nil { + req.Contact = acct.Contact + req.Agreement = acct.AgreedTerms + } + res, err := c.post(ctx, nil, url, req, wantStatus( + http.StatusOK, // updates and deletes + http.StatusCreated, // new account creation + http.StatusAccepted, // Let's Encrypt divergent implementation + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v struct { + Contact []string + Agreement string + Authorizations string + Certificates string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + var tos string + if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { + tos = v[0] + } + var authz string + if v := linkHeader(res.Header, "next"); len(v) > 0 { + authz = v[0] + } + return &Account{ + URI: res.Header.Get("Location"), + Contact: v.Contact, + AgreedTerms: v.Agreement, + CurrentTerms: tos, + Authz: authz, + Authorizations: v.Authorizations, + Certificates: v.Certificates, + }, nil +} + +// popNonce returns a nonce value previously stored with c.addNonce +// or fetches a fresh one from c.dir.NonceURL. +// If NonceURL is empty, it first tries c.directoryURL() and, failing that, +// the provided url. +func (c *Client) popNonce(ctx context.Context, url string) (string, error) { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) == 0 { + if c.dir != nil && c.dir.NonceURL != "" { + return c.fetchNonce(ctx, c.dir.NonceURL) + } + dirURL := c.directoryURL() + v, err := c.fetchNonce(ctx, dirURL) + if err != nil && url != dirURL { + v, err = c.fetchNonce(ctx, url) + } + return v, err + } + var nonce string + for nonce = range c.nonces { + delete(c.nonces, nonce) + break + } + return nonce, nil +} + +// clearNonces clears any stored nonces +func (c *Client) clearNonces() { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + c.nonces = make(map[string]struct{}) +} + +// addNonce stores a nonce value found in h (if any) for future use. +func (c *Client) addNonce(h http.Header) { + v := nonceFromHeader(h) + if v == "" { + return + } + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) >= maxNonces { + return + } + if c.nonces == nil { + c.nonces = make(map[string]struct{}) + } + c.nonces[v] = struct{}{} +} + +func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { + r, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return "", err + } + resp, err := c.doNoRetry(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + nonce := nonceFromHeader(resp.Header) + if nonce == "" { + if resp.StatusCode > 299 { + return "", responseError(resp) + } + return "", errors.New("acme: nonce not found") + } + return nonce, nil +} + +func nonceFromHeader(h http.Header) string { + return h.Get("Replay-Nonce") +} + +func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, fmt.Errorf("acme: response stream: %v", err) + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + cert := [][]byte{b} + if !bundle { + return cert, nil + } + + // Append CA chain cert(s). 
+ // At least one is required according to the spec: + // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 + up := linkHeader(res.Header, "up") + if len(up) == 0 { + return nil, errors.New("acme: rel=up link not found") + } + if len(up) > maxChainLen { + return nil, errors.New("acme: rel=up link is too large") + } + for _, url := range up { + cc, err := c.chainCert(ctx, url, 0) + if err != nil { + return nil, err + } + cert = append(cert, cc...) + } + return cert, nil +} + +// chainCert fetches CA certificate chain recursively by following "up" links. +// Each recursive call increments the depth by 1, resulting in an error +// if the recursion level reaches maxChainLen. +// +// First chainCert call starts with depth of 0. +func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) { + if depth >= maxChainLen { + return nil, errors.New("acme: certificate chain is too deep") + } + + res, err := c.get(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, err + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + chain := [][]byte{b} + + uplink := linkHeader(res.Header, "up") + if len(uplink) > maxChainLen { + return nil, errors.New("acme: certificate chain is too large") + } + for _, up := range uplink { + cc, err := c.chainCert(ctx, up, depth+1) + if err != nil { + return nil, err + } + chain = append(chain, cc...) + } + + return chain, nil +} + +// linkHeader returns URI-Reference values of all Link headers +// with relation-type rel. +// See https://tools.ietf.org/html/rfc5988#section-5 for details. +func linkHeader(h http.Header, rel string) []string { + var links []string + for _, v := range h["Link"] { + parts := strings.Split(v, ";") + for _, p := range parts { + p = strings.TrimSpace(p) + if !strings.HasPrefix(p, "rel=") { + continue + } + if v := strings.Trim(p[4:], `"`); v == rel { + links = append(links, strings.Trim(parts[0], "<>")) + } + } + } + return links +} + +// keyAuth generates a key authorization string for a given token. +func keyAuth(pub crypto.PublicKey, token string) (string, error) { + th, err := JWKThumbprint(pub) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", token, th), nil +} + +// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges. +func defaultTLSChallengeCertTemplate() *x509.Certificate { + return &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } +} + +// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges +// with the given SANs and auto-generated public/private key pair. +// The Subject Common Name is set to the first SAN to aid debugging. +// To create a cert with a custom key pair, specify WithKey option. 
+func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { + var key crypto.Signer + tmpl := defaultTLSChallengeCertTemplate() + for _, o := range opt { + switch o := o.(type) { + case *certOptKey: + if key != nil { + return tls.Certificate{}, errors.New("acme: duplicate key option") + } + key = o.key + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + // package's fault, if we let this happen: + panic(fmt.Sprintf("unsupported option type %T", o)) + } + } + if key == nil { + var err error + if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return tls.Certificate{}, err + } + } + tmpl.DNSNames = san + if len(san) > 0 { + tmpl.Subject.CommonName = san[0] + } + + der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{der}, + PrivateKey: key, + }, nil +} + +// encodePEM returns b encoded as PEM with block of type typ. +func encodePEM(typ string, b []byte) []byte { + pb := &pem.Block{Type: typ, Bytes: b} + return pem.EncodeToMemory(pb) +} + +// timeNow is useful for testing for fixed current time. +var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go new file mode 100644 index 0000000..2ea9e23 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -0,0 +1,1249 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package autocert provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package autocert + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + mathrand "math/rand" + "net" + "net/http" + "path" + "strings" + "sync" + "time" + + "golang.org/x/crypto/acme" + "golang.org/x/net/idna" +) + +// DefaultACMEDirectory is the default ACME Directory URL used when the Manager's Client is nil. +const DefaultACMEDirectory = "https://acme-v02.api.letsencrypt.org/directory" + +// createCertRetryAfter is how much time to wait before removing a failed state +// entry due to an unsuccessful createCert call. +// This is a variable instead of a const for testing. +// TODO: Consider making it configurable or an exp backoff? +var createCertRetryAfter = time.Minute + +// pseudoRand is safe for concurrent use. +var pseudoRand *lockedMathRand + +func init() { + src := mathrand.NewSource(time.Now().UnixNano()) + pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} +} + +// AcceptTOS is a Manager.Prompt function that always returns true to +// indicate acceptance of the CA's Terms of Service during account +// registration. +func AcceptTOS(tosURL string) bool { return true } + +// HostPolicy specifies which host names the Manager is allowed to respond to. +// It returns a non-nil error if the host should be rejected. +// The returned error is accessible via tls.Conn.Handshake and its callers. +// See Manager's HostPolicy field and GetCertificate method docs for more details. 
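// Illustrative sketch, not part of the vendored upstream file: a custom
// HostPolicy allowing a base domain and its direct subdomains, since
// HostWhitelist below only does exact matches. The helper name and base-domain
// value are assumed examples.
//
//  package example
//
//  import (
//      "context"
//      "fmt"
//      "strings"
//
//      "golang.org/x/crypto/acme/autocert"
//  )
//
//  func allowDomainAndSubdomains(base string) autocert.HostPolicy {
//      return func(_ context.Context, host string) error {
//          if host == base || strings.HasSuffix(host, "."+base) {
//              return nil
//          }
//          return fmt.Errorf("autocert: host %q not allowed", host)
//      }
//  }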
+type HostPolicy func(ctx context.Context, host string) error + +// HostWhitelist returns a policy where only the specified host names are allowed. +// Only exact matches are currently supported. Subdomains, regexp or wildcard +// will not match. +// +// Note that all hosts will be converted to Punycode via idna.Lookup.ToASCII so that +// Manager.GetCertificate can handle the Unicode IDN and mixedcase hosts correctly. +// Invalid hosts will be silently ignored. +func HostWhitelist(hosts ...string) HostPolicy { + whitelist := make(map[string]bool, len(hosts)) + for _, h := range hosts { + if h, err := idna.Lookup.ToASCII(h); err == nil { + whitelist[h] = true + } + } + return func(_ context.Context, host string) error { + if !whitelist[host] { + return fmt.Errorf("acme/autocert: host %q not configured in HostWhitelist", host) + } + return nil + } +} + +// defaultHostPolicy is used when Manager.HostPolicy is not set. +func defaultHostPolicy(context.Context, string) error { + return nil +} + +// Manager is a stateful certificate manager built on top of acme.Client. +// It obtains and refreshes certificates automatically using "tls-alpn-01" +// or "http-01" challenge types, as well as providing them to a TLS server +// via tls.Config. +// +// You must specify a cache implementation, such as DirCache, +// to reuse obtained certificates across program restarts. +// Otherwise your server is very likely to exceed the certificate +// issuer's request rate limits. +type Manager struct { + // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). + // The registration may require the caller to agree to the CA's TOS. + // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report + // whether the caller agrees to the terms. + // + // To always accept the terms, the callers can use AcceptTOS. + Prompt func(tosURL string) bool + + // Cache optionally stores and retrieves previously-obtained certificates + // and other state. If nil, certs will only be cached for the lifetime of + // the Manager. Multiple Managers can share the same Cache. + // + // Using a persistent Cache, such as DirCache, is strongly recommended. + Cache Cache + + // HostPolicy controls which domains the Manager will attempt + // to retrieve new certificates for. It does not affect cached certs. + // + // If non-nil, HostPolicy is called before requesting a new cert. + // If nil, all hosts are currently allowed. This is not recommended, + // as it opens a potential attack where clients connect to a server + // by IP address and pretend to be asking for an incorrect host name. + // Manager will attempt to obtain a certificate for that host, incorrectly, + // eventually reaching the CA's rate limit for certificate requests + // and making it impossible to obtain actual certificates. + // + // See GetCertificate for more details. + HostPolicy HostPolicy + + // RenewBefore optionally specifies how early certificates should + // be renewed before they expire. + // + // If zero, they're renewed 30 days before expiration. + RenewBefore time.Duration + + // Client is used to perform low-level operations, such as account registration + // and requesting new certificates. + // + // If Client is nil, a zero-value acme.Client is used with DefaultACMEDirectory + // as the directory endpoint. + // If the Client.Key is nil, a new ECDSA P-256 key is generated and, + // if Cache is not nil, stored in cache. 
+ // + // Mutating the field after the first call of GetCertificate method will have no effect. + Client *acme.Client + + // Email optionally specifies a contact email address. + // This is used by CAs, such as Let's Encrypt, to notify about problems + // with issued certificates. + // + // If the Client's account key is already registered, Email is not used. + Email string + + // ForceRSA used to make the Manager generate RSA certificates. It is now ignored. + // + // Deprecated: the Manager will request the correct type of certificate based + // on what each client supports. + ForceRSA bool + + // ExtraExtensions are used when generating a new CSR (Certificate Request), + // thus allowing customization of the resulting certificate. + // For instance, TLS Feature Extension (RFC 7633) can be used + // to prevent an OCSP downgrade attack. + // + // The field value is passed to crypto/x509.CreateCertificateRequest + // in the template's ExtraExtensions field as is. + ExtraExtensions []pkix.Extension + + clientMu sync.Mutex + client *acme.Client // initialized by acmeClient method + + stateMu sync.Mutex + state map[certKey]*certState + + // renewal tracks the set of domains currently running renewal timers. + renewalMu sync.Mutex + renewal map[certKey]*domainRenewal + + // challengeMu guards tryHTTP01, certTokens and httpTokens. + challengeMu sync.RWMutex + // tryHTTP01 indicates whether the Manager should try "http-01" challenge type + // during the authorization flow. + tryHTTP01 bool + // httpTokens contains response body values for http-01 challenges + // and is keyed by the URL path at which a challenge response is expected + // to be provisioned. + // The entries are stored for the duration of the authorization flow. + httpTokens map[string][]byte + // certTokens contains temporary certificates for tls-alpn-01 challenges + // and is keyed by the domain name which matches the ClientHello server name. + // The entries are stored for the duration of the authorization flow. + certTokens map[string]*tls.Certificate + + // nowFunc, if not nil, returns the current time. This may be set for + // testing purposes. + nowFunc func() time.Time +} + +// certKey is the key by which certificates are tracked in state, renewal and cache. +type certKey struct { + domain string // without trailing dot + isRSA bool // RSA cert for legacy clients (as opposed to default ECDSA) + isToken bool // tls-based challenge token cert; key type is undefined regardless of isRSA +} + +func (c certKey) String() string { + if c.isToken { + return c.domain + "+token" + } + if c.isRSA { + return c.domain + "+rsa" + } + return c.domain +} + +// TLSConfig creates a new TLS config suitable for net/http.Server servers, +// supporting HTTP/2 and the tls-alpn-01 ACME challenge type. +func (m *Manager) TLSConfig() *tls.Config { + return &tls.Config{ + GetCertificate: m.GetCertificate, + NextProtos: []string{ + "h2", "http/1.1", // enable HTTP/2 + acme.ALPNProto, // enable tls-alpn ACME challenges + }, + } +} + +// GetCertificate implements the tls.Config.GetCertificate hook. +// It provides a TLS certificate for hello.ServerName host, including answering +// tls-alpn-01 challenges. +// All other fields of hello are ignored. +// +// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting +// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation. +// The error is propagated back to the caller of GetCertificate and is user-visible. +// This does not affect cached certs. 
See HostPolicy field description for more details. +// +// If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will +// also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler for http-01. +func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if m.Prompt == nil { + return nil, errors.New("acme/autocert: Manager.Prompt not set") + } + + name := hello.ServerName + if name == "" { + return nil, errors.New("acme/autocert: missing server name") + } + if !strings.Contains(strings.Trim(name, "."), ".") { + return nil, errors.New("acme/autocert: server name component count invalid") + } + + // Note that this conversion is necessary because some server names in the handshakes + // started by some clients (such as cURL) are not converted to Punycode, which will + // prevent us from obtaining certificates for them. In addition, we should also treat + // example.com and EXAMPLE.COM as equivalent and return the same certificate for them. + // Fortunately, this conversion also helped us deal with this kind of mixedcase problems. + // + // Due to the "σςΣ" problem (see https://unicode.org/faq/idn.html#22), we can't use + // idna.Punycode.ToASCII (or just idna.ToASCII) here. + name, err := idna.Lookup.ToASCII(name) + if err != nil { + return nil, errors.New("acme/autocert: server name contains invalid character") + } + + // In the worst-case scenario, the timeout needs to account for caching, host policy, + // domain ownership verification and certificate issuance. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Check whether this is a token cert requested for TLS-ALPN challenge. + if wantsTokenCert(hello) { + m.challengeMu.RLock() + defer m.challengeMu.RUnlock() + if cert := m.certTokens[name]; cert != nil { + return cert, nil + } + if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil { + return cert, nil + } + // TODO: cache error results? + return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) + } + + // regular domain + ck := certKey{ + domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114 + isRSA: !supportsECDSA(hello), + } + cert, err := m.cert(ctx, ck) + if err == nil { + return cert, nil + } + if err != ErrCacheMiss { + return nil, err + } + + // first-time + if err := m.hostPolicy()(ctx, name); err != nil { + return nil, err + } + cert, err = m.createCert(ctx, ck) + if err != nil { + return nil, err + } + m.cachePut(ctx, ck, cert) + return cert, nil +} + +// wantsTokenCert reports whether a TLS request with SNI is made by a CA server +// for a challenge verification. +func wantsTokenCert(hello *tls.ClientHelloInfo) bool { + // tls-alpn-01 + if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto { + return true + } + return false +} + +func supportsECDSA(hello *tls.ClientHelloInfo) bool { + // The "signature_algorithms" extension, if present, limits the key exchange + // algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1. 
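// Illustrative sketch, not part of the vendored upstream file: typical Manager
// wiring for an HTTPS server using TLSConfig (and therefore GetCertificate)
// from above. The cache directory, host names and contact email are assumed
// placeholders.
//
//  package main
//
//  import (
//      "log"
//      "net/http"
//
//      "golang.org/x/crypto/acme/autocert"
//  )
//
//  func main() {
//      m := &autocert.Manager{
//          Prompt:     autocert.AcceptTOS,
//          Cache:      autocert.DirCache("/var/cache/autocert"),                 // assumed cache directory
//          HostPolicy: autocert.HostWhitelist("example.com", "www.example.com"), // assumed hosts
//          Email:      "admin@example.com",                                      // assumed contact
//      }
//      srv := &http.Server{
//          Addr:      ":https",
//          TLSConfig: m.TLSConfig(), // enables HTTP/2 and tls-alpn-01
//          Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//              w.Write([]byte("hello, TLS"))
//          }),
//      }
//      // Empty cert/key paths: certificates come from TLSConfig.GetCertificate.
//      log.Fatal(srv.ListenAndServeTLS("", ""))
//  }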
+ if hello.SignatureSchemes != nil { + ecdsaOK := false + schemeLoop: + for _, scheme := range hello.SignatureSchemes { + const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10 + switch scheme { + case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256, + tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512: + ecdsaOK = true + break schemeLoop + } + } + if !ecdsaOK { + return false + } + } + if hello.SupportedCurves != nil { + ecdsaOK := false + for _, curve := range hello.SupportedCurves { + if curve == tls.CurveP256 { + ecdsaOK = true + break + } + } + if !ecdsaOK { + return false + } + } + for _, suite := range hello.CipherSuites { + switch suite { + case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: + return true + } + } + return false +} + +// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. +// It returns an http.Handler that responds to the challenges and must be +// running on port 80. If it receives a request that is not an ACME challenge, +// it delegates the request to the optional fallback handler. +// +// If fallback is nil, the returned handler redirects all GET and HEAD requests +// to the default TLS port 443 with 302 Found status code, preserving the original +// request path and query. It responds with 400 Bad Request to all other HTTP methods. +// The fallback is not protected by the optional HostPolicy. +// +// Because the fallback handler is run with unencrypted port 80 requests, +// the fallback should not serve TLS-only requests. +// +// If HTTPHandler is never called, the Manager will only use the "tls-alpn-01" +// challenge for domain verification. +func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { + m.challengeMu.Lock() + defer m.challengeMu.Unlock() + m.tryHTTP01 = true + + if fallback == nil { + fallback = http.HandlerFunc(handleHTTPRedirect) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { + fallback.ServeHTTP(w, r) + return + } + // A reasonable context timeout for cache and host policy only, + // because we don't wait for a new certificate issuance here. + ctx, cancel := context.WithTimeout(r.Context(), time.Minute) + defer cancel() + if err := m.hostPolicy()(ctx, r.Host); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + data, err := m.httpToken(ctx, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Write(data) + }) +} + +func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" && r.Method != "HEAD" { + http.Error(w, "Use HTTPS", http.StatusBadRequest) + return + } + target := "https://" + stripPort(r.Host) + r.URL.RequestURI() + http.Redirect(w, r, target, http.StatusFound) +} + +func stripPort(hostport string) string { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return hostport + } + return net.JoinHostPort(host, "443") +} + +// cert returns an existing certificate either from m.state or cache. +// If a certificate is found in cache but not in m.state, the latter will be filled +// with the cached value. 
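// Illustrative sketch, not part of the vendored upstream file: enabling the
// "http-01" challenge by also listening on port 80 with HTTPHandler, alongside
// the HTTPS listener from the previous sketch. Passing nil installs the default
// redirect-to-HTTPS fallback. The helper name is an assumption.
//
//  package example
//
//  import (
//      "log"
//      "net/http"
//
//      "golang.org/x/crypto/acme/autocert"
//  )
//
//  func serveHTTPChallenges(m *autocert.Manager) {
//      go func() {
//          // Challenge responses are served under /.well-known/acme-challenge/;
//          // everything else is redirected to HTTPS with 302 Found.
//          log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(nil)))
//      }()
//  }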
+func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) { + m.stateMu.Lock() + if s, ok := m.state[ck]; ok { + m.stateMu.Unlock() + s.RLock() + defer s.RUnlock() + return s.tlscert() + } + defer m.stateMu.Unlock() + cert, err := m.cacheGet(ctx, ck) + if err != nil { + return nil, err + } + signer, ok := cert.PrivateKey.(crypto.Signer) + if !ok { + return nil, errors.New("acme/autocert: private key cannot sign") + } + if m.state == nil { + m.state = make(map[certKey]*certState) + } + s := &certState{ + key: signer, + cert: cert.Certificate, + leaf: cert.Leaf, + } + m.state[ck] = s + go m.renew(ck, s.key, s.leaf.NotAfter) + return cert, nil +} + +// cacheGet always returns a valid certificate, or an error otherwise. +// If a cached certificate exists but is not valid, ErrCacheMiss is returned. +func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) { + if m.Cache == nil { + return nil, ErrCacheMiss + } + data, err := m.Cache.Get(ctx, ck.String()) + if err != nil { + return nil, err + } + + // private + priv, pub := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, ErrCacheMiss + } + privKey, err := parsePrivateKey(priv.Bytes) + if err != nil { + return nil, err + } + + // public + var pubDER [][]byte + for len(pub) > 0 { + var b *pem.Block + b, pub = pem.Decode(pub) + if b == nil { + break + } + pubDER = append(pubDER, b.Bytes) + } + if len(pub) > 0 { + // Leftover content not consumed by pem.Decode. Corrupt. Ignore. + return nil, ErrCacheMiss + } + + // verify and create TLS cert + leaf, err := validCert(ck, pubDER, privKey, m.now()) + if err != nil { + return nil, ErrCacheMiss + } + tlscert := &tls.Certificate{ + Certificate: pubDER, + PrivateKey: privKey, + Leaf: leaf, + } + return tlscert, nil +} + +func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error { + if m.Cache == nil { + return nil + } + + // contains PEM-encoded data + var buf bytes.Buffer + + // private + switch key := tlscert.PrivateKey.(type) { + case *ecdsa.PrivateKey: + if err := encodeECDSAKey(&buf, key); err != nil { + return err + } + case *rsa.PrivateKey: + b := x509.MarshalPKCS1PrivateKey(key) + pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + default: + return errors.New("acme/autocert: unknown private key type") + } + + // public + for _, b := range tlscert.Certificate { + pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + } + + return m.Cache.Put(ctx, ck.String(), buf.Bytes()) +} + +func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { + b, err := x509.MarshalECPrivateKey(key) + if err != nil { + return err + } + pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return pem.Encode(w, pb) +} + +// createCert starts the domain ownership verification and returns a certificate +// for that domain upon success. +// +// If the domain is already being verified, it waits for the existing verification to complete. +// Either way, createCert blocks for the duration of the whole process. 
+func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) { + // TODO: maybe rewrite this whole piece using sync.Once + state, err := m.certState(ck) + if err != nil { + return nil, err + } + // state may exist if another goroutine is already working on it + // in which case just wait for it to finish + if !state.locked { + state.RLock() + defer state.RUnlock() + return state.tlscert() + } + + // We are the first; state is locked. + // Unblock the readers when domain ownership is verified + // and we got the cert or the process failed. + defer state.Unlock() + state.locked = false + + der, leaf, err := m.authorizedCert(ctx, state.key, ck) + if err != nil { + // Remove the failed state after some time, + // making the manager call createCert again on the following TLS hello. + time.AfterFunc(createCertRetryAfter, func() { + defer testDidRemoveState(ck) + m.stateMu.Lock() + defer m.stateMu.Unlock() + // Verify the state hasn't changed and it's still invalid + // before deleting. + s, ok := m.state[ck] + if !ok { + return + } + if _, err := validCert(ck, s.cert, s.key, m.now()); err == nil { + return + } + delete(m.state, ck) + }) + return nil, err + } + state.cert = der + state.leaf = leaf + go m.renew(ck, state.key, state.leaf.NotAfter) + return state.tlscert() +} + +// certState returns a new or existing certState. +// If a new certState is returned, state.exist is false and the state is locked. +// The returned error is non-nil only in the case where a new state could not be created. +func (m *Manager) certState(ck certKey) (*certState, error) { + m.stateMu.Lock() + defer m.stateMu.Unlock() + if m.state == nil { + m.state = make(map[certKey]*certState) + } + // existing state + if state, ok := m.state[ck]; ok { + return state, nil + } + + // new locked state + var ( + err error + key crypto.Signer + ) + if ck.isRSA { + key, err = rsa.GenerateKey(rand.Reader, 2048) + } else { + key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + if err != nil { + return nil, err + } + + state := &certState{ + key: key, + locked: true, + } + state.Lock() // will be unlocked by m.certState caller + m.state[ck] = state + return state, nil +} + +// authorizedCert starts the domain ownership verification process and requests a new cert upon success. +// The key argument is the certificate private key. +func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) { + csr, err := certRequest(key, ck.domain, m.ExtraExtensions) + if err != nil { + return nil, nil, err + } + + client, err := m.acmeClient(ctx) + if err != nil { + return nil, nil, err + } + dir, err := client.Discover(ctx) + if err != nil { + return nil, nil, err + } + + var chain [][]byte + switch { + // Pre-RFC legacy CA. + case dir.OrderURL == "": + if err := m.verify(ctx, client, ck.domain); err != nil { + return nil, nil, err + } + der, _, err := client.CreateCert(ctx, csr, 0, true) + if err != nil { + return nil, nil, err + } + chain = der + // RFC 8555 compliant CA. 
+ default: + o, err := m.verifyRFC(ctx, client, ck.domain) + if err != nil { + return nil, nil, err + } + der, _, err := client.CreateOrderCert(ctx, o.FinalizeURL, csr, true) + if err != nil { + return nil, nil, err + } + chain = der + } + leaf, err = validCert(ck, chain, key, m.now()) + if err != nil { + return nil, nil, err + } + return chain, leaf, nil +} + +// verify runs the identifier (domain) pre-authorization flow for legacy CAs +// using each applicable ACME challenge type. +func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { + // Remove all hanging authorizations to reduce rate limit quotas + // after we're done. + var authzURLs []string + defer func() { + go m.deactivatePendingAuthz(authzURLs) + }() + + // errs accumulates challenge failure errors, printed if all fail + errs := make(map[*acme.Challenge]error) + challengeTypes := m.supportedChallengeTypes() + var nextTyp int // challengeType index of the next challenge type to try + for { + // Start domain authorization and get the challenge. + authz, err := client.Authorize(ctx, domain) + if err != nil { + return err + } + authzURLs = append(authzURLs, authz.URI) + // No point in accepting challenges if the authorization status + // is in a final state. + switch authz.Status { + case acme.StatusValid: + return nil // already authorized + case acme.StatusInvalid: + return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) + } + + // Pick the next preferred challenge. + var chal *acme.Challenge + for chal == nil && nextTyp < len(challengeTypes) { + chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) + nextTyp++ + } + if chal == nil { + errorMsg := fmt.Sprintf("acme/autocert: unable to authorize %q", domain) + for chal, err := range errs { + errorMsg += fmt.Sprintf("; challenge %q failed with error: %v", chal.Type, err) + } + return errors.New(errorMsg) + } + cleanup, err := m.fulfill(ctx, client, chal, domain) + if err != nil { + errs[chal] = err + continue + } + defer cleanup() + if _, err := client.Accept(ctx, chal); err != nil { + errs[chal] = err + continue + } + + // A challenge is fulfilled and accepted: wait for the CA to validate. + if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil { + errs[chal] = err + continue + } + return nil + } +} + +// verifyRFC runs the identifier (domain) order-based authorization flow for RFC compliant CAs +// using each applicable ACME challenge type. +func (m *Manager) verifyRFC(ctx context.Context, client *acme.Client, domain string) (*acme.Order, error) { + // Try each supported challenge type starting with a new order each time. + // The nextTyp index of the next challenge type to try is shared across + // all order authorizations: if we've tried a challenge type once and it didn't work, + // it will most likely not work on another order's authorization either. + challengeTypes := m.supportedChallengeTypes() + nextTyp := 0 // challengeTypes index +AuthorizeOrderLoop: + for { + o, err := client.AuthorizeOrder(ctx, acme.DomainIDs(domain)) + if err != nil { + return nil, err + } + // Remove all hanging authorizations to reduce rate limit quotas + // after we're done. + defer func(urls []string) { + go m.deactivatePendingAuthz(urls) + }(o.AuthzURLs) + + // Check if there's actually anything we need to do. + switch o.Status { + case acme.StatusReady: + // Already authorized. + return o, nil + case acme.StatusPending: + // Continue normal Order-based flow. 
+ default: + return nil, fmt.Errorf("acme/autocert: invalid new order status %q; order URL: %q", o.Status, o.URI) + } + + // Satisfy all pending authorizations. + for _, zurl := range o.AuthzURLs { + z, err := client.GetAuthorization(ctx, zurl) + if err != nil { + return nil, err + } + if z.Status != acme.StatusPending { + // We are interested only in pending authorizations. + continue + } + // Pick the next preferred challenge. + var chal *acme.Challenge + for chal == nil && nextTyp < len(challengeTypes) { + chal = pickChallenge(challengeTypes[nextTyp], z.Challenges) + nextTyp++ + } + if chal == nil { + return nil, fmt.Errorf("acme/autocert: unable to satisfy %q for domain %q: no viable challenge type found", z.URI, domain) + } + // Respond to the challenge and wait for validation result. + cleanup, err := m.fulfill(ctx, client, chal, domain) + if err != nil { + continue AuthorizeOrderLoop + } + defer cleanup() + if _, err := client.Accept(ctx, chal); err != nil { + continue AuthorizeOrderLoop + } + if _, err := client.WaitAuthorization(ctx, z.URI); err != nil { + continue AuthorizeOrderLoop + } + } + + // All authorizations are satisfied. + // Wait for the CA to update the order status. + o, err = client.WaitOrder(ctx, o.URI) + if err != nil { + continue AuthorizeOrderLoop + } + return o, nil + } +} + +func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { + for _, c := range chal { + if c.Type == typ { + return c + } + } + return nil +} + +func (m *Manager) supportedChallengeTypes() []string { + m.challengeMu.RLock() + defer m.challengeMu.RUnlock() + typ := []string{"tls-alpn-01"} + if m.tryHTTP01 { + typ = append(typ, "http-01") + } + return typ +} + +// deactivatePendingAuthz relinquishes all authorizations identified by the elements +// of the provided uri slice which are in "pending" state. +// It ignores revocation errors. +// +// deactivatePendingAuthz takes no context argument and instead runs with its own +// "detached" context because deactivations are done in a goroutine separate from +// that of the main issuance or renewal flow. +func (m *Manager) deactivatePendingAuthz(uri []string) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + client, err := m.acmeClient(ctx) + if err != nil { + return + } + for _, u := range uri { + z, err := client.GetAuthorization(ctx, u) + if err == nil && z.Status == acme.StatusPending { + client.RevokeAuthorization(ctx, u) + } + } +} + +// fulfill provisions a response to the challenge chal. +// The cleanup is non-nil only if provisioning succeeded. +func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) { + switch chal.Type { + case "tls-alpn-01": + cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain) + if err != nil { + return nil, err + } + m.putCertToken(ctx, domain, &cert) + return func() { go m.deleteCertToken(domain) }, nil + case "http-01": + resp, err := client.HTTP01ChallengeResponse(chal.Token) + if err != nil { + return nil, err + } + p := client.HTTP01ChallengePath(chal.Token) + m.putHTTPToken(ctx, p, resp) + return func() { go m.deleteHTTPToken(p) }, nil + } + return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) +} + +// putCertToken stores the token certificate with the specified name +// in both m.certTokens map and m.Cache. 
+func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { + m.challengeMu.Lock() + defer m.challengeMu.Unlock() + if m.certTokens == nil { + m.certTokens = make(map[string]*tls.Certificate) + } + m.certTokens[name] = cert + m.cachePut(ctx, certKey{domain: name, isToken: true}, cert) +} + +// deleteCertToken removes the token certificate with the specified name +// from both m.certTokens map and m.Cache. +func (m *Manager) deleteCertToken(name string) { + m.challengeMu.Lock() + defer m.challengeMu.Unlock() + delete(m.certTokens, name) + if m.Cache != nil { + ck := certKey{domain: name, isToken: true} + m.Cache.Delete(context.Background(), ck.String()) + } +} + +// httpToken retrieves an existing http-01 token value from an in-memory map +// or the optional cache. +func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { + m.challengeMu.RLock() + defer m.challengeMu.RUnlock() + if v, ok := m.httpTokens[tokenPath]; ok { + return v, nil + } + if m.Cache == nil { + return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) + } + return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) +} + +// putHTTPToken stores an http-01 token value using tokenPath as key +// in both in-memory map and the optional Cache. +// +// It ignores any error returned from Cache.Put. +func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { + m.challengeMu.Lock() + defer m.challengeMu.Unlock() + if m.httpTokens == nil { + m.httpTokens = make(map[string][]byte) + } + b := []byte(val) + m.httpTokens[tokenPath] = b + if m.Cache != nil { + m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) + } +} + +// deleteHTTPToken removes an http-01 token value from both in-memory map +// and the optional Cache, ignoring any error returned from the latter. +// +// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. +func (m *Manager) deleteHTTPToken(tokenPath string) { + m.challengeMu.Lock() + defer m.challengeMu.Unlock() + delete(m.httpTokens, tokenPath) + if m.Cache != nil { + m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) + } +} + +// httpTokenCacheKey returns a key at which an http-01 token value may be stored +// in the Manager's optional Cache. +func httpTokenCacheKey(tokenPath string) string { + return path.Base(tokenPath) + "+http-01" +} + +// renew starts a cert renewal timer loop, one per domain. +// +// The loop is scheduled in two cases: +// - a cert was fetched from cache for the first time (wasn't in m.state) +// - a new cert was created by m.createCert +// +// The key argument is a certificate private key. +// The exp argument is the cert expiration time (NotAfter). +func (m *Manager) renew(ck certKey, key crypto.Signer, exp time.Time) { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + if m.renewal[ck] != nil { + // another goroutine is already on it + return + } + if m.renewal == nil { + m.renewal = make(map[certKey]*domainRenewal) + } + dr := &domainRenewal{m: m, ck: ck, key: key} + m.renewal[ck] = dr + dr.start(exp) +} + +// stopRenew stops all currently running cert renewal timers. +// The timers are not restarted during the lifetime of the Manager. 
+func (m *Manager) stopRenew() { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + for name, dr := range m.renewal { + delete(m.renewal, name) + dr.stop() + } +} + +func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { + const keyName = "acme_account+key" + + // Previous versions of autocert stored the value under a different key. + const legacyKeyName = "acme_account.key" + + genKey := func() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + + if m.Cache == nil { + return genKey() + } + + data, err := m.Cache.Get(ctx, keyName) + if err == ErrCacheMiss { + data, err = m.Cache.Get(ctx, legacyKeyName) + } + if err == ErrCacheMiss { + key, err := genKey() + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := encodeECDSAKey(&buf, key); err != nil { + return nil, err + } + if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { + return nil, err + } + return key, nil + } + if err != nil { + return nil, err + } + + priv, _ := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, errors.New("acme/autocert: invalid account key found in cache") + } + return parsePrivateKey(priv.Bytes) +} + +func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { + m.clientMu.Lock() + defer m.clientMu.Unlock() + if m.client != nil { + return m.client, nil + } + + client := m.Client + if client == nil { + client = &acme.Client{DirectoryURL: DefaultACMEDirectory} + } + if client.Key == nil { + var err error + client.Key, err = m.accountKey(ctx) + if err != nil { + return nil, err + } + } + if client.UserAgent == "" { + client.UserAgent = "autocert" + } + var contact []string + if m.Email != "" { + contact = []string{"mailto:" + m.Email} + } + a := &acme.Account{Contact: contact} + _, err := client.Register(ctx, a, m.Prompt) + if err == nil || isAccountAlreadyExist(err) { + m.client = client + err = nil + } + return m.client, err +} + +// isAccountAlreadyExist reports whether the err, as returned from acme.Client.Register, +// indicates the account has already been registered. +func isAccountAlreadyExist(err error) bool { + if err == acme.ErrAccountAlreadyExists { + return true + } + ae, ok := err.(*acme.Error) + return ok && ae.StatusCode == http.StatusConflict +} + +func (m *Manager) hostPolicy() HostPolicy { + if m.HostPolicy != nil { + return m.HostPolicy + } + return defaultHostPolicy +} + +func (m *Manager) renewBefore() time.Duration { + if m.RenewBefore > renewJitter { + return m.RenewBefore + } + return 720 * time.Hour // 30 days +} + +func (m *Manager) now() time.Time { + if m.nowFunc != nil { + return m.nowFunc() + } + return time.Now() +} + +// certState is ready when its mutex is unlocked for reading. +type certState struct { + sync.RWMutex + locked bool // locked for read/write + key crypto.Signer // private key for cert + cert [][]byte // DER encoding + leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil +} + +// tlscert creates a tls.Certificate from s.key and s.cert. +// Callers should wrap it in s.RLock() and s.RUnlock(). +func (s *certState) tlscert() (*tls.Certificate, error) { + if s.key == nil { + return nil, errors.New("acme/autocert: missing signer") + } + if len(s.cert) == 0 { + return nil, errors.New("acme/autocert: missing certificate") + } + return &tls.Certificate{ + PrivateKey: s.key, + Certificate: s.cert, + Leaf: s.leaf, + }, nil +} + +// certRequest generates a CSR for the given common name cn and optional SANs. 
+func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) { + req := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: cn}, + DNSNames: san, + ExtraExtensions: ext, + } + return x509.CreateCertificateRequest(rand.Reader, req, key) +} + +// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates +// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. +// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. +// +// Inspired by parsePrivateKey in crypto/tls/tls.go. +func parsePrivateKey(der []byte) (crypto.Signer, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey: + return key, nil + case *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("acme/autocert: failed to parse private key") +} + +// validCert parses a cert chain provided as der argument and verifies the leaf and der[0] +// correspond to the private key, the domain and key type match, and expiration dates +// are valid. It doesn't do any revocation checking. +// +// The returned value is the verified leaf cert. +func validCert(ck certKey, der [][]byte, key crypto.Signer, now time.Time) (leaf *x509.Certificate, err error) { + // parse public part(s) + var n int + for _, b := range der { + n += len(b) + } + pub := make([]byte, n) + n = 0 + for _, b := range der { + n += copy(pub[n:], b) + } + x509Cert, err := x509.ParseCertificates(pub) + if err != nil || len(x509Cert) == 0 { + return nil, errors.New("acme/autocert: no public key found") + } + // verify the leaf is not expired and matches the domain name + leaf = x509Cert[0] + if now.Before(leaf.NotBefore) { + return nil, errors.New("acme/autocert: certificate is not valid yet") + } + if now.After(leaf.NotAfter) { + return nil, errors.New("acme/autocert: expired certificate") + } + if err := leaf.VerifyHostname(ck.domain); err != nil { + return nil, err + } + // ensure the leaf corresponds to the private key and matches the certKey type + switch pub := leaf.PublicKey.(type) { + case *rsa.PublicKey: + prv, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.N.Cmp(prv.N) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + if !ck.isRSA && !ck.isToken { + return nil, errors.New("acme/autocert: key type does not match expected value") + } + case *ecdsa.PublicKey: + prv, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + if ck.isRSA && !ck.isToken { + return nil, errors.New("acme/autocert: key type does not match expected value") + } + default: + return nil, errors.New("acme/autocert: unknown public key algorithm") + } + return leaf, nil +} + +type lockedMathRand struct { + sync.Mutex + rnd *mathrand.Rand +} + +func (r *lockedMathRand) int63n(max int64) int64 { + r.Lock() + n := r.rnd.Int63n(max) + r.Unlock() + return n +} + +// For easier testing. 
+var ( + // Called when a state is removed. + testDidRemoveState = func(certKey) {} +) diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go new file mode 100644 index 0000000..03f6302 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/cache.go @@ -0,0 +1,136 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "errors" + "io/ioutil" + "os" + "path/filepath" +) + +// ErrCacheMiss is returned when a certificate is not found in cache. +var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") + +// Cache is used by Manager to store and retrieve previously obtained certificates +// and other account data as opaque blobs. +// +// Cache implementations should not rely on the key naming pattern. Keys can +// include any printable ASCII characters, except the following: \/:*?"<>| +type Cache interface { + // Get returns a certificate data for the specified key. + // If there's no such key, Get returns ErrCacheMiss. + Get(ctx context.Context, key string) ([]byte, error) + + // Put stores the data in the cache under the specified key. + // Underlying implementations may use any data storage format, + // as long as the reverse operation, Get, results in the original data. + Put(ctx context.Context, key string, data []byte) error + + // Delete removes a certificate data from the cache under the specified key. + // If there's no such key in the cache, Delete returns nil. + Delete(ctx context.Context, key string) error +} + +// DirCache implements Cache using a directory on the local filesystem. +// If the directory does not exist, it will be created with 0700 permissions. +type DirCache string + +// Get reads a certificate data from the specified file name. +func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { + name = filepath.Join(string(d), name) + var ( + data []byte + err error + done = make(chan struct{}) + ) + go func() { + data, err = ioutil.ReadFile(name) + close(done) + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-done: + } + if os.IsNotExist(err) { + return nil, ErrCacheMiss + } + return data, err +} + +// Put writes the certificate data to the specified file name. +// The file will be created with 0600 permissions. +func (d DirCache) Put(ctx context.Context, name string, data []byte) error { + if err := os.MkdirAll(string(d), 0700); err != nil { + return err + } + + done := make(chan struct{}) + var err error + go func() { + defer close(done) + var tmp string + if tmp, err = d.writeTempFile(name, data); err != nil { + return + } + defer os.Remove(tmp) + select { + case <-ctx.Done(): + // Don't overwrite the file if the context was canceled. + default: + newName := filepath.Join(string(d), name) + err = os.Rename(tmp, newName) + } + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + return err +} + +// Delete removes the specified file name. +func (d DirCache) Delete(ctx context.Context, name string) error { + name = filepath.Join(string(d), name) + var ( + err error + done = make(chan struct{}) + ) + go func() { + err = os.Remove(name) + close(done) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// writeTempFile writes b to a temporary file, closes the file and returns its path. 
+func (d DirCache) writeTempFile(prefix string, b []byte) (name string, reterr error) { + // TempFile uses 0600 permissions + f, err := ioutil.TempFile(string(d), prefix) + if err != nil { + return "", err + } + defer func() { + if reterr != nil { + os.Remove(f.Name()) + } + }() + if _, err := f.Write(b); err != nil { + f.Close() + return "", err + } + return f.Name(), f.Close() +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go new file mode 100644 index 0000000..cb48609 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/listener.go @@ -0,0 +1,155 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "crypto/tls" + "log" + "net" + "os" + "path/filepath" + "runtime" + "time" +) + +// NewListener returns a net.Listener that listens on the standard TLS +// port (443) on all interfaces and returns *tls.Conn connections with +// LetsEncrypt certificates for the provided domain or domains. +// +// It enables one-line HTTPS servers: +// +// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler)) +// +// NewListener is a convenience function for a common configuration. +// More complex or custom configurations can use the autocert.Manager +// type instead. +// +// Use of this function implies acceptance of the LetsEncrypt Terms of +// Service. If domains is not empty, the provided domains are passed +// to HostWhitelist. If domains is empty, the listener will do +// LetsEncrypt challenges for any requested domain, which is not +// recommended. +// +// Certificates are cached in a "golang-autocert" directory under an +// operating system-specific cache or temp directory. This may not +// be suitable for servers spanning multiple machines. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +func NewListener(domains ...string) net.Listener { + m := &Manager{ + Prompt: AcceptTOS, + } + if len(domains) > 0 { + m.HostPolicy = HostWhitelist(domains...) + } + dir := cacheDir() + if err := os.MkdirAll(dir, 0700); err != nil { + log.Printf("warning: autocert.NewListener not using a cache: %v", err) + } else { + m.Cache = DirCache(dir) + } + return m.Listener() +} + +// Listener listens on the standard TLS port (443) on all interfaces +// and returns a net.Listener returning *tls.Conn connections. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +// +// Unlike NewListener, it is the caller's responsibility to initialize +// the Manager m's Prompt, Cache, HostPolicy, and other desired options. 
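+//
+// For instance, a manual setup might look roughly like this (a sketch only;
+// the domain, cache directory and handler below are placeholders):
+//
+//	m := &Manager{
+//		Prompt:     AcceptTOS,
+//		HostPolicy: HostWhitelist("example.com"),
+//		Cache:      DirCache("/var/cache/autocert"),
+//	}
+//	go http.ListenAndServe(":80", m.HTTPHandler(nil)) // optional http-01 + HTTPS redirect
+//	log.Fatal(http.Serve(m.Listener(), handler))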
+func (m *Manager) Listener() net.Listener { + ln := &listener{ + conf: m.TLSConfig(), + } + ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443") + return ln +} + +type listener struct { + conf *tls.Config + + tcpListener net.Listener + tcpListenErr error +} + +func (ln *listener) Accept() (net.Conn, error) { + if ln.tcpListenErr != nil { + return nil, ln.tcpListenErr + } + conn, err := ln.tcpListener.Accept() + if err != nil { + return nil, err + } + tcpConn := conn.(*net.TCPConn) + + // Because Listener is a convenience function, help out with + // this too. This is not possible for the caller to set once + // we return a *tcp.Conn wrapping an inaccessible net.Conn. + // If callers don't want this, they can do things the manual + // way and tweak as needed. But this is what net/http does + // itself, so copy that. If net/http changes, we can change + // here too. + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(3 * time.Minute) + + return tls.Server(tcpConn, ln.conf), nil +} + +func (ln *listener) Addr() net.Addr { + if ln.tcpListener != nil { + return ln.tcpListener.Addr() + } + // net.Listen failed. Return something non-nil in case callers + // call Addr before Accept: + return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443} +} + +func (ln *listener) Close() error { + if ln.tcpListenErr != nil { + return ln.tcpListenErr + } + return ln.tcpListener.Close() +} + +func homeDir() string { + if runtime.GOOS == "windows" { + return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + } + if h := os.Getenv("HOME"); h != "" { + return h + } + return "/" +} + +func cacheDir() string { + const base = "golang-autocert" + switch runtime.GOOS { + case "darwin": + return filepath.Join(homeDir(), "Library", "Caches", base) + case "windows": + for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { + if v := os.Getenv(ev); v != "" { + return filepath.Join(v, base) + } + } + // Worst case: + return filepath.Join(homeDir(), base) + } + if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { + return filepath.Join(xdg, base) + } + return filepath.Join(homeDir(), ".cache", base) +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go new file mode 100644 index 0000000..665f870 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -0,0 +1,141 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "crypto" + "sync" + "time" +) + +// renewJitter is the maximum deviation from Manager.RenewBefore. +const renewJitter = time.Hour + +// domainRenewal tracks the state used by the periodic timers +// renewing a single domain's cert. +type domainRenewal struct { + m *Manager + ck certKey + key crypto.Signer + + timerMu sync.Mutex + timer *time.Timer +} + +// start starts a cert renewal timer at the time +// defined by the certificate expiration time exp. +// +// If the timer is already started, calling start is a noop. +func (dr *domainRenewal) start(exp time.Time) { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer != nil { + return + } + dr.timer = time.AfterFunc(dr.next(exp), dr.renew) +} + +// stop stops the cert renewal timer. +// If the timer is already stopped, calling stop is a noop. 
+func (dr *domainRenewal) stop() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + dr.timer.Stop() + dr.timer = nil +} + +// renew is called periodically by a timer. +// The first renew call is kicked off by dr.start. +func (dr *domainRenewal) renew() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // TODO: rotate dr.key at some point? + next, err := dr.do(ctx) + if err != nil { + next = renewJitter / 2 + next += time.Duration(pseudoRand.int63n(int64(next))) + } + dr.timer = time.AfterFunc(next, dr.renew) + testDidRenewLoop(next, err) +} + +// updateState locks and replaces the relevant Manager.state item with the given +// state. It additionally updates dr.key with the given state's key. +func (dr *domainRenewal) updateState(state *certState) { + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + dr.key = state.key + dr.m.state[dr.ck] = state +} + +// do is similar to Manager.createCert but it doesn't lock a Manager.state item. +// Instead, it requests a new certificate independently and, upon success, +// replaces dr.m.state item with a new one and updates cache for the given domain. +// +// It may lock and update the Manager.state if the expiration date of the currently +// cached cert is far enough in the future. +// +// The returned value is a time interval after which the renewal should occur again. +func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { + // a race is likely unavoidable in a distributed environment + // but we try nonetheless + if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { + next := dr.next(tlscert.Leaf.NotAfter) + if next > dr.m.renewBefore()+renewJitter { + signer, ok := tlscert.PrivateKey.(crypto.Signer) + if ok { + state := &certState{ + key: signer, + cert: tlscert.Certificate, + leaf: tlscert.Leaf, + } + dr.updateState(state) + return next, nil + } + } + } + + der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck) + if err != nil { + return 0, err + } + state := &certState{ + key: dr.key, + cert: der, + leaf: leaf, + } + tlscert, err := state.tlscert() + if err != nil { + return 0, err + } + if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil { + return 0, err + } + dr.updateState(state) + return dr.next(leaf.NotAfter), nil +} + +func (dr *domainRenewal) next(expiry time.Time) time.Duration { + d := expiry.Sub(dr.m.now()) - dr.m.renewBefore() + // add a bit of randomness to renew deadline + n := pseudoRand.int63n(int64(renewJitter)) + d -= time.Duration(n) + if d < 0 { + return 0 + } + return d +} + +var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go b/vendor/golang.org/x/crypto/acme/http.go new file mode 100644 index 0000000..c51943e --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/http.go @@ -0,0 +1,321 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "strconv" + "strings" + "time" +) + +// retryTimer encapsulates common logic for retrying unsuccessful requests. +// It is not safe for concurrent use. +type retryTimer struct { + // backoffFn provides backoff delay sequence for retries. + // See Client.RetryBackoff doc comment. 
+	backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
+	// n is the current retry attempt.
+	n int
+}
+
+func (t *retryTimer) inc() {
+	t.n++
+}
+
+// backoff pauses the current goroutine as described in Client.RetryBackoff.
+func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
+	d := t.backoffFn(t.n, r, res)
+	if d <= 0 {
+		return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
+	}
+	wakeup := time.NewTimer(d)
+	defer wakeup.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-wakeup.C:
+		return nil
+	}
+}
+
+func (c *Client) retryTimer() *retryTimer {
+	f := c.RetryBackoff
+	if f == nil {
+		f = defaultBackoff
+	}
+	return &retryTimer{backoffFn: f}
+}
+
+// defaultBackoff provides default Client.RetryBackoff implementation
+// using a truncated exponential backoff algorithm,
+// as described in Client.RetryBackoff.
+//
+// The n argument is always bounded between 1 and 30.
+// The returned value is always greater than 0.
+func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
+	const max = 10 * time.Second
+	var jitter time.Duration
+	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
+		// Set the minimum to 1ms to avoid a case where
+		// an invalid Retry-After value is parsed into 0 below,
+		// resulting in the 0 returned value which would unintentionally
+		// stop the retries.
+		jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
+	}
+	if v, ok := res.Header["Retry-After"]; ok {
+		return retryAfter(v[0]) + jitter
+	}
+
+	if n < 1 {
+		n = 1
+	}
+	if n > 30 {
+		n = 30
+	}
+	d := time.Duration(1<<uint(n-1))*time.Second + jitter
+	if d > max {
+		return max
+	}
+	return d
+}
+
+// retryAfter parses a Retry-After HTTP header value,
+// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
+// It returns zero value if v cannot be parsed.
+func retryAfter(v string) time.Duration {
+	if i, err := strconv.Atoi(v); err == nil {
+		return time.Duration(i) * time.Second
+	}
+	t, err := http.ParseTime(v)
+	if err != nil {
+		return 0
+	}
+	return t.Sub(timeNow())
+}
+
+// resOkay is a function that reports whether the provided response is okay.
+// It is expected to keep the response body unread.
+type resOkay func(*http.Response) bool
+
+// wantStatus returns a function which reports whether the code
+// matches the status code of a response.
+func wantStatus(codes ...int) resOkay {
+	return func(res *http.Response) bool {
+		for _, code := range codes {
+			if code == res.StatusCode {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// get issues an unsigned GET request to the specified URL.
+// It returns a non-error value only when ok reports true.
+//
+// get retries unsuccessful attempts according to c.RetryBackoff
+// until the context is done or a non-retriable error is received.
+func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
+	retry := c.retryTimer()
+	for {
+		req, err := http.NewRequest("GET", url, nil)
+		if err != nil {
+			return nil, err
+		}
+		res, err := c.doNoRetry(ctx, req)
+		switch {
+		case err != nil:
+			return nil, err
+		case ok(res):
+			return res, nil
+		case isRetriable(res.StatusCode):
+			retry.inc()
+			resErr := responseError(res)
+			res.Body.Close()
+			// Ignore the error value from retry.backoff
+			// and return the one from last retry, as received from the CA.
+ if retry.backoff(ctx, req, res) != nil { + return nil, resErr + } + default: + defer res.Body.Close() + return nil, responseError(res) + } + } +} + +// postAsGet is POST-as-GET, a replacement for GET in RFC8555 +// as described in https://tools.ietf.org/html/rfc8555#section-6.3. +// It makes a POST request in KID form with zero JWS payload. +// See nopayload doc comments in jws.go. +func (c *Client) postAsGet(ctx context.Context, url string, ok resOkay) (*http.Response, error) { + return c.post(ctx, nil, url, noPayload, ok) +} + +// post issues a signed POST request in JWS format using the provided key +// to the specified URL. If key is nil, c.Key is used instead. +// It returns a non-error value only when ok reports true. +// +// post retries unsuccessful attempts according to c.RetryBackoff +// until the context is done or a non-retriable error is received. +// It uses postNoRetry to make individual requests. +func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + res, req, err := c.postNoRetry(ctx, key, url, body) + if err != nil { + return nil, err + } + if ok(res) { + return res, nil + } + resErr := responseError(res) + res.Body.Close() + switch { + // Check for bad nonce before isRetriable because it may have been returned + // with an unretriable response code such as 400 Bad Request. + case isBadNonce(resErr): + // Consider any previously stored nonce values to be invalid. + c.clearNonces() + case !isRetriable(res.StatusCode): + return nil, resErr + } + retry.inc() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if err := retry.backoff(ctx, req, res); err != nil { + return nil, resErr + } + } +} + +// postNoRetry signs the body with the given key and POSTs it to the provided url. +// It is used by c.post to retry unsuccessful attempts. +// The body argument must be JSON-serializable. +// +// If key argument is nil, c.Key is used to sign the request. +// If key argument is nil and c.accountKID returns a non-zero keyID, +// the request is sent in KID form. Otherwise, JWK form is used. +// +// In practice, when interfacing with RFC-compliant CAs most requests are sent in KID form +// and JWK is used only when KID is unavailable: new account endpoint and certificate +// revocation requests authenticated by a cert key. +// See jwsEncodeJSON for other details. +func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { + kid := noKeyID + if key == nil { + key = c.Key + kid = c.accountKID(ctx) + } + nonce, err := c.popNonce(ctx, url) + if err != nil { + return nil, nil, err + } + b, err := jwsEncodeJSON(body, key, kid, nonce, url) + if err != nil { + return nil, nil, err + } + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + req.Header.Set("Content-Type", "application/jose+json") + res, err := c.doNoRetry(ctx, req) + if err != nil { + return nil, nil, err + } + c.addNonce(res.Header) + return res, req, nil +} + +// doNoRetry issues a request req, replacing its context (if any) with ctx. +func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", c.userAgent()) + res, err := c.httpClient().Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + // Prefer the unadorned context error. 
+ // (The acme package had tests assuming this, previously from ctxhttp's + // behavior, predating net/http supporting contexts natively) + // TODO(bradfitz): reconsider this in the future. But for now this + // requires no test updates. + return nil, ctx.Err() + default: + return nil, err + } + } + return res, nil +} + +func (c *Client) httpClient() *http.Client { + if c.HTTPClient != nil { + return c.HTTPClient + } + return http.DefaultClient +} + +// packageVersion is the version of the module that contains this package, for +// sending as part of the User-Agent header. It's set in version_go112.go. +var packageVersion string + +// userAgent returns the User-Agent header value. It includes the package name, +// the module version (if available), and the c.UserAgent value (if set). +func (c *Client) userAgent() string { + ua := "golang.org/x/crypto/acme" + if packageVersion != "" { + ua += "@" + packageVersion + } + if c.UserAgent != "" { + ua = c.UserAgent + " " + ua + } + return ua +} + +// isBadNonce reports whether err is an ACME "badnonce" error. +func isBadNonce(err error) bool { + // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. + // However, ACME servers in the wild return their versions of the error. + // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 + // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. + ae, ok := err.(*Error) + return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") +} + +// isRetriable reports whether a request can be retried +// based on the response status code. +// +// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. +// Callers should parse the response and check with isBadNonce. +func isRetriable(code int) bool { + return code <= 399 || code >= 500 || code == http.StatusTooManyRequests +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := ioutil.ReadAll(resp.Body) + e := &wireError{Status: resp.StatusCode} + if err := json.Unmarshal(b, e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return e.error(resp.Header) +} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go new file mode 100644 index 0000000..76e3fda --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/jws.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/asn1" + "encoding/base64" + "encoding/json" + "fmt" + "math/big" +) + +// keyID is the account identity provided by a CA during registration. +type keyID string + +// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID. +// See jwsEncodeJSON for details. +const noKeyID = keyID("") + +// noPayload indicates jwsEncodeJSON will encode zero-length octet string +// in a JWS request. 
This is called POST-as-GET in RFC 8555 and is used to make +// authenticated GET requests via POSTing with an empty payload. +// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details. +const noPayload = "" + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format containing either kid or jwk +// fields based on the provided keyID value. +// +// If kid is non-empty, its quoted value is inserted in the protected head +// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted +// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive. +// +// See https://tools.ietf.org/html/rfc7515#section-7. +func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) { + alg, sha := jwsHasher(key.Public()) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + var phead string + switch kid { + case noKeyID: + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url) + default: + phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url) + } + phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) + var payload string + if claimset != noPayload { + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload = base64.RawURLEncoding.EncodeToString(cs) + } + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + + enc := struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` + }{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// The hash is unused for ECDSA keys. 
+func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch pub := key.Public().(type) { + case *rsa.PublicKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PublicKey: + sigASN1, err := key.Sign(rand.Reader, digest, hash) + if err != nil { + return nil, err + } + + var rs struct{ R, S *big.Int } + if _, err := asn1.Unmarshal(sigASN1, &rs); err != nil { + return nil, err + } + + rb, sb := rs.R.Bytes(), rs.S.Bytes() + size := pub.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) { + switch pub := pub.(type) { + case *rsa.PublicKey: + return "RS256", crypto.SHA256 + case *ecdsa.PublicKey: + switch pub.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/vendor/golang.org/x/crypto/acme/rfc8555.go b/vendor/golang.org/x/crypto/acme/rfc8555.go new file mode 100644 index 0000000..dfb57a6 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/rfc8555.go @@ -0,0 +1,392 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" +) + +// DeactivateReg permanently disables an existing account associated with c.Key. +// A deactivated account can no longer request certificate issuance or access +// resources related to the account, such as orders or authorizations. +// +// It only works with CAs implementing RFC 8555. +func (c *Client) DeactivateReg(ctx context.Context) error { + url := string(c.accountKID(ctx)) + if url == "" { + return ErrNoAccount + } + req := json.RawMessage(`{"status": "deactivated"}`) + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + res.Body.Close() + return nil +} + +// registerRFC is quivalent to c.Register but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. +// TODO: Implement externalAccountBinding. 
+func (c *Client) registerRFC(ctx context.Context, acct *Account, prompt func(tosURL string) bool) (*Account, error) { + c.cacheMu.Lock() // guard c.kid access + defer c.cacheMu.Unlock() + + req := struct { + TermsAgreed bool `json:"termsOfServiceAgreed,omitempty"` + Contact []string `json:"contact,omitempty"` + }{ + Contact: acct.Contact, + } + if c.dir.Terms != "" { + req.TermsAgreed = prompt(c.dir.Terms) + } + res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus( + http.StatusOK, // account with this key already registered + http.StatusCreated, // new account created + )) + if err != nil { + return nil, err + } + + defer res.Body.Close() + a, err := responseAccount(res) + if err != nil { + return nil, err + } + // Cache Account URL even if we return an error to the caller. + // It is by all means a valid and usable "kid" value for future requests. + c.kid = keyID(a.URI) + if res.StatusCode == http.StatusOK { + return nil, ErrAccountAlreadyExists + } + return a, nil +} + +// updateGegRFC is equivalent to c.UpdateReg but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. +func (c *Client) updateRegRFC(ctx context.Context, a *Account) (*Account, error) { + url := string(c.accountKID(ctx)) + if url == "" { + return nil, ErrNoAccount + } + req := struct { + Contact []string `json:"contact,omitempty"` + }{ + Contact: a.Contact, + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseAccount(res) +} + +// getGegRFC is equivalent to c.GetReg but for CAs implementing RFC 8555. +// It expects c.Discover to have already been called. +func (c *Client) getRegRFC(ctx context.Context) (*Account, error) { + req := json.RawMessage(`{"onlyReturnExisting": true}`) + res, err := c.post(ctx, c.Key, c.dir.RegURL, req, wantStatus(http.StatusOK)) + if e, ok := err.(*Error); ok && e.ProblemType == "urn:ietf:params:acme:error:accountDoesNotExist" { + return nil, ErrNoAccount + } + if err != nil { + return nil, err + } + + defer res.Body.Close() + return responseAccount(res) +} + +func responseAccount(res *http.Response) (*Account, error) { + var v struct { + Status string + Contact []string + Orders string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid account response: %v", err) + } + return &Account{ + URI: res.Header.Get("Location"), + Status: v.Status, + Contact: v.Contact, + OrdersURL: v.Orders, + }, nil +} + +// AuthorizeOrder initiates the order-based application for certificate issuance, +// as opposed to pre-authorization in Authorize. +// It is only supported by CAs implementing RFC 8555. +// +// The caller then needs to fetch each authorization with GetAuthorization, +// identify those with StatusPending status and fulfill a challenge using Accept. +// Once all authorizations are satisfied, the caller will typically want to poll +// order status using WaitOrder until it's in StatusReady state. +// To finalize the order and obtain a certificate, the caller submits a CSR with CreateOrderCert. 
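+//
+// Roughly, the flow described above looks like this (a sketch only, with error
+// handling omitted; client, "example.com", chal and csr are placeholders):
+//
+//	order, _ := client.AuthorizeOrder(ctx, DomainIDs("example.com"))
+//	for _, u := range order.AuthzURLs {
+//		z, _ := client.GetAuthorization(ctx, u)
+//		// Choose a challenge chal from z.Challenges, provision its
+//		// response, then accept it and wait for validation.
+//		client.Accept(ctx, chal)
+//		client.WaitAuthorization(ctx, z.URI)
+//	}
+//	order, _ = client.WaitOrder(ctx, order.URI)
+//	der, _, _ := client.CreateOrderCert(ctx, order.FinalizeURL, csr, true)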
+func (c *Client) AuthorizeOrder(ctx context.Context, id []AuthzID, opt ...OrderOption) (*Order, error) { + dir, err := c.Discover(ctx) + if err != nil { + return nil, err + } + + req := struct { + Identifiers []wireAuthzID `json:"identifiers"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{} + for _, v := range id { + req.Identifiers = append(req.Identifiers, wireAuthzID{ + Type: v.Type, + Value: v.Value, + }) + } + for _, o := range opt { + switch o := o.(type) { + case orderNotBeforeOpt: + req.NotBefore = time.Time(o).Format(time.RFC3339) + case orderNotAfterOpt: + req.NotAfter = time.Time(o).Format(time.RFC3339) + default: + // Package's fault if we let this happen. + panic(fmt.Sprintf("unsupported order option type %T", o)) + } + } + + res, err := c.post(ctx, nil, dir.OrderURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseOrder(res) +} + +// GetOrder retrives an order identified by the given URL. +// For orders created with AuthorizeOrder, the url value is Order.URI. +// +// If a caller needs to poll an order until its status is final, +// see the WaitOrder method. +func (c *Client) GetOrder(ctx context.Context, url string) (*Order, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + return responseOrder(res) +} + +// WaitOrder polls an order from the given URL until it is in one of the final states, +// StatusReady, StatusValid or StatusInvalid, the CA responded with a non-retryable error +// or the context is done. +// +// It returns a non-nil Order only if its Status is StatusReady or StatusValid. +// In all other cases WaitOrder returns an error. +// If the Status is StatusInvalid, the returned error is of type *OrderError. +func (c *Client) WaitOrder(ctx context.Context, url string) (*Order, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + for { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + o, err := responseOrder(res) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case o.Status == StatusInvalid: + return nil, &OrderError{OrderURL: o.URI, Status: o.Status} + case o.Status == StatusReady || o.Status == StatusValid: + return o, nil + } + + d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Default retry-after. + // Same reasoning as in WaitAuthorization. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. 
+ } + } +} + +func responseOrder(res *http.Response) (*Order, error) { + var v struct { + Status string + Expires time.Time + Identifiers []wireAuthzID + NotBefore time.Time + NotAfter time.Time + Error *wireError + Authorizations []string + Finalize string + Certificate string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: error reading order: %v", err) + } + o := &Order{ + URI: res.Header.Get("Location"), + Status: v.Status, + Expires: v.Expires, + NotBefore: v.NotBefore, + NotAfter: v.NotAfter, + AuthzURLs: v.Authorizations, + FinalizeURL: v.Finalize, + CertURL: v.Certificate, + } + for _, id := range v.Identifiers { + o.Identifiers = append(o.Identifiers, AuthzID{Type: id.Type, Value: id.Value}) + } + if v.Error != nil { + o.Error = v.Error.error(nil /* headers */) + } + return o, nil +} + +// CreateOrderCert submits the CSR (Certificate Signing Request) to a CA at the specified URL. +// The URL is the FinalizeURL field of an Order created with AuthorizeOrder. +// +// If the bundle argument is true, the returned value also contain the CA (issuer) +// certificate chain. Otherwise, only a leaf certificate is returned. +// The returned URL can be used to re-fetch the certificate using FetchCert. +// +// This method is only supported by CAs implementing RFC 8555. See CreateCert for pre-RFC CAs. +// +// CreateOrderCert returns an error if the CA's response is unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. +func (c *Client) CreateOrderCert(ctx context.Context, url string, csr []byte, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { // required by c.accountKID + return nil, "", err + } + + // RFC describes this as "finalize order" request. + req := struct { + CSR string `json:"csr"` + }{ + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + res, err := c.post(ctx, nil, url, req, wantStatus(http.StatusOK)) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + o, err := responseOrder(res) + if err != nil { + return nil, "", err + } + + // Wait for CA to issue the cert if they haven't. + if o.Status != StatusValid { + o, err = c.WaitOrder(ctx, o.URI) + } + if err != nil { + return nil, "", err + } + // The only acceptable status post finalize and WaitOrder is "valid". + if o.Status != StatusValid { + return nil, "", &OrderError{OrderURL: o.URI, Status: o.Status} + } + crt, err := c.fetchCertRFC(ctx, o.CertURL, bundle) + return crt, o.CertURL, err +} + +// fetchCertRFC downloads issued certificate from the given URL. +// It expects the CA to respond with PEM-encoded certificate chain. +// +// The URL argument is the CertURL field of Order. +func (c *Client) fetchCertRFC(ctx context.Context, url string, bundle bool) ([][]byte, error) { + res, err := c.postAsGet(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + // Get all the bytes up to a sane maximum. + // Account very roughly for base64 overhead. + const max = maxCertChainSize + maxCertChainSize/33 + b, err := ioutil.ReadAll(io.LimitReader(res.Body, max+1)) + if err != nil { + return nil, fmt.Errorf("acme: fetch cert response stream: %v", err) + } + if len(b) > max { + return nil, errors.New("acme: certificate chain is too big") + } + + // Decode PEM chain. 
+ var chain [][]byte + for { + var p *pem.Block + p, b = pem.Decode(b) + if p == nil { + break + } + if p.Type != "CERTIFICATE" { + return nil, fmt.Errorf("acme: invalid PEM cert type %q", p.Type) + } + + chain = append(chain, p.Bytes) + if !bundle { + return chain, nil + } + if len(chain) > maxChainLen { + return nil, errors.New("acme: certificate chain is too long") + } + } + if len(chain) == 0 { + return nil, errors.New("acme: certificate chain is empty") + } + return chain, nil +} + +// sends a cert revocation request in either JWK form when key is non-nil or KID form otherwise. +func (c *Client) revokeCertRFC(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + req := &struct { + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + res, err := c.post(ctx, key, c.dir.RevokeURL, req, wantStatus(http.StatusOK)) + if err != nil { + if isAlreadyRevoked(err) { + // Assume it is not an error to revoke an already revoked cert. + return nil + } + return err + } + defer res.Body.Close() + return nil +} + +func isAlreadyRevoked(err error) bool { + e, ok := err.(*Error) + return ok && e.ProblemType == "urn:ietf:params:acme:error:alreadyRevoked" +} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go new file mode 100644 index 0000000..e959caf --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/types.go @@ -0,0 +1,560 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// ACME status values of Account, Order, Authorization and Challenge objects. +// See https://tools.ietf.org/html/rfc8555#section-7.1.6 for details. +const ( + StatusDeactivated = "deactivated" + StatusExpired = "expired" + StatusInvalid = "invalid" + StatusPending = "pending" + StatusProcessing = "processing" + StatusReady = "ready" + StatusRevoked = "revoked" + StatusUnknown = "unknown" + StatusValid = "valid" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. +const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +var ( + // ErrUnsupportedKey is returned when an unsupported key type is encountered. + ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + + // ErrAccountAlreadyExists indicates that the Client's key has already been registered + // with the CA. It is returned by Register method. + ErrAccountAlreadyExists = errors.New("acme: account already exists") + + // ErrNoAccount indicates that the Client's key has not been registered with the CA. + ErrNoAccount = errors.New("acme: account does not exist") +) + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. 
+type Error struct {
+ // StatusCode is the HTTP status code generated by the origin server.
+ StatusCode int
+ // ProblemType is a URI reference that identifies the problem type,
+ // typically in a "urn:acme:error:xxx" form.
+ ProblemType string
+ // Detail is a human-readable explanation specific to this occurrence of the problem.
+ Detail string
+ // Instance indicates a URL that the client should direct a human user to visit
+ // in order to receive instructions on how to agree to the updated Terms of Service.
+ // In such an event, the CA sets StatusCode to 403, ProblemType to
+ // "urn:ietf:params:acme:error:userActionRequired" and a Link header with relation
+ // "terms-of-service" containing the latest TOS URL.
+ Instance string
+ // Header is the original server error response headers.
+ // It may be nil.
+ Header http.Header
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail)
+}
+
+// AuthorizationError indicates that an authorization for an identifier
+// did not succeed.
+// It contains all errors from Challenge items of the failed Authorization.
+type AuthorizationError struct {
+ // URI uniquely identifies the failed Authorization.
+ URI string
+
+ // Identifier is an AuthzID.Value of the failed Authorization.
+ Identifier string
+
+ // Errors is a collection of non-nil error values of Challenge items
+ // of the failed Authorization.
+ Errors []error
+}
+
+func (a *AuthorizationError) Error() string {
+ e := make([]string, len(a.Errors))
+ for i, err := range a.Errors {
+ e[i] = err.Error()
+ }
+
+ if a.Identifier != "" {
+ return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; "))
+ }
+
+ return fmt.Sprintf("acme: authorization error: %s", strings.Join(e, "; "))
+}
+
+// OrderError is returned from Client's order-related methods.
+// It indicates the order is unusable and the client should start over with
+// AuthorizeOrder.
+//
+// The client can still fetch the order object from the CA using GetOrder
+// to inspect its state.
+type OrderError struct {
+ OrderURL string
+ Status string
+}
+
+func (oe *OrderError) Error() string {
+ return fmt.Sprintf("acme: order %s status: %s", oe.OrderURL, oe.Status)
+}
+
+// RateLimit reports whether err represents a rate limit error and
+// any Retry-After duration returned by the server.
+//
+// See the following for more details on rate limiting:
+// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6
+func RateLimit(err error) (time.Duration, bool) {
+ e, ok := err.(*Error)
+ if !ok {
+ return 0, false
+ }
+ // Some CA implementations may return incorrect values.
+ // Use case-insensitive comparison.
+ if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") {
+ return 0, false
+ }
+ if e.Header == nil {
+ return 0, true
+ }
+ return retryAfter(e.Header.Get("Retry-After")), true
+}
+
+// Account is a user account. It is associated with a private key.
+// Non-RFC 8555 fields are empty when interfacing with a compliant CA.
+type Account struct {
+ // URI is the account unique ID, which is also a URL used to retrieve
+ // account data from the CA.
+ // When interfacing with RFC 8555-compliant CAs, URI is the "kid" field
+ // value in JWS signed requests.
+ URI string
+
+ // Contact is a slice of contact info used during registration.
+ // See https://tools.ietf.org/html/rfc8555#section-7.3 for supported
+ // formats.
+ Contact []string
+
+ // Status indicates the current account status as returned by the CA.
+ // Possible values are StatusValid, StatusDeactivated, and StatusRevoked.
+ Status string
+
+ // OrdersURL is a URL from which a list of orders submitted by this account
+ // can be fetched.
+ OrdersURL string
+
+ // The terms the user has agreed to.
+ // A value not matching CurrentTerms indicates that the user hasn't agreed
+ // to the actual Terms of Service of the CA.
+ //
+ // It is non-RFC 8555 compliant. Package users can store the ToS they agree to
+ // during Client's Register call in the prompt callback function.
+ AgreedTerms string
+
+ // Actual terms of a CA.
+ //
+ // It is non-RFC 8555 compliant. Use Directory's Terms field.
+ // When a CA updates their terms and requires an account agreement,
+ // a URL at which instructions to do so is available in Error's Instance field.
+ CurrentTerms string
+
+ // Authz is the authorization URL used to initiate a new authz flow.
+ //
+ // It is non-RFC 8555 compliant. Use Directory's AuthzURL or OrderURL.
+ Authz string
+
+ // Authorizations is a URI from which a list of authorizations
+ // granted to this account can be fetched via a GET request.
+ //
+ // It is non-RFC 8555 compliant and is obsoleted by OrdersURL.
+ Authorizations string
+
+ // Certificates is a URI from which a list of certificates
+ // issued for this account can be fetched via a GET request.
+ //
+ // It is non-RFC 8555 compliant and is obsoleted by OrdersURL.
+ Certificates string
+}
+
+// Directory is ACME server discovery data.
+// See https://tools.ietf.org/html/rfc8555#section-7.1.1 for more details.
+type Directory struct {
+ // NonceURL indicates an endpoint from which to fetch fresh nonce values.
+ NonceURL string
+
+ // RegURL is an account endpoint URL, allowing for creating new accounts.
+ // Pre-RFC 8555 CAs also allow modifying existing accounts at this URL.
+ RegURL string
+
+ // OrderURL is used to initiate the certificate issuance flow
+ // as described in RFC 8555.
+ OrderURL string
+
+ // AuthzURL is used to initiate the identifier pre-authorization flow.
+ // An empty string indicates the flow is unsupported by the CA.
+ AuthzURL string
+
+ // CertURL is a new certificate issuance endpoint URL.
+ // It is non-RFC 8555 compliant and is obsoleted by OrderURL.
+ CertURL string
+
+ // RevokeURL is used to initiate a certificate revocation flow.
+ RevokeURL string
+
+ // KeyChangeURL is used to perform the account key rollover flow.
+ KeyChangeURL string
+
+ // Terms is a URI identifying the current terms of service.
+ Terms string
+
+ // Website is an HTTP or HTTPS URL locating a website
+ // providing more information about the ACME server.
+ Website string
+
+ // CAA consists of lowercase hostname elements, which the ACME server
+ // recognises as referring to itself for the purposes of CAA record validation
+ // as defined in RFC 6844.
+ CAA []string
+
+ // ExternalAccountRequired indicates that the CA requires all account-related
+ // requests to include external account binding information.
+ ExternalAccountRequired bool
+}
+
+// rfcCompliant reports whether the ACME server implements RFC 8555.
+// Note that some servers may have incomplete RFC implementation
+// even if the returned value is true.
+// If rfcCompliant reports false, the server most likely implements draft-02.
+func (d *Directory) rfcCompliant() bool {
+ return d.OrderURL != ""
+}
+
+// Order represents a client's request for a certificate.
+// It tracks the request flow progress through to issuance.
+type Order struct {
+ // URI uniquely identifies an order.
+ URI string + + // Status represents the current status of the order. + // It indicates which action the client should take. + // + // Possible values are StatusPending, StatusReady, StatusProcessing, StatusValid and StatusInvalid. + // Pending means the CA does not believe that the client has fulfilled the requirements. + // Ready indicates that the client has fulfilled all the requirements and can submit a CSR + // to obtain a certificate. This is done with Client's CreateOrderCert. + // Processing means the certificate is being issued. + // Valid indicates the CA has issued the certificate. It can be downloaded + // from the Order's CertURL. This is done with Client's FetchCert. + // Invalid means the certificate will not be issued. Users should consider this order + // abandoned. + Status string + + // Expires is the timestamp after which CA considers this order invalid. + Expires time.Time + + // Identifiers contains all identifier objects which the order pertains to. + Identifiers []AuthzID + + // NotBefore is the requested value of the notBefore field in the certificate. + NotBefore time.Time + + // NotAfter is the requested value of the notAfter field in the certificate. + NotAfter time.Time + + // AuthzURLs represents authorizations to complete before a certificate + // for identifiers specified in the order can be issued. + // It also contains unexpired authorizations that the client has completed + // in the past. + // + // Authorization objects can be fetched using Client's GetAuthorization method. + // + // The required authorizations are dictated by CA policies. + // There may not be a 1:1 relationship between the identifiers and required authorizations. + // Required authorizations can be identified by their StatusPending status. + // + // For orders in the StatusValid or StatusInvalid state these are the authorizations + // which were completed. + AuthzURLs []string + + // FinalizeURL is the endpoint at which a CSR is submitted to obtain a certificate + // once all the authorizations are satisfied. + FinalizeURL string + + // CertURL points to the certificate that has been issued in response to this order. + CertURL string + + // The error that occurred while processing the order as received from a CA, if any. + Error *Error +} + +// OrderOption allows customizing Client.AuthorizeOrder call. +type OrderOption interface { + privateOrderOpt() +} + +// WithOrderNotBefore sets order's NotBefore field. +func WithOrderNotBefore(t time.Time) OrderOption { + return orderNotBeforeOpt(t) +} + +// WithOrderNotAfter sets order's NotAfter field. +func WithOrderNotAfter(t time.Time) OrderOption { + return orderNotAfterOpt(t) +} + +type orderNotBeforeOpt time.Time + +func (orderNotBeforeOpt) privateOrderOpt() {} + +type orderNotAfterOpt time.Time + +func (orderNotAfterOpt) privateOrderOpt() {} + +// Authorization encodes an authorization response. +type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status is the current status of an authorization. + // Possible values are StatusPending, StatusValid, StatusInvalid, StatusDeactivated, + // StatusExpired and StatusRevoked. + Status string + + // Identifier is what the account is authorized to represent. + Identifier AuthzID + + // The timestamp after which the CA considers the authorization invalid. + Expires time.Time + + // Wildcard is true for authorizations of a wildcard domain name. 
+ Wildcard bool + + // Challenges that the client needs to fulfill in order to prove possession + // of the identifier (for pending authorizations). + // For valid authorizations, the challenge that was validated. + // For invalid authorizations, the challenge that was attempted and failed. + // + // RFC 8555 compatible CAs require users to fuflfill only one of the challenges. + Challenges []*Challenge + + // A collection of sets of challenges, each of which would be sufficient + // to prove possession of the identifier. + // Clients must complete a set of challenges that covers at least one set. + // Challenges are identified by their indices in the challenges array. + // If this field is empty, the client needs to complete all challenges. + // + // This field is unused in RFC 8555. + Combinations [][]int +} + +// AuthzID is an identifier that an account is authorized to represent. +type AuthzID struct { + Type string // The type of identifier, "dns" or "ip". + Value string // The identifier itself, e.g. "example.org". +} + +// DomainIDs creates a slice of AuthzID with "dns" identifier type. +func DomainIDs(names ...string) []AuthzID { + a := make([]AuthzID, len(names)) + for i, v := range names { + a[i] = AuthzID{Type: "dns", Value: v} + } + return a +} + +// IPIDs creates a slice of AuthzID with "ip" identifier type. +// Each element of addr is textual form of an address as defined +// in RFC1123 Section 2.1 for IPv4 and in RFC5952 Section 4 for IPv6. +func IPIDs(addr ...string) []AuthzID { + a := make([]AuthzID, len(addr)) + for i, v := range addr { + a[i] = AuthzID{Type: "ip", Value: v} + } + return a +} + +// wireAuthzID is ACME JSON representation of authorization identifier objects. +type wireAuthzID struct { + Type string `json:"type"` + Value string `json:"value"` +} + +// wireAuthz is ACME JSON representation of Authorization objects. +type wireAuthz struct { + Identifier wireAuthzID + Status string + Expires time.Time + Wildcard bool + Challenges []wireChallenge + Combinations [][]int + Error *wireError +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Expires: z.Expires, + Wildcard: z.Wildcard, + Challenges: make([]*Challenge, len(z.Challenges)), + Combinations: z.Combinations, // shallow copy + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +func (z *wireAuthz) error(uri string) *AuthorizationError { + err := &AuthorizationError{ + URI: uri, + Identifier: z.Identifier.Value, + } + + if z.Error != nil { + err.Errors = append(err.Errors, z.Error.error(nil)) + } + + for _, raw := range z.Challenges { + if raw.Error != nil { + err.Errors = append(err.Errors, raw.Error.error(nil)) + } + } + + return err +} + +// Challenge encodes a returned CA challenge. +// Its Error field may be non-nil if the challenge is part of an Authorization +// with StatusInvalid. +type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-alpn-01", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + // In RFC 8555, possible values are StatusPending, StatusProcessing, StatusValid, + // and StatusInvalid. + Status string + + // Validated is the time at which the CA validated this challenge. 
+ // Always zero value in pre-RFC 8555. + Validated time.Time + + // Error indicates the reason for an authorization failure + // when this challenge was used. + // The type of a non-nil value is *Error. + Error error +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URL string `json:"url"` // RFC + URI string `json:"uri"` // pre-RFC + Type string + Token string + Status string + Validated time.Time + Error *wireError +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URL, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.URI == "" { + v.URI = c.URI // c.URL was empty; use legacy + } + if v.Status == "" { + v.Status = StatusPending + } + if c.Error != nil { + v.Error = c.Error.error(nil) + } + return v +} + +// wireError is a subset of fields of the Problem Details object +// as described in https://tools.ietf.org/html/rfc7807#section-3.1. +type wireError struct { + Status int + Type string + Detail string + Instance string +} + +func (e *wireError) error(h http.Header) *Error { + return &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Instance: e.Instance, + Header: h, + } +} + +// CertOption is an optional argument type for the TLS ChallengeCert methods for +// customizing a temporary certificate for TLS-based challenges. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. +// See x509.CreateCertificate for template usage details. +// +// In TLS ChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. +func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/crypto/acme/version_go112.go b/vendor/golang.org/x/crypto/acme/version_go112.go new file mode 100644 index 0000000..b58f245 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/version_go112.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package acme + +import "runtime/debug" + +func init() { + // Set packageVersion if the binary was built in modules mode and x/crypto + // was not replaced with a different module. + info, ok := debug.ReadBuildInfo() + if !ok { + return + } + for _, m := range info.Deps { + if m.Path != "golang.org/x/crypto" { + continue + } + if m.Replace == nil { + packageVersion = m.Version + } + break + } +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000..fc31160 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000..aeb73f8 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000..9d80f19 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. 
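Stepping back from the internals above, the exported bcrypt surface amounts to GenerateFromPassword, CompareHashAndPassword and Cost. A short illustrative sketch follows; it is not part of the vendored code, and the password and cost value are arbitrary examples.

// Illustrative sketch of the bcrypt API defined above; not part of the vendored file.
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	password := []byte("correct horse battery staple") // example secret
	// Cost 12 is an arbitrary choice above DefaultCost; higher costs are slower
	// to compute and therefore harder to brute-force.
	hash, err := bcrypt.GenerateFromPassword(password, 12)
	if err != nil {
		log.Fatal(err)
	}
	// CompareHashAndPassword returns nil on a match and
	// ErrMismatchedHashAndPassword otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, password); err != nil {
		log.Fatal("unexpected mismatch:", err)
	}
	cost, _ := bcrypt.Cost(hash)
	fmt.Printf("hash=%s cost=%d\n", hash, cost)
}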
+func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 0000000..213bf20 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. 
+const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000..d040775 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt. 
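As the Encrypt doc above notes, the Cipher only handles single 8-byte blocks, so practical use wraps it in a crypto/cipher mode. The sketch below shows CBC over a Cipher; it is illustrative only, the key and plaintext are placeholders, real input needs padding to a multiple of BlockSize, and, per the package comment, Blowfish should only be chosen for legacy compatibility.

// Illustrative CBC sketch using the Cipher defined above; not part of the vendored file.
package main

import (
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/blowfish"
)

func main() {
	key := []byte("legacy-compat-key") // placeholder key, 1 to 56 bytes
	block, err := blowfish.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	plaintext := []byte("16 byte payload!") // already a multiple of the 8-byte BlockSize
	iv := make([]byte, blowfish.BlockSize)  // CBC needs an IV of exactly one block
	if _, err := rand.Read(iv); err != nil {
		log.Fatal(err)
	}
	ciphertext := make([]byte, len(plaintext))
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
	fmt.Printf("%x\n", ciphertext)
}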
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 0000000..cd0a8ac --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". 
+// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 0000000..2a93886 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + 
Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + 
Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, 
// col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, 
// wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // 
figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 0000000..ff7acf2 --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,111 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 0000000..822ed42 --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. 
+ + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 0000000..c484e5a --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. 
+var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 0000000..b628880 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. 
+// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + 
"DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', 
+ "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + 
"Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": 
'\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + 
"angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": 
'\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + 
"diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": 
'\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + 
"isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": 
'\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + 
"nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + 
"piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": 
'\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": 
'\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + 
"upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + 
"REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. 
+ // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 
0000000..d856139 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. 
+	} else if x := entity[entityName]; x != 0 {
+		return dst + utf8.EncodeRune(b[dst:], x), src + i
+	} else if x := entity2[entityName]; x[0] != 0 {
+		dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
+		return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
+	} else if !attribute {
+		maxLen := len(entityName) - 1
+		if maxLen > longestEntityWithoutSemicolon {
+			maxLen = longestEntityWithoutSemicolon
+		}
+		for j := maxLen; j > 1; j-- {
+			if x := entity[entityName[:j]]; x != 0 {
+				return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
+			}
+		}
+	}
+
+	dst1, src1 = dst+i, src+i
+	copy(b[dst:dst1], b[src:src1])
+	return dst1, src1
+}
+
+// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
+// attribute should be true if parsing an attribute value.
+func unescape(b []byte, attribute bool) []byte {
+	for i, c := range b {
+		if c == '&' {
+			dst, src := unescapeEntity(b, i, i, attribute)
+			for src < len(b) {
+				c := b[src]
+				if c == '&' {
+					dst, src = unescapeEntity(b, dst, src, attribute)
+				} else {
+					b[dst] = c
+					dst, src = dst+1, src+1
+				}
+			}
+			return b[0:dst]
+		}
+	}
+	return b
+}
+
+// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
+func lower(b []byte) []byte {
+	for i, c := range b {
+		if 'A' <= c && c <= 'Z' {
+			b[i] = c + 'a' - 'A'
+		}
+	}
+	return b
+}
+
+const escapedChars = "&'<>\"\r"
+
+func escape(w writer, s string) error {
+	i := strings.IndexAny(s, escapedChars)
+	for i != -1 {
+		if _, err := w.WriteString(s[:i]); err != nil {
+			return err
+		}
+		var esc string
+		switch s[i] {
+		case '&':
+			esc = "&amp;"
+		case '\'':
+			// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
+			esc = "&#39;"
+		case '<':
+			esc = "&lt;"
+		case '>':
+			esc = "&gt;"
+		case '"':
+			// "&#34;" is shorter than "&quot;".
+			esc = "&#34;"
+		case '\r':
+			esc = "&#13;"
+		default:
+			panic("unrecognized escape character")
+		}
+		s = s[i+1:]
+		if _, err := w.WriteString(esc); err != nil {
+			return err
+		}
+		i = strings.IndexAny(s, escapedChars)
+	}
+	_, err := w.WriteString(s)
+	return err
+}
+
+// EscapeString escapes special characters like "<" to become "&lt;". It
+// escapes only five such characters: <, >, &, ' and ".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func EscapeString(s string) string {
+	if strings.IndexAny(s, escapedChars) == -1 {
+		return s
+	}
+	var buf bytes.Buffer
+	escape(&buf, s)
+	return buf.String()
+}
+
+// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
+// larger range of entities than EscapeString escapes. For example, "&aacute;"
+// unescapes to "á", as does "&#225;" and "&#xE1;".
+// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
+// always true.
+func UnescapeString(s string) string {
+	for _, c := range s {
+		if c == '&' {
+			return string(unescape([]byte(s), false))
+		}
+	}
+	return s
+}
diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go
new file mode 100644
index 0000000..9da9e9d
--- /dev/null
+++ b/vendor/golang.org/x/net/html/foreign.go
@@ -0,0 +1,222 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"strings"
+)
+
+func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
+	for i := range aa {
+		if newName, ok := nameMap[aa[i].Key]; ok {
+			aa[i].Key = newName
+		}
+	}
+}
+
+func adjustForeignAttributes(aa []Attribute) {
+	for i, a := range aa {
+		if a.Key == "" || a.Key[0] != 'x' {
+			continue
+		}
+		switch a.Key {
+		case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
+			"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
+			j := strings.Index(a.Key, ":")
+			aa[i].Namespace = a.Key[:j]
+			aa[i].Key = a.Key[j+1:]
+		}
+	}
+}
+
+func htmlIntegrationPoint(n *Node) bool {
+	if n.Type != ElementNode {
+		return false
+	}
+	switch n.Namespace {
+	case "math":
+		if n.Data == "annotation-xml" {
+			for _, a := range n.Attr {
+				if a.Key == "encoding" {
+					val := strings.ToLower(a.Val)
+					if val == "text/html" || val == "application/xhtml+xml" {
+						return true
+					}
+				}
+			}
+		}
+	case "svg":
+		switch n.Data {
+		case "desc", "foreignObject", "title":
+			return true
+		}
+	}
+	return false
+}
+
+func mathMLTextIntegrationPoint(n *Node) bool {
+	if n.Namespace != "math" {
+		return false
+	}
+	switch n.Data {
+	case "mi", "mo", "mn", "ms", "mtext":
+		return true
+	}
+	return false
+}
+
+// Section 12.2.6.5.
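escape.go above exposes exactly two exported entry points, EscapeString and UnescapeString. The following is a minimal, illustrative sketch of how application code would call them once this vendored copy of golang.org/x/net/html is in use; it is not part of the vendored sources, and the sample strings are arbitrary.

    package main

    import (
    	"fmt"

    	"golang.org/x/net/html"
    )

    func main() {
    	// EscapeString rewrites only the characters in escapedChars ("&'<>\"\r").
    	fmt.Println(html.EscapeString(`<a href="x">R&D</a>`))
    	// Prints: &lt;a href=&#34;x&#34;&gt;R&amp;D&lt;/a&gt;

    	// UnescapeString resolves far more entities than EscapeString ever emits,
    	// which is why UnescapeString(EscapeString(s)) == s but not the converse.
    	fmt.Println(html.UnescapeString("&lt;b&gt; &amp; &aacute; &#225;"))
    	// Prints: <b> & á á
    }

Because EscapeString rewrites only the small escapedChars set, it is suited to escaping text and quoted attribute values rather than acting as a general-purpose sanitizer.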
+var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.6.5. +var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.6.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + 
"specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go new file mode 100644 index 0000000..1350eef --- /dev/null +++ b/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,225 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + // RawNode nodes are not returned by the parser, but can be part of the + // Node tree passed to func Render to insert raw HTML (without escaping). + // If so, this package makes no guarantee that the rendered HTML is secure + // (from e.g. Cross Site Scripting attacks) or well-formed. + RawNode + scopeMarkerNode +) + +// Section 12.2.4.3 says "The markers are inserted when entering applet, +// object, marquee, template, td, th, and caption elements, and are used +// to prevent formatting from "leaking" into applet, object, marquee, +// template, td, th, and caption elements". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// contains returns whether a is within s. +func (s *nodeStack) contains(a atom.Atom) bool { + for _, n := range *s { + if n.DataAtom == a && n.Namespace == "" { + return true + } + } + return false +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} + +type insertionModeStack []insertionMode + +func (s *insertionModeStack) pop() (im insertionMode) { + i := len(*s) + im = (*s)[i-1] + *s = (*s)[:i-1] + return im +} + +func (s *insertionModeStack) top() insertionMode { + if i := len(*s); i > 0 { + return (*s)[i-1] + } + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 0000000..f91466f --- /dev/null +++ b/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2438 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package html
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	a "golang.org/x/net/html/atom"
+)
+
+// A parser implements the HTML5 parsing algorithm:
+// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction
+type parser struct {
+	// tokenizer provides the tokens for the parser.
+	tokenizer *Tokenizer
+	// tok is the most recently read token.
+	tok Token
+	// Self-closing tags like <br/> are treated as start tags, except that
+	// hasSelfClosingToken is set while they are being processed.
+	hasSelfClosingToken bool
+	// doc is the document root element.
+	doc *Node
+	// The stack of open elements (section 12.2.4.2) and active formatting
+	// elements (section 12.2.4.3).
+	oe, afe nodeStack
+	// Element pointers (section 12.2.4.4).
+	head, form *Node
+	// Other parsing state flags (section 12.2.4.5).
+	scripting, framesetOK bool
+	// The stack of template insertion modes
+	templateStack insertionModeStack
+	// im is the current insertion mode.
+	im insertionMode
+	// originalIM is the insertion mode to go back to after completing a text
+	// or inTableText insertion mode.
+	originalIM insertionMode
+	// fosterParenting is whether new elements should be inserted according to
+	// the foster parenting rules (section 12.2.6.1).
+	fosterParenting bool
+	// quirks is whether the parser is operating in "quirks mode."
+	quirks bool
+	// fragment is whether the parser is parsing an HTML fragment.
+	fragment bool
+	// context is the context element when parsing an HTML fragment
+	// (section 12.4).
+	context *Node
+}
+
+func (p *parser) top() *Node {
+	if n := p.oe.top(); n != nil {
+		return n
+	}
+	return p.doc
+}
+
+// Stop tags for use in popUntil. These come from section 12.2.4.2.
+var (
+	defaultScopeStopTags = map[string][]a.Atom{
+		"":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
+		"math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},
+		"svg":  {a.Desc, a.ForeignObject, a.Title},
+	}
+)
+
+type scope int
+
+const (
+	defaultScope scope = iota
+	listItemScope
+	buttonScope
+	tableScope
+	tableRowScope
+	tableBodyScope
+	selectScope
+)
+
+// popUntil pops the stack of open elements at the highest element whose tag
+// is in matchTags, provided there is no higher element in the scope's stop
+// tags (as defined in section 12.2.4.2). It returns whether or not there was
+// such an element. If there was not, popUntil leaves the stack unchanged.
+//
+// For example, the set of stop tags for table scope is: "html", "table". If
+// the stack was:
+// ["html", "body", "font", "table", "b", "i", "u"]
+// then popUntil(tableScope, "font") would return false, but
+// popUntil(tableScope, "i") would return true and the stack would become:
+// ["html", "body", "font", "table", "b"]
+//
+// If an element's tag is in both the stop tags and matchTags, then the stack
+// will be popped and the function returns true (provided, of course, there was
+// no higher element in the stack that was also in the stop tags). For example,
+// popUntil(tableScope, "table") returns true and leaves:
+// ["html", "body", "font"]
+func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {
+	if i := p.indexOfElementInScope(s, matchTags...); i != -1 {
+		p.oe = p.oe[:i]
+		return true
+	}
+	return false
+}
+
+// indexOfElementInScope returns the index in p.oe of the highest element whose
+// tag is in matchTags that is in scope. If no matching element is in scope, it
+// returns -1.
+func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		tagAtom := p.oe[i].DataAtom
+		if p.oe[i].Namespace == "" {
+			for _, t := range matchTags {
+				if t == tagAtom {
+					return i
+				}
+			}
+			switch s {
+			case defaultScope:
+				// No-op.
+			case listItemScope:
+				if tagAtom == a.Ol || tagAtom == a.Ul {
+					return -1
+				}
+			case buttonScope:
+				if tagAtom == a.Button {
+					return -1
+				}
+			case tableScope:
+				if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template {
+					return -1
+				}
+			case selectScope:
+				if tagAtom != a.Optgroup && tagAtom != a.Option {
+					return -1
+				}
+			default:
+				panic("unreachable")
+			}
+		}
+		switch s {
+		case defaultScope, listItemScope, buttonScope:
+			for _, t := range defaultScopeStopTags[p.oe[i].Namespace] {
+				if t == tagAtom {
+					return -1
+				}
+			}
+		}
+	}
+	return -1
+}
+
+// elementInScope is like popUntil, except that it doesn't modify the stack of
+// open elements.
+func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {
+	return p.indexOfElementInScope(s, matchTags...) != -1
+}
+
+// clearStackToContext pops elements off the stack of open elements until a
+// scope-defined element is found.
+func (p *parser) clearStackToContext(s scope) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		tagAtom := p.oe[i].DataAtom
+		switch s {
+		case tableScope:
+			if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template {
+				p.oe = p.oe[:i+1]
+				return
+			}
+		case tableRowScope:
+			if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template {
+				p.oe = p.oe[:i+1]
+				return
+			}
+		case tableBodyScope:
+			if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template {
+				p.oe = p.oe[:i+1]
+				return
+			}
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+// parseGenericRawTextElements implements the generic raw text element parsing
+// algorithm defined in 12.2.6.2.
+// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
+// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part
+// officially, need to make tokenizer consider both states.
+func (p *parser) parseGenericRawTextElement() {
+	p.addElement()
+	p.originalIM = p.im
+	p.im = textIM
+}
+
+// generateImpliedEndTags pops nodes off the stack of open elements as long as
+// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc.
+// If exceptions are specified, nodes with that name will not be popped off.
+func (p *parser) generateImpliedEndTags(exceptions ...string) {
+	var i int
+loop:
+	for i = len(p.oe) - 1; i >= 0; i-- {
+		n := p.oe[i]
+		if n.Type != ElementNode {
+			break
+		}
+		switch n.DataAtom {
+		case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc:
+			for _, except := range exceptions {
+				if n.Data == except {
+					break loop
+				}
+			}
+			continue
+		}
+		break
+	}
+
+	p.oe = p.oe[:i+1]
+}
+
+// addChild adds a child node n to the top element, and pushes n onto the stack
+// of open elements if it is an element node.
+func (p *parser) addChild(n *Node) {
+	if p.shouldFosterParent() {
+		p.fosterParent(n)
+	} else {
+		p.top().AppendChild(n)
+	}
+
+	if n.Type == ElementNode {
+		p.oe = append(p.oe, n)
+	}
+}
+
+// shouldFosterParent returns whether the next node to be added should be
+// foster parented.
+func (p *parser) shouldFosterParent() bool {
+	if p.fosterParenting {
+		switch p.top().DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			return true
+		}
+	}
+	return false
+}
+
+// fosterParent adds a child node according to the foster parenting rules.
+// Section 12.2.6.1, "foster parenting".
+func (p *parser) fosterParent(n *Node) { + var table, parent, prev, template *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + var j int + for j = len(p.oe) - 1; j >= 0; j-- { + if p.oe[j].DataAtom == a.Template { + template = p.oe[j] + break + } + } + + if template != nil && (table == nil || j > i) { + template.AppendChild(n) + return + } + + if table == nil { + // The foster parent is the html element. + parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. +func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.4.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.4.3. +func (p *parser) clearActiveFormattingElements() { + for { + if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.4.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.5. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.4.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). 
+// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.4.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.4.1, "reset the insertion mode". +func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + last := i == 0 + if last && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + if !last { + for ancestor, first := n, p.oe[0]; ancestor != first; { + ancestor = p.oe[p.oe.index(ancestor)-1] + switch ancestor.DataAtom { + case a.Template: + p.im = inSelectIM + return + case a.Table: + p.im = inSelectInTableIM + return + } + } + } + p.im = inSelectIM + case a.Td, a.Th: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Template: + // TODO: remove this divergence from the HTML5 spec. + if n.Namespace != "" { + continue + } + p.im = p.templateStack.top() + case a.Head: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inHeadIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + if p.head == nil { + p.im = beforeHeadIM + } else { + p.im = afterHeadIM + } + default: + if last { + p.im = inBodyIM + return + } + continue + } + return + } +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.6.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.6.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.6.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. 
+ return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.6.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Noscript: + if p.scripting { + p.parseGenericRawTextElement() + return true + } + p.addElement() + p.im = inHeadNoscriptIM + // Don't let the tokenizer go into raw text mode when scripting is disabled. + p.tokenizer.NextIsNotRawText() + return true + case a.Script, a.Title: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Noframes, a.Style: + p.parseGenericRawTextElement() + return true + case a.Head: + // Ignore the token. + return true + case a.Template: + p.addElement() + p.afe = append(p.afe, &scopeMarker) + p.framesetOK = false + p.im = inTemplateIM + p.templateStack = append(p.templateStack, inTemplateIM) + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + p.oe.pop() + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + case a.Template: + if !p.oe.contains(a.Template) { + return true + } + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.generateImpliedEndTags() + for i := len(p.oe) - 1; i >= 0; i-- { + if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template { + p.oe = p.oe[:i] + break + } + } + p.clearActiveFormattingElements() + p.templateStack.pop() + p.resetInsertionMode() + return true + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// 12.2.6.4.5. +func inHeadNoscriptIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + case a.Noscript: + // Don't let the tokenizer go into raw text mode even when a