Mirror of https://github.com/openshift/installer.git, synced 2026-02-06 00:48:45 +01:00
vendor: switch from glide to dep
$ rm -rf tests
$ dep init
// edit Gopkg.toml
```toml
[prune]
non-go = true
go-tests = true
unused-packages = true
```
$ dep ensure
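The commit message above is the whole migration recipe. As a hedged aside (not part of the original commit), the usual way to sanity-check the result with the golang/dep CLI would look something like this:

```sh
# Sketch only, assuming the golang/dep tool is installed.
# Report each project with its constraint and locked revision, and flag any
# mismatch between Gopkg.toml, Gopkg.lock, and the imports in the tree.
dep status

# Rebuild vendor/ strictly from the committed Gopkg.lock, without re-solving.
dep ensure -vendor-only
```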
Gopkg.lock | 512 lines (generated, new file)
@@ -0,0 +1,512 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f0aed4c37a2ca3736b8e7d31cc4ca34f5e7fd32f71a4a77dcfe30f4010a5f044"
|
||||
name = "github.com/ajeddeloh/go-json"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "73d058cf8437a1989030afe571eeab9f90eebbbd"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:f3793f8a708522400cef1dba23385e901aede5519f68971fd69938ef330b07a1"
|
||||
name = "github.com/alecthomas/template"
|
||||
packages = [
|
||||
".",
|
||||
"parse",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fdd419e104ec26bb5bd63cc62637c640453ed2929a7453f3afadbd9a0223da66"
|
||||
name = "github.com/alecthomas/units"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ef98942770803ae37c4787ad6bf70e401c99834dfba5e3034b97ba7c24e66c10"
|
||||
name = "github.com/apparentlymart/go-cidr"
|
||||
packages = ["cidr"]
|
||||
pruneopts = "NUT"
|
||||
revision = "2bd8b58cf4275aeb086ade613de226773e29e853"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:06aeb1ae6ecd7695cddddb9a203df3fbb818c2d9864f8ac8f6fb372a8ef8f00c"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
"aws/awserr",
|
||||
"aws/awsutil",
|
||||
"aws/client",
|
||||
"aws/client/metadata",
|
||||
"aws/corehandlers",
|
||||
"aws/credentials",
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/csm",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/sdkio",
|
||||
"internal/sdkrand",
|
||||
"internal/sdkuri",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/ec2query",
|
||||
"private/protocol/eventstream",
|
||||
"private/protocol/eventstream/eventstreamapi",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
"private/protocol/restxml",
|
||||
"private/protocol/xml/xmlutil",
|
||||
"service/autoscaling",
|
||||
"service/ec2",
|
||||
"service/elb",
|
||||
"service/iam",
|
||||
"service/route53",
|
||||
"service/s3",
|
||||
"service/s3/s3iface",
|
||||
"service/s3/s3manager",
|
||||
"service/sts",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "85d9dfd77e6d694e83c3ac054141cb5e81eecc7f"
|
||||
version = "v1.15.43"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5f5b0d2ed45f61f3add4d4a8d26812c36fc78f177e3e16941170426b1b4fcf69"
|
||||
name = "github.com/coreos/go-semver"
|
||||
packages = ["semver"]
|
||||
pruneopts = "NUT"
|
||||
revision = "294930c1e79c64e7dbe360054274fdad492c8cf5"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2ed7e62c94645be6b47be0911035fbabaa6271b20138d3643488716b5c6cad41"
|
||||
name = "github.com/coreos/go-systemd"
|
||||
packages = ["unit"]
|
||||
pruneopts = "NUT"
|
||||
revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab"
|
||||
version = "v17"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1a216755b1570c329bada24deac0032f487c1a5b7c690963c96ad2da9b8c3ec9"
|
||||
name = "github.com/coreos/ignition"
|
||||
packages = [
|
||||
"config/shared/errors",
|
||||
"config/shared/validations",
|
||||
"config/util",
|
||||
"config/v2_2/types",
|
||||
"config/v2_3_experimental/types",
|
||||
"config/validate/report",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "76107251acd117c6d3e5b4dae2b47f82f944984b"
|
||||
version = "v0.26.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7b66b274b356ceef90428bb165f05f3a04fddd69e0a6c14ef43686ae14e2422e"
|
||||
name = "github.com/coreos/tectonic-config"
|
||||
packages = [
|
||||
"config/kube-addon",
|
||||
"config/kube-core",
|
||||
"config/tectonic-network",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "0d649ebfd3552dfa5c6cc2cf053e17ba924b7024"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = "NUT"
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:abfe129dc92b16fbf0cc9d6336096a2823151756f62072a700eb10754141b38e"
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:74d9b0a7b4107b41e0ade759fac64502876f82d29fb23d77b3dd24b194ee3dd5"
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5cf292cae48347c2490ac1a58fe36735fb78df7e"
|
||||
version = "v1.38.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f"
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "c0656edd0d9eab7c66d1eb0c568f9039345796f7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:78b8040ece2ff622580def2708b9eb0b2857711b6744c475439bf337e9c677ea"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a27662d4ea9bea722743550d9e6a686e36b74d7583b23009777ce3f63f3dba02"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "7d79101e329e5a3adf994758c578dab82b90c017"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067"
|
||||
name = "github.com/google/gofuzz"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b3fcc1e30ee909b7c81b8b4dc503f78169f51209b320c4c520b23c9c59b4ac75"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [
|
||||
".",
|
||||
"diskcache",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "787624de3eb7bd915c329cba748687a3b22666a6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:cf68a79fd02ab0f6942b9567d03d054b6568212fcc22de2a4afdefd096923749"
|
||||
name = "github.com/kballard/go-shellquote"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "95032a82bc518f77982ea72343cc1ade730072f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7"
|
||||
name = "github.com/konsorten/go-windows-terminal-sequences"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6a6ff326e35990ede49758d308ed52176c0800d3f1d68d496b1db310399d2590"
|
||||
name = "github.com/libvirt/libvirt-go"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "d5e0adac44d4365a2d97e259f51ab76cc3c01b20"
|
||||
version = "v4.7.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9785a54031460a402fab4e4bbb3124c8dd9e9f7b1982109fef605cb91632d480"
|
||||
name = "github.com/mattn/go-colorable"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "efa589957cd060542a26d2dd7832fd6a6c6c3ade"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:bffa444ca07c69c599ae5876bc18b25bfd5fa85b297ca10a25594d284a7e9c5d"
|
||||
name = "github.com/mattn/go-isatty"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c"
|
||||
version = "v0.0.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:063d55b87e200bced5e2be658cc70acafb4c5bbc4afa04d4b82f66298b73d089"
|
||||
name = "github.com/mgutz/ansi"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "9520e82c474b0a04dd04f8a40959027271bab992"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:eb617d906c06530ab079b2f3471960adeea2152a38f6d508c1c072b4c93ab44a"
|
||||
name = "github.com/openshift/hive"
|
||||
packages = ["contrib/pkg/awstagdeprovision"]
|
||||
pruneopts = "NUT"
|
||||
revision = "a050d77562a1ae37fc2358e047edef10623fc6f7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9072181164e616e422cbfbe48ca9ac249a4d76301ca0876c9f56b937cf214a2f"
|
||||
name = "github.com/pborman/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
pruneopts = "NUT"
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = "NUT"
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7ca2584fa7da0520cd2d1136a10194fe5a5b220bdb215074ab6f7b5ad91115f4"
|
||||
name = "github.com/shurcooL/httpfs"
|
||||
packages = ["vfsutil"]
|
||||
pruneopts = "NUT"
|
||||
revision = "809beceb23714880abc4a382a00c05f89d13b1cc"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:43c44ab8b001881225afe48c6aedcf2dd4f93495efd4586d1649c9dc2c7c7eaa"
|
||||
name = "github.com/shurcooL/vfsgen"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "33ae1944be3fe078a54c597f107e0867da19c713"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4f09996e1ec8589cb383db8245839dbfc878344da6e7e3bcca6b9f7fda244a87"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
|
||||
version = "v0.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3ab855aa584d08db6541ce99dad60c12bd6a328ecb8a7358363887f85c976347"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:bacb8b590716ab7c33f2277240972c9582d389593ee8d66fc10074e0508b8126"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = ["assert"]
|
||||
pruneopts = "NUT"
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c9b456727ce4101594aabddaa816c80464cd21a1e5fcd145297a902303be0085"
|
||||
name = "github.com/vincent-petithory/dataurl"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "9a301d65acbb728fcc3ace14f45f511a4cfeea9c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:91ed6116b126ec7fa7b9f9460356ee6b3005e5ed320b8e85510937f44d3d62d5"
|
||||
name = "go4.org"
|
||||
packages = ["errorutil"]
|
||||
pruneopts = "NUT"
|
||||
revision = "03efcb870d84809319ea509714dd6d19a1498483"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = "NUT"
|
||||
revision = "df8d4716b3472e4a531c33cedbe537dae921a1a9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d1a6ebe75268a41b6fbb1d43947cf8688d8580423b7484fa5ae608beef6df24d"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"lex/httplex",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a969921c213e6d90554219689f484043e1e897d1936e524cfbb4fd6cb8ae2736"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "95c6576299259db960f6c5b9b69ea52422860fce"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:97337ef8cb438f9e3a99ea91a300e916ed9a96fbf3ad50f9a020d30ea9f8692f"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"internal/gen",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:41147ed90b063df283fc4c694cc3f9fb4524a4b1a16d065237b928ee127647e5"
|
||||
name = "gopkg.in/AlecAivazis/survey.v1"
|
||||
packages = [
|
||||
".",
|
||||
"core",
|
||||
"terminal",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "f30c5d1830c892f533140f29a1de89141dc217f5"
|
||||
version = "v1.6.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:22b2dee6f30bc8601f087449a2a819df8388e54e9547349c658f14d8f8c590f2"
|
||||
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||
version = "v2.2.6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185"
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
|
||||
version = "v0.9.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:302ad18387350c3d9792da66de666f76d2ca8c62c47dd6b9434269c7cfa18971"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b20a21815244073c3a801d5343ef631ce3072af4c2cf4949508f643dcb2a6a9b"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/resource",
|
||||
"pkg/apis/meta/v1",
|
||||
"pkg/conversion",
|
||||
"pkg/conversion/queryparams",
|
||||
"pkg/fields",
|
||||
"pkg/labels",
|
||||
"pkg/runtime",
|
||||
"pkg/runtime/schema",
|
||||
"pkg/selection",
|
||||
"pkg/types",
|
||||
"pkg/util/errors",
|
||||
"pkg/util/intstr",
|
||||
"pkg/util/json",
|
||||
"pkg/util/net",
|
||||
"pkg/util/rand",
|
||||
"pkg/util/runtime",
|
||||
"pkg/util/sets",
|
||||
"pkg/util/validation",
|
||||
"pkg/util/validation/field",
|
||||
"pkg/util/wait",
|
||||
"pkg/watch",
|
||||
"third_party/forked/golang/reflect",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "def12e63c512da17043b4f0293f52d1006603d9f"
|
||||
version = "kubernetes-1.11.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3f8508e5e496dc2be1c8349968016bfed05f723e9dfb95c06f034805ef3c5534"
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"tools/clientcmd/api",
|
||||
"tools/clientcmd/api/v1",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "78700dec6369ba22221b72770783300f143df150"
|
||||
version = "v6.0.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/apparentlymart/go-cidr/cidr",
|
||||
"github.com/aws/aws-sdk-go/aws",
|
||||
"github.com/aws/aws-sdk-go/aws/session",
|
||||
"github.com/aws/aws-sdk-go/service/ec2",
|
||||
"github.com/coreos/ignition/config/util",
|
||||
"github.com/coreos/ignition/config/v2_2/types",
|
||||
"github.com/coreos/tectonic-config/config/kube-addon",
|
||||
"github.com/coreos/tectonic-config/config/kube-core",
|
||||
"github.com/coreos/tectonic-config/config/tectonic-network",
|
||||
"github.com/ghodss/yaml",
|
||||
"github.com/gregjones/httpcache",
|
||||
"github.com/gregjones/httpcache/diskcache",
|
||||
"github.com/libvirt/libvirt-go",
|
||||
"github.com/openshift/hive/contrib/pkg/awstagdeprovision",
|
||||
"github.com/pborman/uuid",
|
||||
"github.com/shurcooL/vfsgen",
|
||||
"github.com/sirupsen/logrus",
|
||||
"github.com/stretchr/testify/assert",
|
||||
"github.com/vincent-petithory/dataurl",
|
||||
"gopkg.in/AlecAivazis/survey.v1",
|
||||
"gopkg.in/alecthomas/kingpin.v2",
|
||||
"gopkg.in/yaml.v2",
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"k8s.io/apimachinery/pkg/util/rand",
|
||||
"k8s.io/apimachinery/pkg/util/wait",
|
||||
"k8s.io/client-go/tools/clientcmd/api/v1",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
Gopkg.toml | 54 lines (new file)
@@ -0,0 +1,54 @@
ignored = ["github.com/openshift/installer/tests*"]

[prune]
non-go = true
go-tests = true
unused-packages = true

[[constraint]]
name = "gopkg.in/AlecAivazis/survey.v1"
version = "1.6.2"

[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.15.39"

[[constraint]]
name = "github.com/coreos/ignition"
version = "0.26.0"

[[constraint]]
branch = "master"
name = "github.com/coreos/tectonic-config"

[[constraint]]
name = "github.com/libvirt/libvirt-go"
version = "4.7.0"

[[constraint]]
branch = "master"
name = "github.com/shurcooL/vfsgen"

[[constraint]]
name = "github.com/sirupsen/logrus"
version = "1.1.0"

[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.2"

[[constraint]]
branch = "master"
name = "github.com/vincent-petithory/dataurl"

[[constraint]]
name = "gopkg.in/alecthomas/kingpin.v2"
version = "2.2.6"

[[override]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.11.3"

[[constraint]]
name = "k8s.io/client-go"
version = "6.0.0"
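The [[constraint]] blocks above pin the direct dependencies that glide.yaml previously listed, while the [[override]] forces the transitive k8s.io/apimachinery version. As a hedged illustration of how such a pin is typically moved later (not something this commit does), a single project can be re-solved within its constraint:

```sh
# Sketch only: re-solve one project within its Gopkg.toml constraint and
# rewrite Gopkg.lock plus the matching vendor/ subtree.
dep ensure -update github.com/aws/aws-sdk-go
```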
glide.lock | 226 lines (generated, deleted)
@@ -1,226 +0,0 @@
|
||||
hash: 79d11a0d5fd7c9068b246a5a0ca0e776a194e56014ab53560d905ff2a5910655
|
||||
updated: 2018-10-01T16:43:43.753289671-07:00
|
||||
imports:
|
||||
- name: github.com/ajeddeloh/go-json
|
||||
version: 73d058cf8437a1989030afe571eeab9f90eebbbd
|
||||
- name: github.com/alecthomas/template
|
||||
version: a0175ee3bccc567396460bf5acd36800cb10c49c
|
||||
subpackages:
|
||||
- parse
|
||||
- name: github.com/alecthomas/units
|
||||
version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a
|
||||
- name: github.com/apparentlymart/go-cidr
|
||||
version: 2bd8b58cf4275aeb086ade613de226773e29e853
|
||||
subpackages:
|
||||
- cidr
|
||||
- name: github.com/aws/aws-sdk-go
|
||||
version: 53e36ebb6b2c26d228ba9563d06caf306227f110
|
||||
subpackages:
|
||||
- aws
|
||||
- aws/awserr
|
||||
- aws/awsutil
|
||||
- aws/client
|
||||
- aws/client/metadata
|
||||
- aws/corehandlers
|
||||
- aws/credentials
|
||||
- aws/credentials/ec2rolecreds
|
||||
- aws/credentials/endpointcreds
|
||||
- aws/credentials/stscreds
|
||||
- aws/csm
|
||||
- aws/defaults
|
||||
- aws/ec2metadata
|
||||
- aws/endpoints
|
||||
- aws/request
|
||||
- aws/session
|
||||
- aws/signer/v4
|
||||
- internal/sdkio
|
||||
- internal/sdkrand
|
||||
- internal/sdkuri
|
||||
- internal/shareddefaults
|
||||
- private/protocol
|
||||
- private/protocol/ec2query
|
||||
- private/protocol/eventstream
|
||||
- private/protocol/eventstream/eventstreamapi
|
||||
- private/protocol/query
|
||||
- private/protocol/query/queryutil
|
||||
- private/protocol/rest
|
||||
- private/protocol/restxml
|
||||
- private/protocol/xml/xmlutil
|
||||
- service/autoscaling
|
||||
- service/ec2
|
||||
- service/elb
|
||||
- service/iam
|
||||
- service/route53
|
||||
- service/s3
|
||||
- service/s3/s3iface
|
||||
- service/s3/s3manager
|
||||
- service/sts
|
||||
- name: github.com/coreos/go-semver
|
||||
version: 294930c1e79c64e7dbe360054274fdad492c8cf5
|
||||
subpackages:
|
||||
- semver
|
||||
- name: github.com/coreos/go-systemd
|
||||
version: 39ca1b05acc7ad1220e09f133283b8859a8b71ab
|
||||
subpackages:
|
||||
- dbus
|
||||
- unit
|
||||
- name: github.com/coreos/ignition
|
||||
version: 76107251acd117c6d3e5b4dae2b47f82f944984b
|
||||
subpackages:
|
||||
- config/shared/errors
|
||||
- config/shared/validations
|
||||
- config/util
|
||||
- config/v2_2/types
|
||||
- config/v2_3_experimental/types
|
||||
- config/validate/report
|
||||
- name: github.com/coreos/tectonic-config
|
||||
version: 0d649ebfd3552dfa5c6cc2cf053e17ba924b7024
|
||||
subpackages:
|
||||
- config/kube-addon
|
||||
- config/kube-core
|
||||
- config/tectonic-network
|
||||
- name: github.com/ghodss/yaml
|
||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
- name: github.com/go-ini/ini
|
||||
version: c787282c39ac1fc618827141a1f762240def08a3
|
||||
- name: github.com/gogo/protobuf
|
||||
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
|
||||
subpackages:
|
||||
- proto
|
||||
- sortkeys
|
||||
- name: github.com/golang/glog
|
||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
- name: github.com/google/btree
|
||||
version: 7d79101e329e5a3adf994758c578dab82b90c017
|
||||
- name: github.com/google/gofuzz
|
||||
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||
- name: github.com/gregjones/httpcache
|
||||
version: 787624de3eb7bd915c329cba748687a3b22666a6
|
||||
subpackages:
|
||||
- diskcache
|
||||
- name: github.com/inconshreveable/mousetrap
|
||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
- name: github.com/jmespath/go-jmespath
|
||||
version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5
|
||||
- name: github.com/kballard/go-shellquote
|
||||
version: 95032a82bc518f77982ea72343cc1ade730072f0
|
||||
- name: github.com/konsorten/go-windows-terminal-sequences
|
||||
version: b729f2633dfe35f4d1d8a32385f6685610ce1cb5
|
||||
- name: github.com/libvirt/libvirt-go
|
||||
version: d5e0adac44d4365a2d97e259f51ab76cc3c01b20
|
||||
- name: github.com/mattn/go-colorable
|
||||
version: efa589957cd060542a26d2dd7832fd6a6c6c3ade
|
||||
- name: github.com/mattn/go-isatty
|
||||
version: 3fb116b820352b7f0c281308a4d6250c22d94e27
|
||||
- name: github.com/mgutz/ansi
|
||||
version: 9520e82c474b0a04dd04f8a40959027271bab992
|
||||
- name: github.com/openshift/hive
|
||||
version: 7b650f6de8f2185fa21aca9237c68a9aa8d86d61
|
||||
subpackages:
|
||||
- contrib/pkg/awstagdeprovision
|
||||
- name: github.com/pborman/uuid
|
||||
version: ca53cad383cad2479bbba7f7a1a05797ec1386e4
|
||||
- name: github.com/peterbourgon/diskv
|
||||
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
|
||||
- name: github.com/sirupsen/logrus
|
||||
version: a67f783a3814b8729bd2dac5780b5f78f8dbd64d
|
||||
- name: github.com/spf13/cobra
|
||||
version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b
|
||||
- name: github.com/spf13/pflag
|
||||
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
|
||||
- name: github.com/vincent-petithory/dataurl
|
||||
version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
|
||||
- name: go4.org
|
||||
version: 03efcb870d84809319ea509714dd6d19a1498483
|
||||
subpackages:
|
||||
- errorutil
|
||||
- name: golang.org/x/crypto
|
||||
version: df8d4716b3472e4a531c33cedbe537dae921a1a9
|
||||
subpackages:
|
||||
- ssh/terminal
|
||||
- name: golang.org/x/net
|
||||
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
|
||||
subpackages:
|
||||
- html
|
||||
- html/atom
|
||||
- http2
|
||||
- http2/hpack
|
||||
- idna
|
||||
- lex/httplex
|
||||
- websocket
|
||||
- name: golang.org/x/sys
|
||||
version: 95c6576299259db960f6c5b9b69ea52422860fce
|
||||
subpackages:
|
||||
- unix
|
||||
- windows
|
||||
- name: golang.org/x/text
|
||||
version: b19bf474d317b857955b12035d2c5acb57ce8b01
|
||||
subpackages:
|
||||
- secure/bidirule
|
||||
- transform
|
||||
- unicode/bidi
|
||||
- unicode/norm
|
||||
- name: gopkg.in/AlecAivazis/survey.v1
|
||||
version: f30c5d1830c892f533140f29a1de89141dc217f5
|
||||
subpackages:
|
||||
- core
|
||||
- terminal
|
||||
- name: gopkg.in/alecthomas/kingpin.v2
|
||||
version: 947dcec5ba9c011838740e680966fd7087a71d0d
|
||||
- name: gopkg.in/inf.v0
|
||||
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||
- name: k8s.io/apimachinery
|
||||
version: def12e63c512da17043b4f0293f52d1006603d9f
|
||||
subpackages:
|
||||
- pkg/api/resource
|
||||
- pkg/apis/meta/v1
|
||||
- pkg/conversion
|
||||
- pkg/conversion/queryparams
|
||||
- pkg/fields
|
||||
- pkg/labels
|
||||
- pkg/runtime
|
||||
- pkg/runtime/schema
|
||||
- pkg/selection
|
||||
- pkg/types
|
||||
- pkg/util/errors
|
||||
- pkg/util/intstr
|
||||
- pkg/util/json
|
||||
- pkg/util/net
|
||||
- pkg/util/rand
|
||||
- pkg/util/runtime
|
||||
- pkg/util/sets
|
||||
- pkg/util/validation
|
||||
- pkg/util/validation/field
|
||||
- pkg/util/wait
|
||||
- pkg/watch
|
||||
- third_party/forked/golang/reflect
|
||||
- name: k8s.io/client-go
|
||||
version: 78700dec6369ba22221b72770783300f143df150
|
||||
subpackages:
|
||||
- tools/clientcmd/api
|
||||
- tools/clientcmd/api/v1
|
||||
- name: sigs.k8s.io/cluster-api
|
||||
version: fbc85d97affbf5c9ad97054c66b23ad2bc0ca097
|
||||
subpackages:
|
||||
- pkg/client/clientset_generated/clientset
|
||||
testImports:
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 782f4967f2dc4564575ca782fe2d04090b5faca8
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/pmezard/go-difflib
|
||||
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
|
||||
subpackages:
|
||||
- difflib
|
||||
- name: github.com/shurcooL/httpfs
|
||||
version: 809beceb23714880abc4a382a00c05f89d13b1cc
|
||||
subpackages:
|
||||
- vfsutil
|
||||
- name: github.com/shurcooL/vfsgen
|
||||
version: 33ae1944be3fe078a54c597f107e0867da19c713
|
||||
- name: github.com/stretchr/testify
|
||||
version: f35b8ab0b5a2cef36673838d662e249dd9c94686
|
||||
subpackages:
|
||||
- assert
|
||||
glide.yaml | 40 lines (deleted)
@@ -1,40 +0,0 @@
package: github.com/openshift/installer
excludeDirs:
- tests
import:
- package: github.com/ghodss/yaml
  version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- package: github.com/sirupsen/logrus
  version: 1.1.0
- package: gopkg.in/yaml.v2
  version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- package: gopkg.in/alecthomas/kingpin.v2
  version: 947dcec5ba9c011838740e680966fd7087a71d0d
- package: github.com/coreos/tectonic-config
  version: 0d649ebfd3552dfa5c6cc2cf053e17ba924b7024
- package: k8s.io/apimachinery
  version: kubernetes-1.11.3
- package: golang.org/x/crypto
  version: df8d4716b3472e4a531c33cedbe537dae921a1a9
- package: github.com/coreos/ignition
  version: v0.26.0
- package: github.com/vincent-petithory/dataurl
  version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
- package: sigs.k8s.io/cluster-api
  version: 0.0.0-alpha.2
  subpackages:
  - pkg/client/clientset_generated/clientset
- package: github.com/aws/aws-sdk-go
  version: ^1.15.39
- package: github.com/libvirt/libvirt-go
  version: ^v4.7.0
- package: gopkg.in/AlecAivazis/survey.v1
  version: ^1.6.2
- package: github.com/openshift/hive
testImport:
- package: github.com/stretchr/testify
  version: ^1.2.2
  subpackages:
  - assert
- package: github.com/shurcooL/vfsgen
- package: github.com/shurcooL/httpfs/vfsutil
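For comparison, the deleted glide.yaml above expressed roughly the same pins with glide's caret ranges. A minimal sketch of the workflow it used to drive, shown only to contrast it with `dep ensure` above and not part of this commit:

```sh
# Sketch only: resolve glide.yaml, write glide.lock, and populate vendor/,
# stripping nested vendor/ directories from dependencies.
glide install --strip-vendor
```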
vendor/github.com/aws/aws-sdk-go/aws/version.go | 2 lines (generated, vendored)
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.15.46"
+const SDKVersion = "1.15.43"
vendor/github.com/aws/aws-sdk-go/doc.go | 405 lines (generated, vendored, deleted)
@@ -1,405 +0,0 @@
|
||||
// Package sdk is the official AWS SDK for the Go programming language.
|
||||
//
|
||||
// The AWS SDK for Go provides APIs and utilities that developers can use to
|
||||
// build Go applications that use AWS services, such as Amazon Elastic Compute
|
||||
// Cloud (Amazon EC2) and Amazon Simple Storage Service (Amazon S3).
|
||||
//
|
||||
// The SDK removes the complexity of coding directly against a web service
|
||||
// interface. It hides a lot of the lower-level plumbing, such as authentication,
|
||||
// request retries, and error handling.
|
||||
//
|
||||
// The SDK also includes helpful utilities on top of the AWS APIs that add additional
|
||||
// capabilities and functionality. For example, the Amazon S3 Download and Upload
|
||||
// Manager will automatically split up large objects into multiple parts and
|
||||
// transfer them concurrently.
|
||||
//
|
||||
// See the s3manager package documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/
|
||||
//
|
||||
// Getting More Information
|
||||
//
|
||||
// Checkout the Getting Started Guide and API Reference Docs detailed the SDK's
|
||||
// components and details on each AWS client the SDK supports.
|
||||
//
|
||||
// The Getting Started Guide provides examples and detailed description of how
|
||||
// to get setup with the SDK.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/welcome.html
|
||||
//
|
||||
// The API Reference Docs include a detailed breakdown of the SDK's components
|
||||
// such as utilities and AWS clients. Use this as a reference of the Go types
|
||||
// included with the SDK, such as AWS clients, API operations, and API parameters.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/
|
||||
//
|
||||
// Overview of SDK's Packages
|
||||
//
|
||||
// The SDK is composed of two main components, SDK core, and service clients.
|
||||
// The SDK core packages are all available under the aws package at the root of
|
||||
// the SDK. Each client for a supported AWS service is available within its own
|
||||
// package under the service folder at the root of the SDK.
|
||||
//
|
||||
// * aws - SDK core, provides common shared types such as Config, Logger,
|
||||
// and utilities to make working with API parameters easier.
|
||||
//
|
||||
// * awserr - Provides the error interface that the SDK will use for all
|
||||
// errors that occur in the SDK's processing. This includes service API
|
||||
// response errors as well. The Error type is made up of a code and message.
|
||||
// Cast the SDK's returned error type to awserr.Error and call the Code
|
||||
// method to compare returned error to specific error codes. See the package's
|
||||
// documentation for additional values that can be extracted such as RequestId.
|
||||
//
|
||||
// * credentials - Provides the types and built in credentials providers
|
||||
// the SDK will use to retrieve AWS credentials to make API requests with.
|
||||
// Nested under this folder are also additional credentials providers such as
|
||||
// stscreds for assuming IAM roles, and ec2rolecreds for EC2 Instance roles.
|
||||
//
|
||||
// * endpoints - Provides the AWS Regions and Endpoints metadata for the SDK.
|
||||
// Use this to lookup AWS service endpoint information such as which services
|
||||
// are in a region, and what regions a service is in. Constants are also provided
|
||||
// for all region identifiers, e.g UsWest2RegionID for "us-west-2".
|
||||
//
|
||||
// * session - Provides initial default configuration, and load
|
||||
// configuration from external sources such as environment and shared
|
||||
// credentials file.
|
||||
//
|
||||
// * request - Provides the API request sending, and retry logic for the SDK.
|
||||
// This package also includes utilities for defining your own request
|
||||
// retryer, and configuring how the SDK processes the request.
|
||||
//
|
||||
// * service - Clients for AWS services. All services supported by the SDK are
|
||||
// available under this folder.
|
||||
//
|
||||
// How to Use the SDK's AWS Service Clients
|
||||
//
|
||||
// The SDK includes the Go types and utilities you can use to make requests to
|
||||
// AWS service APIs. Within the service folder at the root of the SDK you'll find
|
||||
// a package for each AWS service the SDK supports. All service clients follows
|
||||
// a common pattern of creation and usage.
|
||||
//
|
||||
// When creating a client for an AWS service you'll first need to have a Session
|
||||
// value constructed. The Session provides shared configuration that can be shared
|
||||
// between your service clients. When service clients are created you can pass
|
||||
// in additional configuration via the aws.Config type to override configuration
|
||||
// provided by in the Session to create service client instances with custom
|
||||
// configuration.
|
||||
//
|
||||
// Once the service's client is created you can use it to make API requests the
|
||||
// AWS service. These clients are safe to use concurrently.
|
||||
//
|
||||
// Configuring the SDK
|
||||
//
|
||||
// In the AWS SDK for Go, you can configure settings for service clients, such
|
||||
// as the log level and maximum number of retries. Most settings are optional;
|
||||
// however, for each service client, you must specify a region and your credentials.
|
||||
// The SDK uses these values to send requests to the correct AWS region and sign
|
||||
// requests with the correct credentials. You can specify these values as part
|
||||
// of a session or as environment variables.
|
||||
//
|
||||
// See the SDK's configuration guide for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
|
||||
//
|
||||
// See the session package documentation for more information on how to use Session
|
||||
// with the SDK.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/session/
|
||||
//
|
||||
// See the Config type in the aws package for more information on configuration
|
||||
// options.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
|
||||
//
|
||||
// Configuring Credentials
|
||||
//
|
||||
// When using the SDK you'll generally need your AWS credentials to authenticate
|
||||
// with AWS services. The SDK supports multiple methods of supporting these
|
||||
// credentials. By default the SDK will source credentials automatically from
|
||||
// its default credential chain. See the session package for more information
|
||||
// on this chain, and how to configure it. The common items in the credential
|
||||
// chain are the following:
|
||||
//
|
||||
// * Environment Credentials - Set of environment variables that are useful
|
||||
// when sub processes are created for specific roles.
|
||||
//
|
||||
// * Shared Credentials file (~/.aws/credentials) - This file stores your
|
||||
// credentials based on a profile name and is useful for local development.
|
||||
//
|
||||
// * EC2 Instance Role Credentials - Use EC2 Instance Role to assign credentials
|
||||
// to application running on an EC2 instance. This removes the need to manage
|
||||
// credential files in production.
|
||||
//
|
||||
// Credentials can be configured in code as well by setting the Config's Credentials
|
||||
// value to a custom provider or using one of the providers included with the
|
||||
// SDK to bypass the default credential chain and use a custom one. This is
|
||||
// helpful when you want to instruct the SDK to only use a specific set of
|
||||
// credentials or providers.
|
||||
//
|
||||
// This example creates a credential provider for assuming an IAM role, "myRoleARN"
|
||||
// and configures the S3 service client to use that role for API requests.
|
||||
//
|
||||
// // Initial credentials loaded from SDK's default credential chain. Such as
|
||||
// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
|
||||
// // Role. These credentials will be used to to make the STS Assume Role API.
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create the credentials from AssumeRoleProvider to assume the role
|
||||
// // referenced by the "myRoleARN" ARN.
|
||||
// creds := stscreds.NewCredentials(sess, "myRoleArn")
|
||||
//
|
||||
// // Create service client value configured for credentials
|
||||
// // from assumed role.
|
||||
// svc := s3.New(sess, &aws.Config{Credentials: creds})/
|
||||
//
|
||||
// See the credentials package documentation for more information on credential
|
||||
// providers included with the SDK, and how to customize the SDK's usage of
|
||||
// credentials.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials
|
||||
//
|
||||
// The SDK has support for the shared configuration file (~/.aws/config). This
|
||||
// support can be enabled by setting the environment variable, "AWS_SDK_LOAD_CONFIG=1",
|
||||
// or enabling the feature in code when creating a Session via the
|
||||
// Option's SharedConfigState parameter.
|
||||
//
|
||||
// sess := session.Must(session.NewSessionWithOptions(session.Options{
|
||||
// SharedConfigState: session.SharedConfigEnable,
|
||||
// }))
|
||||
//
|
||||
// Configuring AWS Region
|
||||
//
|
||||
// In addition to the credentials you'll need to specify the region the SDK
|
||||
// will use to make AWS API requests to. In the SDK you can specify the region
|
||||
// either with an environment variable, or directly in code when a Session or
|
||||
// service client is created. The last value specified in code wins if the region
|
||||
// is specified multiple ways.
|
||||
//
|
||||
// To set the region via the environment variable set the "AWS_REGION" to the
|
||||
// region you want to the SDK to use. Using this method to set the region will
|
||||
// allow you to run your application in multiple regions without needing additional
|
||||
// code in the application to select the region.
|
||||
//
|
||||
// AWS_REGION=us-west-2
|
||||
//
|
||||
// The endpoints package includes constants for all regions the SDK knows. The
|
||||
// values are all suffixed with RegionID. These values are helpful, because they
|
||||
// reduce the need to type the region string manually.
|
||||
//
|
||||
// To set the region on a Session use the aws package's Config struct parameter
|
||||
// Region to the AWS region you want the service clients created from the session to
|
||||
// use. This is helpful when you want to create multiple service clients, and
|
||||
// all of the clients make API requests to the same region.
|
||||
//
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// Region: aws.String(endpoints.UsWest2RegionID),
|
||||
// }))
|
||||
//
|
||||
// See the endpoints package for the AWS Regions and Endpoints metadata.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/
|
||||
//
|
||||
// In addition to setting the region when creating a Session you can also set
|
||||
// the region on a per service client bases. This overrides the region of a
|
||||
// Session. This is helpful when you want to create service clients in specific
|
||||
// regions different from the Session's region.
|
||||
//
|
||||
// svc := s3.New(sess, &aws.Config{
|
||||
// Region: aws.String(endpoints.UsWest2RegionID),
|
||||
// })
|
||||
//
|
||||
// See the Config type in the aws package for more information and additional
|
||||
// options such as setting the Endpoint, and other service client configuration options.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
|
||||
//
|
||||
// Making API Requests
|
||||
//
|
||||
// Once the client is created you can make an API request to the service.
|
||||
// Each API method takes a input parameter, and returns the service response
|
||||
// and an error. The SDK provides methods for making the API call in multiple ways.
|
||||
//
|
||||
// In this list we'll use the S3 ListObjects API as an example for the different
|
||||
// ways of making API requests.
|
||||
//
|
||||
// * ListObjects - Base API operation that will make the API request to the service.
|
||||
//
|
||||
// * ListObjectsRequest - API methods suffixed with Request will construct the
|
||||
// API request, but not send it. This is also helpful when you want to get a
|
||||
// presigned URL for a request, and share the presigned URL instead of your
|
||||
// application making the request directly.
|
||||
//
|
||||
// * ListObjectsPages - Same as the base API operation, but uses a callback to
|
||||
// automatically handle pagination of the API's response.
|
||||
//
|
||||
// * ListObjectsWithContext - Same as base API operation, but adds support for
|
||||
// the Context pattern. This is helpful for controlling the canceling of in
|
||||
// flight requests. See the Go standard library context package for more
|
||||
// information. This method also takes request package's Option functional
|
||||
// options as the variadic argument for modifying how the request will be
|
||||
// made, or extracting information from the raw HTTP response.
|
||||
//
|
||||
// * ListObjectsPagesWithContext - same as ListObjectsPages, but adds support for
|
||||
// the Context pattern. Similar to ListObjectsWithContext this method also
|
||||
// takes the request package's Option function option types as the variadic
|
||||
// argument.
|
||||
//
|
||||
// In addition to the API operations the SDK also includes several higher level
|
||||
// methods that abstract checking for and waiting for an AWS resource to be in
|
||||
// a desired state. In this list we'll use WaitUntilBucketExists to demonstrate
|
||||
// the different forms of waiters.
|
||||
//
|
||||
// * WaitUntilBucketExists. - Method to make API request to query an AWS service for
|
||||
// a resource's state. Will return successfully when that state is accomplished.
|
||||
//
|
||||
// * WaitUntilBucketExistsWithContext - Same as WaitUntilBucketExists, but adds
|
||||
// support for the Context pattern. In addition these methods take request
|
||||
// package's WaiterOptions to configure the waiter, and how underlying request
|
||||
// will be made by the SDK.
|
||||
//
|
||||
// The API method will document which error codes the service might return for
|
||||
// the operation. These errors will also be available as const strings prefixed
|
||||
// with "ErrCode" in the service client's package. If there are no errors listed
|
||||
// in the API's SDK documentation you'll need to consult the AWS service's API
|
||||
// documentation for the errors that could be returned.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
//
|
||||
// result, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
|
||||
// Bucket: aws.String("my-bucket"),
|
||||
// Key: aws.String("my-key"),
|
||||
// })
|
||||
// if err != nil {
|
||||
// // Cast err to awserr.Error to handle specific error codes.
|
||||
// aerr, ok := err.(awserr.Error)
|
||||
// if ok && aerr.Code() == s3.ErrCodeNoSuchKey {
|
||||
// // Specific error code handling
|
||||
// }
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// // Make sure to close the body when done with it for S3 GetObject APIs or
|
||||
// // will leak connections.
|
||||
// defer result.Body.Close()
|
||||
//
|
||||
// fmt.Println("Object Size:", aws.StringValue(result.ContentLength))
|
||||
//
|
||||
// API Request Pagination and Resource Waiters
|
||||
//
|
||||
// Pagination helper methods are suffixed with "Pages", and provide the
|
||||
// functionality needed to round trip API page requests. Pagination methods
|
||||
// take a callback function that will be called for each page of the API's response.
|
||||
//
|
||||
// objects := []string{}
|
||||
// err := svc.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
|
||||
// Bucket: aws.String(myBucket),
|
||||
// }, func(p *s3.ListObjectsOutput, lastPage bool) bool {
|
||||
// for _, o := range p.Contents {
|
||||
// objects = append(objects, aws.StringValue(o.Key))
|
||||
// }
|
||||
// return true // continue paging
|
||||
// })
|
||||
// if err != nil {
|
||||
// panic(fmt.Sprintf("failed to list objects for bucket, %s, %v", myBucket, err))
|
||||
// }
|
||||
//
|
||||
// fmt.Println("Objects in bucket:", objects)
|
||||
//
|
||||
// Waiter helper methods provide the functionality to wait for an AWS resource
|
||||
// state. These methods abstract the logic needed to to check the state of an
|
||||
// AWS resource, and wait until that resource is in a desired state. The waiter
|
||||
// will block until the resource is in the state that is desired, an error occurs,
|
||||
// or the waiter times out. If a resource times out the error code returned will
|
||||
// be request.WaiterResourceNotReadyErrorCode.
|
||||
//
|
||||
// err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
|
||||
// Bucket: aws.String(myBucket),
|
||||
// })
|
||||
// if err != nil {
|
||||
// aerr, ok := err.(awserr.Error)
|
||||
// if ok && aerr.Code() == request.WaiterResourceNotReadyErrorCode {
|
||||
// fmt.Fprintf(os.Stderr, "timed out while waiting for bucket to exist")
|
||||
// }
|
||||
// panic(fmt.Errorf("failed to wait for bucket to exist, %v", err))
|
||||
// }
|
||||
// fmt.Println("Bucket", myBucket, "exists")
|
||||
//
|
||||
// Complete SDK Example
|
||||
//
|
||||
// This example shows a complete working Go file which will upload a file to S3
|
||||
// and use the Context pattern to implement timeout logic that will cancel the
|
||||
// request if it takes too long. This example highlights how to use sessions,
|
||||
// create a service client, make a request, handle the error, and process the
|
||||
// response.
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "context"
|
||||
// "flag"
|
||||
// "fmt"
|
||||
// "os"
|
||||
// "time"
|
||||
//
|
||||
// "github.com/aws/aws-sdk-go/aws"
|
||||
// "github.com/aws/aws-sdk-go/aws/awserr"
|
||||
// "github.com/aws/aws-sdk-go/aws/request"
|
||||
// "github.com/aws/aws-sdk-go/aws/session"
|
||||
// "github.com/aws/aws-sdk-go/service/s3"
|
||||
// )
|
||||
//
|
||||
// // Uploads a file to S3 given a bucket and object key. Also takes a duration
|
||||
// // value to terminate the update if it doesn't complete within that time.
|
||||
// //
|
||||
// // The AWS Region needs to be provided in the AWS shared config or on the
|
||||
// // environment variable as `AWS_REGION`. Credentials also must be provided
|
||||
// // Will default to shared config file, but can load from environment if provided.
|
||||
// //
|
||||
// // Usage:
|
||||
// // # Upload myfile.txt to myBucket/myKey. Must complete within 10 minutes or will fail
|
||||
// // go run withContext.go -b mybucket -k myKey -d 10m < myfile.txt
|
||||
// func main() {
|
||||
// var bucket, key string
|
||||
// var timeout time.Duration
|
||||
//
|
||||
// flag.StringVar(&bucket, "b", "", "Bucket name.")
|
||||
// flag.StringVar(&key, "k", "", "Object key name.")
|
||||
// flag.DurationVar(&timeout, "d", 0, "Upload timeout.")
|
||||
// flag.Parse()
|
||||
//
|
||||
// // All clients require a Session. The Session provides the client with
|
||||
// // shared configuration such as region, endpoint, and credentials. A
|
||||
// // Session should be shared where possible to take advantage of
|
||||
// // configuration and credential caching. See the session package for
|
||||
// // more information.
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create a new instance of the service's client with a Session.
|
||||
// // Optional aws.Config values can also be provided as variadic arguments
|
||||
// // to the New function. This option allows you to provide service
|
||||
// // specific configuration.
|
||||
// svc := s3.New(sess)
|
||||
//
|
||||
// // Create a context with a timeout that will abort the upload if it takes
|
||||
// // more than the passed in timeout.
|
||||
// ctx := context.Background()
|
||||
// var cancelFn func()
|
||||
// if timeout > 0 {
|
||||
// ctx, cancelFn = context.WithTimeout(ctx, timeout)
|
||||
// }
|
||||
// // Ensure the context is canceled to prevent leaking.
|
||||
// // See context package for more information, https://golang.org/pkg/context/
|
||||
// defer cancelFn()
|
||||
//
|
||||
// // Uploads the object to S3. The Context will interrupt the request if the
|
||||
// // timeout expires.
|
||||
// _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
||||
// Bucket: aws.String(bucket),
|
||||
// Key: aws.String(key),
|
||||
// Body: os.Stdin,
|
||||
// })
|
||||
// if err != nil {
|
||||
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
|
||||
// // If the SDK can determine the request or retry delay was canceled
|
||||
// // by a context the CanceledErrorCode error code will be returned.
|
||||
// fmt.Fprintf(os.Stderr, "upload canceled due to timeout, %v\n", err)
|
||||
// } else {
|
||||
// fmt.Fprintf(os.Stderr, "failed to upload object, %v\n", err)
|
||||
// }
|
||||
// os.Exit(1)
|
||||
// }
|
||||
//
|
||||
// fmt.Printf("successfully uploaded file to %s/%s\n", bucket, key)
|
||||
// }
|
||||
package sdk
|
||||
vendor/github.com/aws/aws-sdk-go/service/ec2/api.go | 9 lines (generated, vendored)
@@ -71161,15 +71161,6 @@ const (
 
 	// InstanceTypeZ1d12xlarge is a InstanceType enum value
 	InstanceTypeZ1d12xlarge = "z1d.12xlarge"
-
-	// InstanceTypeU6tb1Metal is a InstanceType enum value
-	InstanceTypeU6tb1Metal = "u-6tb1.metal"
-
-	// InstanceTypeU9tb1Metal is a InstanceType enum value
-	InstanceTypeU9tb1Metal = "u-9tb1.metal"
-
-	// InstanceTypeU12tb1Metal is a InstanceType enum value
-	InstanceTypeU12tb1Metal = "u-12tb1.metal"
 )
 
 const (
vendor/github.com/coreos/go-semver/example.go | 20 lines (generated, vendored, deleted)
@@ -1,20 +0,0 @@
package main

import (
	"fmt"
	"github.com/coreos/go-semver/semver"
	"os"
)

func main() {
	vA, err := semver.NewVersion(os.Args[1])
	if err != nil {
		fmt.Println(err.Error())
	}
	vB, err := semver.NewVersion(os.Args[2])
	if err != nil {
		fmt.Println(err.Error())
	}

	fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
}
vendor/github.com/coreos/go-systemd/dbus/dbus.go | 240 lines (generated, vendored, deleted)
@@ -1,240 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
const (
|
||||
alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
|
||||
num = `0123456789`
|
||||
alphanum = alpha + num
|
||||
signalBuffer = 100
|
||||
)
|
||||
|
||||
// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
|
||||
func needsEscape(i int, b byte) bool {
|
||||
// Escape everything that is not a-z-A-Z-0-9
|
||||
// Also escape 0-9 if it's the first character
|
||||
return strings.IndexByte(alphanum, b) == -1 ||
|
||||
(i == 0 && strings.IndexByte(num, b) != -1)
|
||||
}
|
||||
|
||||
// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
|
||||
// rules that systemd uses for serializing special characters.
|
||||
func PathBusEscape(path string) string {
|
||||
// Special case the empty string
|
||||
if len(path) == 0 {
|
||||
return "_"
|
||||
}
|
||||
n := []byte{}
|
||||
for i := 0; i < len(path); i++ {
|
||||
c := path[i]
|
||||
if needsEscape(i, c) {
|
||||
e := fmt.Sprintf("_%x", c)
|
||||
n = append(n, []byte(e)...)
|
||||
} else {
|
||||
n = append(n, c)
|
||||
}
|
||||
}
|
||||
return string(n)
|
||||
}
|
||||
|
||||
// pathBusUnescape is the inverse of PathBusEscape.
|
||||
func pathBusUnescape(path string) string {
|
||||
if path == "_" {
|
||||
return ""
|
||||
}
|
||||
n := []byte{}
|
||||
for i := 0; i < len(path); i++ {
|
||||
c := path[i]
|
||||
if c == '_' && i+2 < len(path) {
|
||||
res, err := hex.DecodeString(path[i+1 : i+3])
|
||||
if err == nil {
|
||||
n = append(n, res...)
|
||||
}
|
||||
i += 2
|
||||
} else {
|
||||
n = append(n, c)
|
||||
}
|
||||
}
|
||||
return string(n)
|
||||
}
|
||||
|
||||
// Conn is a connection to systemd's dbus endpoint.
|
||||
type Conn struct {
|
||||
// sysconn/sysobj are only used to call dbus methods
|
||||
sysconn *dbus.Conn
|
||||
sysobj dbus.BusObject
|
||||
|
||||
// sigconn/sigobj are only used to receive dbus signals
|
||||
sigconn *dbus.Conn
|
||||
sigobj dbus.BusObject
|
||||
|
||||
jobListener struct {
|
||||
jobs map[dbus.ObjectPath]chan<- string
|
||||
sync.Mutex
|
||||
}
|
||||
subStateSubscriber struct {
|
||||
updateCh chan<- *SubStateUpdate
|
||||
errCh chan<- error
|
||||
sync.Mutex
|
||||
ignore map[dbus.ObjectPath]int64
|
||||
cleanIgnore int64
|
||||
}
|
||||
propertiesSubscriber struct {
|
||||
updateCh chan<- *PropertiesUpdate
|
||||
errCh chan<- error
|
||||
sync.Mutex
|
||||
}
|
||||
}
|
||||
|
||||
// New establishes a connection to any available bus and authenticates.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func New() (*Conn, error) {
|
||||
conn, err := NewSystemConnection()
|
||||
if err != nil && os.Geteuid() == 0 {
|
||||
return NewSystemdConnection()
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
// NewSystemConnection establishes a connection to the system bus and authenticates.
|
||||
// Callers should call Close() when done with the connection
|
||||
func NewSystemConnection() (*Conn, error) {
|
||||
return NewConnection(func() (*dbus.Conn, error) {
|
||||
return dbusAuthHelloConnection(dbus.SystemBusPrivate)
|
||||
})
|
||||
}
|
||||
|
||||
// NewUserConnection establishes a connection to the session bus and
|
||||
// authenticates. This can be used to connect to systemd user instances.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func NewUserConnection() (*Conn, error) {
|
||||
return NewConnection(func() (*dbus.Conn, error) {
|
||||
return dbusAuthHelloConnection(dbus.SessionBusPrivate)
|
||||
})
|
||||
}
|
||||
|
||||
// NewSystemdConnection establishes a private, direct connection to systemd.
|
||||
// This can be used for communicating with systemd without a dbus daemon.
|
||||
// Callers should call Close() when done with the connection.
|
||||
func NewSystemdConnection() (*Conn, error) {
|
||||
return NewConnection(func() (*dbus.Conn, error) {
|
||||
// We skip Hello when talking directly to systemd.
|
||||
return dbusAuthConnection(func() (*dbus.Conn, error) {
|
||||
return dbus.Dial("unix:path=/run/systemd/private")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Close closes an established connection
|
||||
func (c *Conn) Close() {
|
||||
c.sysconn.Close()
|
||||
c.sigconn.Close()
|
||||
}
|
||||
|
||||
// NewConnection establishes a connection to a bus using a caller-supplied function.
|
||||
// This allows connecting to remote buses through a user-supplied mechanism.
|
||||
// The supplied function may be called multiple times, and should return independent connections.
|
||||
// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded,
|
||||
// and any authentication should be handled by the function.
|
||||
func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
|
||||
sysconn, err := dialBus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sigconn, err := dialBus()
|
||||
if err != nil {
|
||||
sysconn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Conn{
|
||||
sysconn: sysconn,
|
||||
sysobj: systemdObject(sysconn),
|
||||
sigconn: sigconn,
|
||||
sigobj: systemdObject(sigconn),
|
||||
}
|
||||
|
||||
c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64)
|
||||
c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
|
||||
|
||||
// Set up the listeners on jobs so that we can get completions.
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
|
||||
|
||||
c.dispatch()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
|
||||
// interface. The value is returned in its string representation, as defined at
|
||||
// https://developer.gnome.org/glib/unstable/gvariant-text.html
|
||||
func (c *Conn) GetManagerProperty(prop string) (string, error) {
|
||||
variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return variant.String(), nil
|
||||
}
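A minimal sketch of how the constructors and GetManagerProperty fit together; "Version" is a standard Manager property, and the fallback behaviour in the comment simply restates the New() code above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	// New() tries the system bus first and falls back to the private
	// systemd socket when running as root.
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The value comes back in its GVariant text representation.
	version, err := conn.GetManagerProperty("Version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("systemd version:", version)
}
```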
|
||||
|
||||
func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
|
||||
conn, err := createBus()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only use EXTERNAL method, and hardcode the uid (not username)
|
||||
// to avoid a username lookup (which requires a dynamically linked
|
||||
// libc)
|
||||
methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
|
||||
|
||||
err = conn.Auth(methods)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
|
||||
conn, err := dbusAuthConnection(createBus)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = conn.Hello(); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func systemdObject(conn *dbus.Conn) dbus.BusObject {
|
||||
return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
|
||||
}
592
vendor/github.com/coreos/go-systemd/dbus/methods.go
generated
vendored
@@ -1,592 +0,0 @@
// Copyright 2015, 2018 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
func (c *Conn) jobComplete(signal *dbus.Signal) {
|
||||
var id uint32
|
||||
var job dbus.ObjectPath
|
||||
var unit string
|
||||
var result string
|
||||
dbus.Store(signal.Body, &id, &job, &unit, &result)
|
||||
c.jobListener.Lock()
|
||||
out, ok := c.jobListener.jobs[job]
|
||||
if ok {
|
||||
out <- result
|
||||
delete(c.jobListener.jobs, job)
|
||||
}
|
||||
c.jobListener.Unlock()
|
||||
}
|
||||
|
||||
func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
|
||||
if ch != nil {
|
||||
c.jobListener.Lock()
|
||||
defer c.jobListener.Unlock()
|
||||
}
|
||||
|
||||
var p dbus.ObjectPath
|
||||
err := c.sysobj.Call(job, 0, args...).Store(&p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if ch != nil {
|
||||
c.jobListener.jobs[p] = ch
|
||||
}
|
||||
|
||||
// ignore error since 0 is fine if conversion fails
|
||||
jobID, _ := strconv.Atoi(path.Base(string(p)))
|
||||
|
||||
return jobID, nil
|
||||
}
|
||||
|
||||
// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
|
||||
// specified by the mode string).
|
||||
//
|
||||
// Takes the unit to activate, plus a mode string. The mode needs to be one of
|
||||
// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
|
||||
// "replace" the call will start the unit and its dependencies, possibly
|
||||
// replacing already queued jobs that conflict with this. If "fail" the call
|
||||
// will start the unit and its dependencies, but will fail if this would change
|
||||
// an already queued job. If "isolate" the call will start the unit in question
|
||||
// and terminate all units that aren't dependencies of it. If
|
||||
// "ignore-dependencies" it will start a unit but ignore all its dependencies.
|
||||
// If "ignore-requirements" it will start a unit but only ignore the
|
||||
// requirement dependencies. It is not recommended to make use of the latter
|
||||
// two options.
|
||||
//
|
||||
// If the provided channel is non-nil, a result string will be sent to it upon
|
||||
// job completion: one of done, canceled, timeout, failed, dependency, skipped.
|
||||
// done indicates successful execution of a job. canceled indicates that a job
|
||||
// has been canceled before it finished execution. timeout indicates that the
|
||||
// job timeout was reached. failed indicates that the job failed. dependency
|
||||
// indicates that a job this job has been depending on failed and the job hence
|
||||
// has been removed too. skipped indicates that a job was skipped because it
|
||||
// didn't apply to the unit's current state.
|
||||
//
|
||||
// If no error occurs, the ID of the underlying systemd job will be returned. There
|
||||
// does exist the possibility for no error to be returned, but for the returned job
|
||||
// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
|
||||
// should not be considered authoritative.
|
||||
//
|
||||
// If an error does occur, it will be returned to the user alongside a job ID of 0.
|
||||
func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
|
||||
}
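A hedged usage sketch for StartUnit as documented above; `nginx.service` is only a placeholder unit name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The channel receives one of: done, canceled, timeout, failed,
	// dependency, skipped.
	resultCh := make(chan string)
	jobID, err := conn.StartUnit("nginx.service", "replace", resultCh)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("queued job %d, result: %s\n", jobID, <-resultCh)
}
```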
|
||||
|
||||
// StopUnit is similar to StartUnit but stops the specified unit rather
|
||||
// than starting it.
|
||||
func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
|
||||
func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
|
||||
}
|
||||
|
||||
// RestartUnit restarts a service. If a service is restarted that isn't
|
||||
// running it will be started.
|
||||
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// TryRestartUnit is like RestartUnit, except that a service that isn't running
|
||||
// is not affected by the restart.
|
||||
func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
// restart otherwise.
|
||||
func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
// "Try"-flavored restart otherwise.
|
||||
func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
|
||||
}
|
||||
|
||||
// StartTransientUnit() may be used to create and start a transient unit, which
|
||||
// will be released as soon as it is not running or referenced anymore or the
|
||||
// system is rebooted. name is the unit name including suffix, and must be
|
||||
// unique. mode is the same as in StartUnit(), properties contains properties
|
||||
// of the unit.
|
||||
func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
|
||||
return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
|
||||
}
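A sketch of starting a transient unit with the property helpers defined in properties.go below; the unit name and command are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Placeholder transient service that just sleeps for a minute.
	props := []dbus.Property{
		dbus.PropDescription("transient sleep unit"),
		dbus.PropExecStart([]string{"/bin/sleep", "60"}, false),
	}

	done := make(chan string)
	if _, err := conn.StartTransientUnit("example-sleep.service", "replace", props, done); err != nil {
		log.Fatal(err)
	}
	// The channel reports the result of the start job, e.g. "done".
	fmt.Println("start job result:", <-done)
}
```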
|
||||
|
||||
// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
|
||||
// processes are killed.
|
||||
func (c *Conn) KillUnit(name string, signal int32) {
|
||||
c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
|
||||
}
|
||||
|
||||
// ResetFailedUnit resets the "failed" state of a specific unit.
|
||||
func (c *Conn) ResetFailedUnit(name string) error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
|
||||
}
|
||||
|
||||
// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`.
|
||||
func (c *Conn) SystemState() (*Property, error) {
|
||||
var err error
|
||||
var prop dbus.Variant
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Property{Name: "SystemState", Value: prop}, nil
|
||||
}
|
||||
|
||||
// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface
|
||||
func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) {
|
||||
var err error
|
||||
var props map[string]dbus.Variant
|
||||
|
||||
if !path.IsValid() {
|
||||
return nil, fmt.Errorf("invalid unit name: %v", path)
|
||||
}
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := make(map[string]interface{}, len(props))
|
||||
for k, v := range props {
|
||||
out[k] = v.Value()
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties.
|
||||
func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
|
||||
path := unitPath(unit)
|
||||
return c.getProperties(path, "org.freedesktop.systemd1.Unit")
|
||||
}
|
||||
|
||||
// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties.
|
||||
func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
|
||||
return c.getProperties(path, "org.freedesktop.systemd1.Unit")
|
||||
}
|
||||
|
||||
func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
|
||||
var err error
|
||||
var prop dbus.Variant
|
||||
|
||||
path := unitPath(unit)
|
||||
if !path.IsValid() {
|
||||
return nil, errors.New("invalid unit name: " + unit)
|
||||
}
|
||||
|
||||
obj := c.sysconn.Object("org.freedesktop.systemd1", path)
|
||||
err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Property{Name: propertyName, Value: prop}, nil
|
||||
}
|
||||
|
||||
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
|
||||
}
|
||||
|
||||
// GetServiceProperty returns the property for the given service name and property name.
|
||||
func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
|
||||
}
|
||||
|
||||
// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
|
||||
// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
|
||||
// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
|
||||
func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
|
||||
path := unitPath(unit)
|
||||
return c.getProperties(path, "org.freedesktop.systemd1."+unitType)
|
||||
}
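A hedged sketch of reading both generic and type-specific properties; `ssh.service`, `ActiveState`, and `MainPID` are examples of commonly present keys, not anything specific to this repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Generic unit properties from org.freedesktop.systemd1.Unit.
	unitProps, err := conn.GetUnitProperties("ssh.service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ActiveState:", unitProps["ActiveState"])

	// Type-specific properties; "Service" selects org.freedesktop.systemd1.Service.
	svcProps, err := conn.GetUnitTypeProperties("ssh.service", "Service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("MainPID:", svcProps["MainPID"])
}
```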
|
||||
|
||||
// SetUnitProperties() may be used to modify certain unit properties at runtime.
|
||||
// Not all properties may be changed at runtime, but many resource management
|
||||
// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
|
||||
// instantly, and stored on disk for future boots, unless runtime is true, in which
|
||||
// case the settings only apply until the next reboot. name is the name of the unit
|
||||
// to modify. properties are the settings to set, encoded as an array of property
|
||||
// name and value pairs.
|
||||
func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
|
||||
}
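A minimal sketch of changing a resource-control setting at runtime, assuming a placeholder unit name; CPUShares is one of the properties the systemd docs quoted in properties.go list as settable:

```go
package main

import (
	"log"

	systemd "github.com/coreos/go-systemd/dbus"
	godbus "github.com/godbus/dbus"
)

func main() {
	conn, err := systemd.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// runtime=true keeps the change in /run only, so it does not survive
	// a reboot; the unit name is a placeholder.
	prop := systemd.Property{
		Name:  "CPUShares",
		Value: godbus.MakeVariant(uint64(512)),
	}
	if err := conn.SetUnitProperties("example.service", true, prop); err != nil {
		log.Fatal(err)
	}
}
```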
|
||||
|
||||
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
|
||||
return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
|
||||
}
|
||||
|
||||
type UnitStatus struct {
|
||||
Name string // The primary unit name as string
|
||||
Description string // The human readable description string
|
||||
LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
|
||||
ActiveState string // The active state (i.e. whether the unit is currently started or not)
|
||||
SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
|
||||
Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
|
||||
Path dbus.ObjectPath // The unit object path
|
||||
JobId uint32 // If there is a job queued for the unit, the numeric job id; 0 otherwise
|
||||
JobType string // The job type as string
|
||||
JobPath dbus.ObjectPath // The job object path
|
||||
}
|
||||
|
||||
type storeFunc func(retvalues ...interface{}) error
|
||||
|
||||
func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := f(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
status := make([]UnitStatus, len(result))
|
||||
statusInterface := make([]interface{}, len(status))
|
||||
for i := range status {
|
||||
statusInterface[i] = &status[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, statusInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// ListUnits returns an array with all currently loaded units. Note that
|
||||
// units may be known by multiple names at the same time, and hence there might
|
||||
// be more unit names loaded than actual units behind them.
|
||||
func (c *Conn) ListUnits() ([]UnitStatus, error) {
|
||||
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
|
||||
}
|
||||
|
||||
// ListUnitsFiltered returns an array with units filtered by state.
|
||||
// It takes a list of units' statuses to filter.
|
||||
func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
|
||||
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
|
||||
}
|
||||
|
||||
// ListUnitsByPatterns returns an array with units.
|
||||
// It takes a list of units' statuses and names to filter.
|
||||
// Note that units may be known by multiple names at the same time,
|
||||
// and hence there might be more unit names loaded than actual units behind them.
|
||||
func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
|
||||
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
|
||||
}
|
||||
|
||||
// ListUnitsByNames returns an array with units. It takes a list of units'
|
||||
// names and returns a UnitStatus array. Compared to the ListUnitsByPatterns
// method, this method returns statuses even for inactive or non-existent
// units. The input array should contain exact unit names, not patterns.
|
||||
// Note: Requires systemd v230 or higher
|
||||
func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
|
||||
return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
|
||||
}
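A hedged example of the filtered listing calls; "failed" is a standard unit state, and the output format is arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// All units currently in the "failed" state. ListUnitsByNames would be
	// used instead when exact names are known (systemd >= 230).
	failed, err := conn.ListUnitsFiltered([]string{"failed"})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range failed {
		fmt.Printf("%s: %s/%s\n", u.Name, u.ActiveState, u.SubState)
	}
}
```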
|
||||
|
||||
type UnitFile struct {
|
||||
Path string
|
||||
Type string
|
||||
}
|
||||
|
||||
func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := f(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
files := make([]UnitFile, len(result))
|
||||
fileInterface := make([]interface{}, len(files))
|
||||
for i := range files {
|
||||
fileInterface[i] = &files[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, fileInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// ListUnitFiles returns an array of all available units on disk.
|
||||
func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
|
||||
return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
|
||||
}
|
||||
|
||||
// ListUnitFilesByPatterns returns an array of all available units on disk that match the patterns.
|
||||
func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
|
||||
return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
|
||||
}
|
||||
|
||||
type LinkUnitFileChange EnableUnitFileChange
|
||||
|
||||
// LinkUnitFiles() links unit files (that are located outside of the
|
||||
// usual unit search paths) into the unit search path.
|
||||
//
|
||||
// It takes a list of absolute paths to unit files to link and two
|
||||
// booleans. The first boolean controls whether the unit shall be
|
||||
// enabled for runtime only (true, /run), or persistently (false,
|
||||
// /etc).
|
||||
// The second controls whether symlinks pointing to other units shall
|
||||
// be replaced if necessary.
|
||||
//
|
||||
// This call returns a list of the changes made. The list consists of
|
||||
// structures with three strings: the type of the change (one of symlink
|
||||
// or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]LinkUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// EnableUnitFiles() may be used to enable one or more units in the system (by
|
||||
// creating symlinks to them in /etc or /run).
|
||||
//
|
||||
// It takes a list of unit files to enable (either just file names or full
|
||||
// absolute paths if the unit files are residing outside the usual unit
|
||||
// search paths), and two booleans: the first controls whether the unit shall
|
||||
// be enabled for runtime only (true, /run), or persistently (false, /etc).
|
||||
// The second one controls whether symlinks pointing to other units shall
|
||||
// be replaced if necessary.
|
||||
//
|
||||
// This call returns one boolean and an array with the changes made. The
|
||||
// boolean signals whether the unit files contained any enablement
|
||||
// information (i.e. an [Install] section). The changes list consists of
|
||||
// structures with three strings: the type of the change (one of symlink
|
||||
// or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
|
||||
var carries_install_info bool
|
||||
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]EnableUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
return carries_install_info, changes, nil
|
||||
}
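A sketch of EnableUnitFiles as documented above, with a placeholder unit file name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// false, false: enable persistently (/etc) and do not overwrite
	// existing symlinks. The unit name is a placeholder.
	hasInstall, changes, err := conn.EnableUnitFiles([]string{"example.service"}, false, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("unit has [Install] section:", hasInstall)
	for _, ch := range changes {
		fmt.Printf("%s %s -> %s\n", ch.Type, ch.Filename, ch.Destination)
	}
}
```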
|
||||
|
||||
type EnableUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// DisableUnitFiles() may be used to disable one or more units in the system (by
|
||||
// removing symlinks to them from /etc or /run).
|
||||
//
|
||||
// It takes a list of unit files to disable (either just file names or full
|
||||
// absolute paths if the unit files are residing outside the usual unit
|
||||
// search paths), and one boolean: whether the unit was enabled for runtime
|
||||
// only (true, /run), or persistently (false, /etc).
|
||||
//
|
||||
// This call returns an array with the changes made. The changes list
|
||||
// consists of structures with three strings: the type of the change (one of
|
||||
// symlink or unlink), the file name of the symlink and the destination of the
|
||||
// symlink.
|
||||
func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]DisableUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
type DisableUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// MaskUnitFiles masks one or more units in the system
|
||||
//
|
||||
// It takes three arguments:
|
||||
// * list of units to mask (either just file names or full
|
||||
// absolute paths if the unit files are residing outside
|
||||
// the usual unit search paths)
|
||||
// * runtime to specify whether the unit was enabled for runtime
|
||||
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
|
||||
// * force flag
|
||||
func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]MaskUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
type MaskUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// UnmaskUnitFiles unmasks one or more units in the system
|
||||
//
|
||||
// It takes two arguments:
|
||||
// * list of unit files to unmask (either just file names or full
|
||||
// absolute paths if the unit files are residing outside
|
||||
// the usual unit search paths)
|
||||
// * runtime to specify whether the unit was enabled for runtime
|
||||
// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
|
||||
func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
|
||||
result := make([][]interface{}, 0)
|
||||
err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resultInterface := make([]interface{}, len(result))
|
||||
for i := range result {
|
||||
resultInterface[i] = result[i]
|
||||
}
|
||||
|
||||
changes := make([]UnmaskUnitFileChange, len(result))
|
||||
changesInterface := make([]interface{}, len(changes))
|
||||
for i := range changes {
|
||||
changesInterface[i] = &changes[i]
|
||||
}
|
||||
|
||||
err = dbus.Store(resultInterface, changesInterface...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
type UnmaskUnitFileChange struct {
|
||||
Type string // Type of the change (one of symlink or unlink)
|
||||
Filename string // File name of the symlink
|
||||
Destination string // Destination of the symlink
|
||||
}
|
||||
|
||||
// Reload instructs systemd to scan for and reload unit files. This is
|
||||
// equivalent to a 'systemctl daemon-reload'.
|
||||
func (c *Conn) Reload() error {
|
||||
return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
|
||||
}
|
||||
|
||||
func unitPath(name string) dbus.ObjectPath {
|
||||
return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
|
||||
}
|
||||
|
||||
// unitName returns the unescaped base element of the supplied escaped path
|
||||
func unitName(dpath dbus.ObjectPath) string {
|
||||
return pathBusUnescape(path.Base(string(dpath)))
|
||||
}
237
vendor/github.com/coreos/go-systemd/dbus/properties.go
generated
vendored
@@ -1,237 +0,0 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
// From the systemd docs:
|
||||
//
|
||||
// The properties array of StartTransientUnit() may take many of the settings
|
||||
// that may also be configured in unit files. Not all parameters are currently
|
||||
// accepted though, but we plan to cover more properties with future release.
|
||||
// Currently you may set the Description, Slice and all dependency types of
|
||||
// units, as well as RemainAfterExit, ExecStart for service units,
|
||||
// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
|
||||
// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
|
||||
// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
|
||||
// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
|
||||
// directly to their counterparts in unit files and as normal D-Bus object
|
||||
// properties. The exception here is the PIDs field of scope units which is
|
||||
// used for construction of the scope only and specifies the initial PIDs to
|
||||
// add to the scope object.
|
||||
|
||||
type Property struct {
|
||||
Name string
|
||||
Value dbus.Variant
|
||||
}
|
||||
|
||||
type PropertyCollection struct {
|
||||
Name string
|
||||
Properties []Property
|
||||
}
|
||||
|
||||
type execStart struct {
|
||||
Path string // the binary path to execute
|
||||
Args []string // an array with all arguments to pass to the executed command, starting with argument 0
|
||||
UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
|
||||
}
|
||||
|
||||
// PropExecStart sets the ExecStart service property. The first argument is a
|
||||
// slice with the binary path to execute followed by the arguments to pass to
|
||||
// the executed command. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
|
||||
func PropExecStart(command []string, uncleanIsFailure bool) Property {
|
||||
execStarts := []execStart{
|
||||
execStart{
|
||||
Path: command[0],
|
||||
Args: command,
|
||||
UncleanIsFailure: uncleanIsFailure,
|
||||
},
|
||||
}
|
||||
|
||||
return Property{
|
||||
Name: "ExecStart",
|
||||
Value: dbus.MakeVariant(execStarts),
|
||||
}
|
||||
}
|
||||
|
||||
// PropRemainAfterExit sets the RemainAfterExit service property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
|
||||
func PropRemainAfterExit(b bool) Property {
|
||||
return Property{
|
||||
Name: "RemainAfterExit",
|
||||
Value: dbus.MakeVariant(b),
|
||||
}
|
||||
}
|
||||
|
||||
// PropType sets the Type service property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
|
||||
func PropType(t string) Property {
|
||||
return Property{
|
||||
Name: "Type",
|
||||
Value: dbus.MakeVariant(t),
|
||||
}
|
||||
}
|
||||
|
||||
// PropDescription sets the Description unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
|
||||
func PropDescription(desc string) Property {
|
||||
return Property{
|
||||
Name: "Description",
|
||||
Value: dbus.MakeVariant(desc),
|
||||
}
|
||||
}
|
||||
|
||||
func propDependency(name string, units []string) Property {
|
||||
return Property{
|
||||
Name: name,
|
||||
Value: dbus.MakeVariant(units),
|
||||
}
|
||||
}
|
||||
|
||||
// PropRequires sets the Requires unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
|
||||
func PropRequires(units ...string) Property {
|
||||
return propDependency("Requires", units)
|
||||
}
|
||||
|
||||
// PropRequiresOverridable sets the RequiresOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
|
||||
func PropRequiresOverridable(units ...string) Property {
|
||||
return propDependency("RequiresOverridable", units)
|
||||
}
|
||||
|
||||
// PropRequisite sets the Requisite unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
|
||||
func PropRequisite(units ...string) Property {
|
||||
return propDependency("Requisite", units)
|
||||
}
|
||||
|
||||
// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
|
||||
func PropRequisiteOverridable(units ...string) Property {
|
||||
return propDependency("RequisiteOverridable", units)
|
||||
}
|
||||
|
||||
// PropWants sets the Wants unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
|
||||
func PropWants(units ...string) Property {
|
||||
return propDependency("Wants", units)
|
||||
}
|
||||
|
||||
// PropBindsTo sets the BindsTo unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
|
||||
func PropBindsTo(units ...string) Property {
|
||||
return propDependency("BindsTo", units)
|
||||
}
|
||||
|
||||
// PropRequiredBy sets the RequiredBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
|
||||
func PropRequiredBy(units ...string) Property {
|
||||
return propDependency("RequiredBy", units)
|
||||
}
|
||||
|
||||
// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
|
||||
func PropRequiredByOverridable(units ...string) Property {
|
||||
return propDependency("RequiredByOverridable", units)
|
||||
}
|
||||
|
||||
// PropWantedBy sets the WantedBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
|
||||
func PropWantedBy(units ...string) Property {
|
||||
return propDependency("WantedBy", units)
|
||||
}
|
||||
|
||||
// PropBoundBy sets the BoundBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
|
||||
func PropBoundBy(units ...string) Property {
|
||||
return propDependency("BoundBy", units)
|
||||
}
|
||||
|
||||
// PropConflicts sets the Conflicts unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
|
||||
func PropConflicts(units ...string) Property {
|
||||
return propDependency("Conflicts", units)
|
||||
}
|
||||
|
||||
// PropConflictedBy sets the ConflictedBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
|
||||
func PropConflictedBy(units ...string) Property {
|
||||
return propDependency("ConflictedBy", units)
|
||||
}
|
||||
|
||||
// PropBefore sets the Before unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
|
||||
func PropBefore(units ...string) Property {
|
||||
return propDependency("Before", units)
|
||||
}
|
||||
|
||||
// PropAfter sets the After unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
|
||||
func PropAfter(units ...string) Property {
|
||||
return propDependency("After", units)
|
||||
}
|
||||
|
||||
// PropOnFailure sets the OnFailure unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
|
||||
func PropOnFailure(units ...string) Property {
|
||||
return propDependency("OnFailure", units)
|
||||
}
|
||||
|
||||
// PropTriggers sets the Triggers unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
|
||||
func PropTriggers(units ...string) Property {
|
||||
return propDependency("Triggers", units)
|
||||
}
|
||||
|
||||
// PropTriggeredBy sets the TriggeredBy unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
|
||||
func PropTriggeredBy(units ...string) Property {
|
||||
return propDependency("TriggeredBy", units)
|
||||
}
|
||||
|
||||
// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
|
||||
func PropPropagatesReloadTo(units ...string) Property {
|
||||
return propDependency("PropagatesReloadTo", units)
|
||||
}
|
||||
|
||||
// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
|
||||
func PropRequiresMountsFor(units ...string) Property {
|
||||
return propDependency("RequiresMountsFor", units)
|
||||
}
|
||||
|
||||
// PropSlice sets the Slice unit property. See
|
||||
// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
|
||||
func PropSlice(slice string) Property {
|
||||
return Property{
|
||||
Name: "Slice",
|
||||
Value: dbus.MakeVariant(slice),
|
||||
}
|
||||
}
|
||||
|
||||
// PropPids sets the PIDs field of scope units used in the initial construction
|
||||
// of the scope only and specifies the initial PIDs to add to the scope object.
|
||||
// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
|
||||
func PropPids(pids ...uint32) Property {
|
||||
return Property{
|
||||
Name: "PIDs",
|
||||
Value: dbus.MakeVariant(pids),
|
||||
}
|
||||
}
47
vendor/github.com/coreos/go-systemd/dbus/set.go
generated
vendored
@@ -1,47 +0,0 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
type set struct {
|
||||
data map[string]bool
|
||||
}
|
||||
|
||||
func (s *set) Add(value string) {
|
||||
s.data[value] = true
|
||||
}
|
||||
|
||||
func (s *set) Remove(value string) {
|
||||
delete(s.data, value)
|
||||
}
|
||||
|
||||
func (s *set) Contains(value string) (exists bool) {
|
||||
_, exists = s.data[value]
|
||||
return
|
||||
}
|
||||
|
||||
func (s *set) Length() int {
|
||||
return len(s.data)
|
||||
}
|
||||
|
||||
func (s *set) Values() (values []string) {
|
||||
for val := range s.data {
|
||||
values = append(values, val)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newSet() *set {
|
||||
return &set{make(map[string]bool)}
|
||||
}
333
vendor/github.com/coreos/go-systemd/dbus/subscription.go
generated
vendored
@@ -1,333 +0,0 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
const (
|
||||
cleanIgnoreInterval = int64(10 * time.Second)
|
||||
ignoreInterval = int64(30 * time.Millisecond)
|
||||
)
|
||||
|
||||
// Subscribe sets up this connection to subscribe to all systemd dbus events.
|
||||
// This is required before calling SubscribeUnits. When the connection closes
|
||||
// systemd will automatically stop sending signals so there is no need to
|
||||
// explicitly call Unsubscribe().
|
||||
func (c *Conn) Subscribe() error {
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
|
||||
c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
|
||||
|
||||
return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
|
||||
}
|
||||
|
||||
// Unsubscribe this connection from systemd dbus events.
|
||||
func (c *Conn) Unsubscribe() error {
|
||||
return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
|
||||
}
|
||||
|
||||
func (c *Conn) dispatch() {
|
||||
ch := make(chan *dbus.Signal, signalBuffer)
|
||||
|
||||
c.sigconn.Signal(ch)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
signal, ok := <-ch
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
|
||||
c.jobComplete(signal)
|
||||
}
|
||||
|
||||
if c.subStateSubscriber.updateCh == nil &&
|
||||
c.propertiesSubscriber.updateCh == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var unitPath dbus.ObjectPath
|
||||
switch signal.Name {
|
||||
case "org.freedesktop.systemd1.Manager.JobRemoved":
|
||||
unitName := signal.Body[2].(string)
|
||||
c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
|
||||
case "org.freedesktop.systemd1.Manager.UnitNew":
|
||||
unitPath = signal.Body[1].(dbus.ObjectPath)
|
||||
case "org.freedesktop.DBus.Properties.PropertiesChanged":
|
||||
if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
|
||||
unitPath = signal.Path
|
||||
|
||||
if len(signal.Body) >= 2 {
|
||||
if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok {
|
||||
c.sendPropertiesUpdate(unitPath, changed)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if unitPath == dbus.ObjectPath("") {
|
||||
continue
|
||||
}
|
||||
|
||||
c.sendSubStateUpdate(unitPath)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// SubscribeUnits returns two unbuffered channels which will receive all changed
// units every interval. Deleted units are sent as nil.
|
||||
func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
|
||||
return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
|
||||
}
|
||||
|
||||
// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
|
||||
// size of the channels, the comparison function for detecting changes and a filter
|
||||
// function for cutting down on the noise that your channel receives.
|
||||
func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
|
||||
old := make(map[string]*UnitStatus)
|
||||
statusChan := make(chan map[string]*UnitStatus, buffer)
|
||||
errChan := make(chan error, buffer)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
timerChan := time.After(interval)
|
||||
|
||||
units, err := c.ListUnits()
|
||||
if err == nil {
|
||||
cur := make(map[string]*UnitStatus)
|
||||
for i := range units {
|
||||
if filterUnit != nil && filterUnit(units[i].Name) {
|
||||
continue
|
||||
}
|
||||
cur[units[i].Name] = &units[i]
|
||||
}
|
||||
|
||||
// add all new or changed units
|
||||
changed := make(map[string]*UnitStatus)
|
||||
for n, u := range cur {
|
||||
if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
|
||||
changed[n] = u
|
||||
}
|
||||
delete(old, n)
|
||||
}
|
||||
|
||||
// add all deleted units
|
||||
for oldN := range old {
|
||||
changed[oldN] = nil
|
||||
}
|
||||
|
||||
old = cur
|
||||
|
||||
if len(changed) != 0 {
|
||||
statusChan <- changed
|
||||
}
|
||||
} else {
|
||||
errChan <- err
|
||||
}
|
||||
|
||||
<-timerChan
|
||||
}
|
||||
}()
|
||||
|
||||
return statusChan, errChan
|
||||
}
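A hedged sketch of the polling subscription API; the interval and log output are arbitrary choices, and Subscribe() is called first as the comment on it above requires:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Subscribe() must be called before SubscribeUnits.
	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	updates, errs := conn.SubscribeUnits(5 * time.Second)
	for {
		select {
		case changed := <-updates:
			for name, status := range changed {
				if status == nil {
					log.Printf("%s was removed", name)
					continue
				}
				log.Printf("%s is now %s", name, status.ActiveState)
			}
		case err := <-errs:
			log.Println("subscription error:", err)
		}
	}
}
```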
|
||||
|
||||
type SubStateUpdate struct {
|
||||
UnitName string
|
||||
SubState string
|
||||
}
|
||||
|
||||
// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
|
||||
// Although this writes to updateCh on every state change, the reported state
|
||||
// may be more recent than the change that generated it (due to an unavoidable
|
||||
// race in the systemd dbus interface). That is, this method provides a good
|
||||
// way to keep a current view of all units' states, but is not guaranteed to
|
||||
// show every state transition they go through. Furthermore, state changes
|
||||
// will only be written to the channel with non-blocking writes. If updateCh
|
||||
// is full, it attempts to write an error to errCh; if errCh is full, the error
|
||||
// passes silently.
|
||||
func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
|
||||
if c == nil {
|
||||
msg := "nil receiver"
|
||||
select {
|
||||
case errCh <- errors.New(msg):
|
||||
default:
|
||||
log.Printf("full error channel while reporting: %s\n", msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
c.subStateSubscriber.Lock()
|
||||
defer c.subStateSubscriber.Unlock()
|
||||
c.subStateSubscriber.updateCh = updateCh
|
||||
c.subStateSubscriber.errCh = errCh
|
||||
}
|
||||
|
||||
func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) {
|
||||
c.subStateSubscriber.Lock()
|
||||
defer c.subStateSubscriber.Unlock()
|
||||
|
||||
if c.subStateSubscriber.updateCh == nil {
|
||||
return
|
||||
}
|
||||
|
||||
isIgnored := c.shouldIgnore(unitPath)
|
||||
defer c.cleanIgnore()
|
||||
if isIgnored {
|
||||
return
|
||||
}
|
||||
|
||||
info, err := c.GetUnitPathProperties(unitPath)
|
||||
if err != nil {
|
||||
select {
|
||||
case c.subStateSubscriber.errCh <- err:
|
||||
default:
|
||||
log.Printf("full error channel while reporting: %s\n", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
defer c.updateIgnore(unitPath, info)
|
||||
|
||||
name, ok := info["Id"].(string)
|
||||
if !ok {
|
||||
msg := "failed to cast info.Id"
|
||||
select {
|
||||
case c.subStateSubscriber.errCh <- errors.New(msg):
|
||||
default:
|
||||
log.Printf("full error channel while reporting: %s\n", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
substate, ok := info["SubState"].(string)
|
||||
if !ok {
|
||||
msg := "failed to cast info.SubState"
|
||||
select {
|
||||
case c.subStateSubscriber.errCh <- errors.New(msg):
|
||||
default:
|
||||
log.Printf("full error channel while reporting: %s\n", msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
update := &SubStateUpdate{name, substate}
|
||||
select {
|
||||
case c.subStateSubscriber.updateCh <- update:
|
||||
default:
|
||||
msg := "update channel is full"
|
||||
select {
|
||||
case c.subStateSubscriber.errCh <- errors.New(msg):
|
||||
default:
|
||||
log.Printf("full error channel while reporting: %s\n", msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The ignore functions work around a wart in the systemd dbus interface.
|
||||
// Requesting the properties of an unloaded unit will cause systemd to send a
|
||||
// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
|
||||
// properties on UnitNew (as that's the only indication of a new unit coming up
|
||||
// for the first time), we would enter an infinite loop if we did not attempt
|
||||
// to detect and ignore these spurious signals. The signals themselves are
|
||||
// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
|
||||
// unloaded unit's signals for a short time after requesting its properties.
|
||||
// This means that we will miss e.g. a transient unit being restarted
|
||||
// *immediately* upon failure and also a transient unit being started
|
||||
// immediately after requesting its status (with systemctl status, for example,
|
||||
// because this causes a UnitNew signal to be sent which then causes us to fetch
|
||||
// the properties).
|
||||
|
||||
func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
|
||||
t, ok := c.subStateSubscriber.ignore[path]
|
||||
return ok && t >= time.Now().UnixNano()
|
||||
}
|
||||
|
||||
func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
|
||||
loadState, ok := info["LoadState"].(string)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// unit is unloaded - it will trigger bad systemd dbus behavior
|
||||
if loadState == "not-found" {
|
||||
c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
|
||||
}
|
||||
}
|
||||
|
||||
// without this, ignore would grow unboundedly over time
|
||||
func (c *Conn) cleanIgnore() {
|
||||
now := time.Now().UnixNano()
|
||||
if c.subStateSubscriber.cleanIgnore < now {
|
||||
c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval
|
||||
|
||||
for p, t := range c.subStateSubscriber.ignore {
|
||||
if t < now {
|
||||
delete(c.subStateSubscriber.ignore, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PropertiesUpdate holds a map of a unit's changed properties
|
||||
type PropertiesUpdate struct {
|
||||
UnitName string
|
||||
Changed map[string]dbus.Variant
|
||||
}
|
||||
|
||||
// SetPropertiesSubscriber writes to updateCh when any unit's properties
|
||||
// change. Every property change reported by systemd will be sent; that is, no
|
||||
// transitions will be "missed" (as they might be with SetSubStateSubscriber).
|
||||
// However, state changes will only be written to the channel with non-blocking
|
||||
// writes. If updateCh is full, it attempts to write an error to errCh; if
|
||||
// errCh is full, the error passes silently.
|
||||
func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) {
|
||||
c.propertiesSubscriber.Lock()
|
||||
defer c.propertiesSubscriber.Unlock()
|
||||
c.propertiesSubscriber.updateCh = updateCh
|
||||
c.propertiesSubscriber.errCh = errCh
|
||||
}
|
||||
|
||||
// we don't need to worry about shouldIgnore() here because
|
||||
// sendPropertiesUpdate doesn't call GetProperties()
|
||||
func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) {
|
||||
c.propertiesSubscriber.Lock()
|
||||
defer c.propertiesSubscriber.Unlock()
|
||||
|
||||
if c.propertiesSubscriber.updateCh == nil {
|
||||
return
|
||||
}
|
||||
|
||||
update := &PropertiesUpdate{unitName(unitPath), changedProps}
|
||||
|
||||
select {
|
||||
case c.propertiesSubscriber.updateCh <- update:
|
||||
default:
|
||||
msg := "update channel is full"
|
||||
select {
|
||||
case c.propertiesSubscriber.errCh <- errors.New(msg):
|
||||
default:
|
||||
log.Printf("full error channel while reporting: %s\n", msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
57
vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
generated
vendored
@@ -1,57 +0,0 @@
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dbus
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// SubscriptionSet returns a subscription set which is like conn.Subscribe but
|
||||
// can filter to only return events for a set of units.
|
||||
type SubscriptionSet struct {
|
||||
*set
|
||||
conn *Conn
|
||||
}
|
||||
|
||||
func (s *SubscriptionSet) filter(unit string) bool {
|
||||
return !s.Contains(unit)
|
||||
}
|
||||
|
||||
// Subscribe starts listening for dbus events for all of the units in the set.
|
||||
// Returns channels identical to conn.SubscribeUnits.
|
||||
func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
|
||||
// TODO: Make fully evented by using systemd 209 with properties changed values
|
||||
return s.conn.SubscribeUnitsCustom(time.Second, 0,
|
||||
mismatchUnitStatus,
|
||||
func(unit string) bool { return s.filter(unit) },
|
||||
)
|
||||
}
|
||||
|
||||
// NewSubscriptionSet returns a new subscription set.
|
||||
func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
|
||||
return &SubscriptionSet{newSet(), conn}
|
||||
}
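A hedged sketch of watching a single unit through a SubscriptionSet; the unit name is a placeholder, and the prior Subscribe() call follows the guidance in the Subscribe docs above:

```go
package main

import (
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	// Watch only one placeholder unit; everything else is filtered out.
	set := conn.NewSubscriptionSet()
	set.Add("example.service")
	updates, errs := set.Subscribe()

	for {
		select {
		case changed := <-updates:
			for name, status := range changed {
				log.Printf("%s changed: %+v", name, status)
			}
		case err := <-errs:
			log.Println(err)
		}
	}
}
```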
|
||||
|
||||
// mismatchUnitStatus returns true if the provided UnitStatus objects
|
||||
// are not equivalent. false is returned if the objects are equivalent.
|
||||
// Only the Name, Description and state-related fields are used in
|
||||
// the comparison.
|
||||
func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
|
||||
return u1.Name != u2.Name ||
|
||||
u1.Description != u2.Description ||
|
||||
u1.LoadState != u2.LoadState ||
|
||||
u1.ActiveState != u2.ActiveState ||
|
||||
u1.SubState != u2.SubState
|
||||
}
2
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
@@ -2,7 +2,7 @@ ISC License
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
|
||||
187 vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored
@@ -16,7 +16,9 @@
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build !js,!appengine,!safe,!disableunsafe
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
@@ -34,80 +36,49 @@ const (
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. The are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
type flag uintptr
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
// Commit adf9b30e5594 modified the flags to separate the
|
||||
// flagRO flag into two bits which specifies whether or not the
|
||||
// field is embedded. This causes flagIndir to move over a bit
|
||||
// and means that flagRO is the combination of either of the
|
||||
// original flagRO bit and the new bit.
|
||||
//
|
||||
// This code detects the change by extracting what used to be
|
||||
// the indirect bit to ensure it's set. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are
|
||||
// updated accordingly.
|
||||
if upfv&flagIndir == 0 {
|
||||
flagRO = 3 << 5
|
||||
flagIndir = 1 << 7
|
||||
}
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
@@ -119,34 +90,56 @@ func init() {
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
return rv
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
||||
|
||||
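The rewritten bypass.go above stops hard-coding reflect.Value field offsets and instead infers the read-only and addressable flag bits in init(), sanity-checking them against a table of known Go layouts. Callers are unaffected; a small sketch of the kind of dump that relies on this bypass, assuming only the vendored spew package itself:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type creds struct {
	user  string
	token string
}

func main() {
	c := creds{user: "admin", token: "s3cr3t"}
	// Sdump renders unexported fields with their types; the unsafe code in
	// bypass.go is what lets spew treat those read-only reflect.Values as
	// addressable so Stringer/error methods can still be probed. Builds with
	// -tags safe fall back to bypasssafe.go instead.
	fmt.Print(spew.Sdump(c))
}
```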
2 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored
@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
// +build js appengine safe disableunsafe !go1.4

package spew

10 vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored
@@ -35,16 +35,16 @@ var (

    // cCharRE is a regular expression that matches a cgo char.
    // It is used to detect character arrays to hexdump them.
    cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
    cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)

    // cUnsignedCharRE is a regular expression that matches a cgo unsigned
    // char. It is used to detect unsigned character arrays to hexdump
    // them.
    cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
    cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)

    // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
    // It is used to detect uint8_t arrays to hexdump them.
    cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
    cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)

// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
    // Display dereferenced value.
    d.w.Write(openParenBytes)
    switch {
    case nilFound == true:
    case nilFound:
        d.w.Write(nilAngleBytes)

    case cycleFound == true:
    case cycleFound:
        d.w.Write(circularBytes)

    default:

4 vendor/github.com/davecgh/go-spew/spew/format.go generated vendored
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {

    // Display dereferenced value.
    switch {
    case nilFound == true:
    case nilFound:
        f.fs.Write(nilAngleBytes)

    case cycleFound == true:
    case cycleFound:
        f.fs.Write(circularShortBytes)

    default:

2 vendor/github.com/go-ini/ini/LICENSE generated vendored
@@ -176,7 +176,7 @@ recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]
Copyright 2014 Unknwon

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

414 vendor/github.com/go-ini/ini/file.go generated vendored Normal file
@@ -0,0 +1,414 @@
|
||||
// Copyright 2017 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// File represents a combination of a or more INI file(s) in memory.
|
||||
type File struct {
|
||||
options LoadOptions
|
||||
dataSources []dataSource
|
||||
|
||||
// Should make things safe, but sometimes doesn't matter.
|
||||
BlockMode bool
|
||||
lock sync.RWMutex
|
||||
|
||||
// To keep data in order.
|
||||
sectionList []string
|
||||
// Actual data is stored here.
|
||||
sections map[string]*Section
|
||||
|
||||
NameMapper
|
||||
ValueMapper
|
||||
}
|
||||
|
||||
// newFile initializes File object with given data sources.
|
||||
func newFile(dataSources []dataSource, opts LoadOptions) *File {
|
||||
return &File{
|
||||
BlockMode: true,
|
||||
dataSources: dataSources,
|
||||
sections: make(map[string]*Section),
|
||||
sectionList: make([]string, 0, 10),
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
// Empty returns an empty file object.
|
||||
func Empty() *File {
|
||||
// Ignore error here, we sure our data is good.
|
||||
f, _ := Load([]byte(""))
|
||||
return f
|
||||
}
|
||||
|
||||
// NewSection creates a new section.
|
||||
func (f *File) NewSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, errors.New("error creating new section: empty section name")
|
||||
} else if f.options.Insensitive && name != DEFAULT_SECTION {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if inSlice(name, f.sectionList) {
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
f.sectionList = append(f.sectionList, name)
|
||||
f.sections[name] = newSection(f, name)
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
// NewRawSection creates a new section with an unparseable body.
|
||||
func (f *File) NewRawSection(name, body string) (*Section, error) {
|
||||
section, err := f.NewSection(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
section.isRawSection = true
|
||||
section.rawBody = body
|
||||
return section, nil
|
||||
}
|
||||
|
||||
// NewSections creates a list of sections.
|
||||
func (f *File) NewSections(names ...string) (err error) {
|
||||
for _, name := range names {
|
||||
if _, err = f.NewSection(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSection returns section by given name.
|
||||
func (f *File) GetSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
name = DEFAULT_SECTION
|
||||
}
|
||||
if f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sec := f.sections[name]
|
||||
if sec == nil {
|
||||
return nil, fmt.Errorf("section '%s' does not exist", name)
|
||||
}
|
||||
return sec, nil
|
||||
}
|
||||
|
||||
// Section assumes named section exists and returns a zero-value when not.
|
||||
func (f *File) Section(name string) *Section {
|
||||
sec, err := f.GetSection(name)
|
||||
if err != nil {
|
||||
// Note: It's OK here because the only possible error is empty section name,
|
||||
// but if it's empty, this piece of code won't be executed.
|
||||
sec, _ = f.NewSection(name)
|
||||
return sec
|
||||
}
|
||||
return sec
|
||||
}
|
||||
|
||||
// Section returns list of Section.
|
||||
func (f *File) Sections() []*Section {
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sections := make([]*Section, len(f.sectionList))
|
||||
for i, name := range f.sectionList {
|
||||
sections[i] = f.sections[name]
|
||||
}
|
||||
return sections
|
||||
}
|
||||
|
||||
// ChildSections returns a list of child sections of given section name.
|
||||
func (f *File) ChildSections(name string) []*Section {
|
||||
return f.Section(name).ChildSections()
|
||||
}
|
||||
|
||||
// SectionStrings returns list of section names.
|
||||
func (f *File) SectionStrings() []string {
|
||||
list := make([]string, len(f.sectionList))
|
||||
copy(list, f.sectionList)
|
||||
return list
|
||||
}
|
||||
|
||||
// DeleteSection deletes a section.
|
||||
func (f *File) DeleteSection(name string) {
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(name) == 0 {
|
||||
name = DEFAULT_SECTION
|
||||
}
|
||||
|
||||
for i, s := range f.sectionList {
|
||||
if s == name {
|
||||
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
|
||||
delete(f.sections, name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) reload(s dataSource) error {
|
||||
r, err := s.ReadCloser()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
return f.parse(r)
|
||||
}
|
||||
|
||||
// Reload reloads and parses all data sources.
|
||||
func (f *File) Reload() (err error) {
|
||||
for _, s := range f.dataSources {
|
||||
if err = f.reload(s); err != nil {
|
||||
// In loose mode, we create an empty default section for nonexistent files.
|
||||
if os.IsNotExist(err) && f.options.Loose {
|
||||
f.parse(bytes.NewBuffer(nil))
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append appends one or more data sources and reloads automatically.
|
||||
func (f *File) Append(source interface{}, others ...interface{}) error {
|
||||
ds, err := parseDataSource(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
for _, s := range others {
|
||||
ds, err = parseDataSource(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
}
|
||||
return f.Reload()
|
||||
}
|
||||
|
||||
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
equalSign := "="
|
||||
if PrettyFormat || PrettyEqual {
|
||||
equalSign = " = "
|
||||
}
|
||||
|
||||
// Use buffer to make sure target is safe until finish encoding.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
for i, sname := range f.sectionList {
|
||||
sec := f.Section(sname)
|
||||
if len(sec.Comment) > 0 {
|
||||
// Support multiline comments
|
||||
lines := strings.Split(sec.Comment, LineBreak)
|
||||
for i := range lines {
|
||||
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||
lines[i] = "; " + lines[i]
|
||||
} else {
|
||||
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i > 0 || DefaultHeader {
|
||||
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// Write nothing if default section is empty
|
||||
if len(sec.keyList) == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if sec.isRawSection {
|
||||
if _, err := buf.WriteString(sec.rawBody); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if PrettySection {
|
||||
// Put a line between sections
|
||||
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Count and generate alignment length and buffer spaces using the
|
||||
// longest key. Keys may be modifed if they contain certain characters so
|
||||
// we need to take that into account in our calculation.
|
||||
alignLength := 0
|
||||
if PrettyFormat {
|
||||
for _, kname := range sec.keyList {
|
||||
keyLength := len(kname)
|
||||
// First case will surround key by ` and second by """
|
||||
if strings.ContainsAny(kname, "\"=:") {
|
||||
keyLength += 2
|
||||
} else if strings.Contains(kname, "`") {
|
||||
keyLength += 6
|
||||
}
|
||||
|
||||
if keyLength > alignLength {
|
||||
alignLength = keyLength
|
||||
}
|
||||
}
|
||||
}
|
||||
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
|
||||
|
||||
KEY_LIST:
|
||||
for _, kname := range sec.keyList {
|
||||
key := sec.Key(kname)
|
||||
if len(key.Comment) > 0 {
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
|
||||
// Support multiline comments
|
||||
lines := strings.Split(key.Comment, LineBreak)
|
||||
for i := range lines {
|
||||
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||
lines[i] = "; " + lines[i]
|
||||
} else {
|
||||
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
|
||||
switch {
|
||||
case key.isAutoIncrement:
|
||||
kname = "-"
|
||||
case strings.ContainsAny(kname, "\"=:"):
|
||||
kname = "`" + kname + "`"
|
||||
case strings.Contains(kname, "`"):
|
||||
kname = `"""` + kname + `"""`
|
||||
}
|
||||
|
||||
for _, val := range key.ValueWithShadows() {
|
||||
if _, err := buf.WriteString(kname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if key.isBooleanType {
|
||||
if kname != sec.keyList[len(sec.keyList)-1] {
|
||||
buf.WriteString(LineBreak)
|
||||
}
|
||||
continue KEY_LIST
|
||||
}
|
||||
|
||||
// Write out alignment spaces before "=" sign
|
||||
if PrettyFormat {
|
||||
buf.Write(alignSpaces[:alignLength-len(kname)])
|
||||
}
|
||||
|
||||
// In case key value contains "\n", "`", "\"", "#" or ";"
|
||||
if strings.ContainsAny(val, "\n`") {
|
||||
val = `"""` + val + `"""`
|
||||
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
|
||||
val = "`" + val + "`"
|
||||
}
|
||||
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, val := range key.nestedValues {
|
||||
if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if PrettySection {
|
||||
// Put a line between sections
|
||||
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// WriteToIndent writes content into io.Writer with given indention.
|
||||
// If PrettyFormat has been set to be true,
|
||||
// it will align "=" sign with spaces under each section.
|
||||
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
|
||||
buf, err := f.writeToBuffer(indent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return buf.WriteTo(w)
|
||||
}
|
||||
|
||||
// WriteTo writes file content into io.Writer.
|
||||
func (f *File) WriteTo(w io.Writer) (int64, error) {
|
||||
return f.WriteToIndent(w, "")
|
||||
}
|
||||
|
||||
// SaveToIndent writes content to file system with given value indention.
|
||||
func (f *File) SaveToIndent(filename, indent string) error {
|
||||
// Note: Because we are truncating with os.Create,
|
||||
// so it's safer to save to a temporary file location and rename afte done.
|
||||
buf, err := f.writeToBuffer(indent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
|
||||
}
|
||||
|
||||
// SaveTo writes content to file system.
|
||||
func (f *File) SaveTo(filename string) error {
|
||||
return f.SaveToIndent(filename, "")
|
||||
}
|
||||
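file.go above now owns the File type: ordered sections, a BlockMode-guarded RWMutex, and the writeToBuffer/WriteTo/SaveTo plumbing. A brief usage sketch against the public go-ini API (the section and key names are made up for illustration):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-ini/ini"
)

func main() {
	// Load parses one or more data sources (file names, []byte, io.Reader) into a *File.
	cfg, err := ini.Load([]byte("[server]\nport = 8080\n"))
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}

	// Sections and keys are created on demand.
	if _, err := cfg.Section("server").NewKey("host", "127.0.0.1"); err != nil {
		fmt.Println("new key failed:", err)
		return
	}
	fmt.Println(cfg.Section("server").Key("port").String())

	// WriteTo serializes through writeToBuffer, aligning "=" when PrettyFormat is set.
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		fmt.Println("write failed:", err)
	}
}
```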
405 vendor/github.com/go-ini/ini/ini.go generated vendored
@@ -1,3 +1,5 @@
|
||||
// +build go1.6
|
||||
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
@@ -17,15 +19,12 @@ package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -35,7 +34,7 @@ const (
|
||||
|
||||
// Maximum allowed depth when recursively substituing variable names.
|
||||
_DEPTH_VALUES = 99
|
||||
_VERSION = "1.28.2"
|
||||
_VERSION = "1.38.2"
|
||||
)
|
||||
|
||||
// Version returns current package version literal.
|
||||
@@ -56,6 +55,9 @@ var (
|
||||
// or reduce all possible spaces for compact format.
|
||||
PrettyFormat = true
|
||||
|
||||
// Place spaces around "=" sign even when PrettyFormat is false
|
||||
PrettyEqual = false
|
||||
|
||||
// Explicitly write DEFAULT section header
|
||||
DefaultHeader = false
|
||||
|
||||
@@ -92,18 +94,6 @@ func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
|
||||
return os.Open(s.name)
|
||||
}
|
||||
|
||||
type bytesReadCloser struct {
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
|
||||
return rc.reader.Read(p)
|
||||
}
|
||||
|
||||
func (rc *bytesReadCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// sourceData represents an object that contains content in memory.
|
||||
type sourceData struct {
|
||||
data []byte
|
||||
@@ -122,38 +112,6 @@ func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
|
||||
return s.reader, nil
|
||||
}
|
||||
|
||||
// File represents a combination of a or more INI file(s) in memory.
|
||||
type File struct {
|
||||
// Should make things safe, but sometimes doesn't matter.
|
||||
BlockMode bool
|
||||
// Make sure data is safe in multiple goroutines.
|
||||
lock sync.RWMutex
|
||||
|
||||
// Allow combination of multiple data sources.
|
||||
dataSources []dataSource
|
||||
// Actual data is stored here.
|
||||
sections map[string]*Section
|
||||
|
||||
// To keep data in order.
|
||||
sectionList []string
|
||||
|
||||
options LoadOptions
|
||||
|
||||
NameMapper
|
||||
ValueMapper
|
||||
}
|
||||
|
||||
// newFile initializes File object with given data sources.
|
||||
func newFile(dataSources []dataSource, opts LoadOptions) *File {
|
||||
return &File{
|
||||
BlockMode: true,
|
||||
dataSources: dataSources,
|
||||
sections: make(map[string]*Section),
|
||||
sectionList: make([]string, 0, 10),
|
||||
options: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func parseDataSource(source interface{}) (dataSource, error) {
|
||||
switch s := source.(type) {
|
||||
case string:
|
||||
@@ -176,12 +134,34 @@ type LoadOptions struct {
|
||||
IgnoreContinuation bool
|
||||
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
|
||||
IgnoreInlineComment bool
|
||||
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
|
||||
SkipUnrecognizableLines bool
|
||||
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
|
||||
// This type of keys are mostly used in my.cnf.
|
||||
AllowBooleanKeys bool
|
||||
// AllowShadows indicates whether to keep track of keys with same name under same section.
|
||||
AllowShadows bool
|
||||
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
|
||||
// AllowNestedValues indicates whether to allow AWS-like nested values.
|
||||
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
|
||||
AllowNestedValues bool
|
||||
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
|
||||
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
|
||||
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
|
||||
// than the first line of the value.
|
||||
AllowPythonMultilineValues bool
|
||||
// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
|
||||
// Docs: https://docs.python.org/2/library/configparser.html
|
||||
// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
|
||||
// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
|
||||
SpaceBeforeInlineComment bool
|
||||
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
|
||||
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
|
||||
UnescapeValueDoubleQuotes bool
|
||||
// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
|
||||
// when value is NOT surrounded by any quotes.
|
||||
// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all.
|
||||
UnescapeValueCommentSymbols bool
|
||||
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
|
||||
// conform to key/value pairs. Specify the names of those blocks here.
|
||||
UnparseableSections []string
|
||||
}
|
||||
@@ -224,333 +204,8 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
|
||||
}
|
||||
|
||||
// InsensitiveLoad has exactly same functionality as Load function
|
||||
// ShadowLoad has exactly same functionality as Load function
|
||||
// except it allows have shadow keys.
|
||||
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
|
||||
}
|
||||
|
||||
// Empty returns an empty file object.
|
||||
func Empty() *File {
|
||||
// Ignore error here, we sure our data is good.
|
||||
f, _ := Load([]byte(""))
|
||||
return f
|
||||
}
|
||||
|
||||
// NewSection creates a new section.
|
||||
func (f *File) NewSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
return nil, errors.New("error creating new section: empty section name")
|
||||
} else if f.options.Insensitive && name != DEFAULT_SECTION {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if inSlice(name, f.sectionList) {
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
f.sectionList = append(f.sectionList, name)
|
||||
f.sections[name] = newSection(f, name)
|
||||
return f.sections[name], nil
|
||||
}
|
||||
|
||||
// NewRawSection creates a new section with an unparseable body.
|
||||
func (f *File) NewRawSection(name, body string) (*Section, error) {
|
||||
section, err := f.NewSection(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
section.isRawSection = true
|
||||
section.rawBody = body
|
||||
return section, nil
|
||||
}
|
||||
|
||||
// NewSections creates a list of sections.
|
||||
func (f *File) NewSections(names ...string) (err error) {
|
||||
for _, name := range names {
|
||||
if _, err = f.NewSection(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSection returns section by given name.
|
||||
func (f *File) GetSection(name string) (*Section, error) {
|
||||
if len(name) == 0 {
|
||||
name = DEFAULT_SECTION
|
||||
} else if f.options.Insensitive {
|
||||
name = strings.ToLower(name)
|
||||
}
|
||||
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sec := f.sections[name]
|
||||
if sec == nil {
|
||||
return nil, fmt.Errorf("section '%s' does not exist", name)
|
||||
}
|
||||
return sec, nil
|
||||
}
|
||||
|
||||
// Section assumes named section exists and returns a zero-value when not.
|
||||
func (f *File) Section(name string) *Section {
|
||||
sec, err := f.GetSection(name)
|
||||
if err != nil {
|
||||
// Note: It's OK here because the only possible error is empty section name,
|
||||
// but if it's empty, this piece of code won't be executed.
|
||||
sec, _ = f.NewSection(name)
|
||||
return sec
|
||||
}
|
||||
return sec
|
||||
}
|
||||
|
||||
// Section returns list of Section.
|
||||
func (f *File) Sections() []*Section {
|
||||
sections := make([]*Section, len(f.sectionList))
|
||||
for i := range f.sectionList {
|
||||
sections[i] = f.Section(f.sectionList[i])
|
||||
}
|
||||
return sections
|
||||
}
|
||||
|
||||
// ChildSections returns a list of child sections of given section name.
|
||||
func (f *File) ChildSections(name string) []*Section {
|
||||
return f.Section(name).ChildSections()
|
||||
}
|
||||
|
||||
// SectionStrings returns list of section names.
|
||||
func (f *File) SectionStrings() []string {
|
||||
list := make([]string, len(f.sectionList))
|
||||
copy(list, f.sectionList)
|
||||
return list
|
||||
}
|
||||
|
||||
// DeleteSection deletes a section.
|
||||
func (f *File) DeleteSection(name string) {
|
||||
if f.BlockMode {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
}
|
||||
|
||||
if len(name) == 0 {
|
||||
name = DEFAULT_SECTION
|
||||
}
|
||||
|
||||
for i, s := range f.sectionList {
|
||||
if s == name {
|
||||
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
|
||||
delete(f.sections, name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) reload(s dataSource) error {
|
||||
r, err := s.ReadCloser()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
return f.parse(r)
|
||||
}
|
||||
|
||||
// Reload reloads and parses all data sources.
|
||||
func (f *File) Reload() (err error) {
|
||||
for _, s := range f.dataSources {
|
||||
if err = f.reload(s); err != nil {
|
||||
// In loose mode, we create an empty default section for nonexistent files.
|
||||
if os.IsNotExist(err) && f.options.Loose {
|
||||
f.parse(bytes.NewBuffer(nil))
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append appends one or more data sources and reloads automatically.
|
||||
func (f *File) Append(source interface{}, others ...interface{}) error {
|
||||
ds, err := parseDataSource(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
for _, s := range others {
|
||||
ds, err = parseDataSource(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dataSources = append(f.dataSources, ds)
|
||||
}
|
||||
return f.Reload()
|
||||
}
|
||||
|
||||
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
equalSign := "="
|
||||
if PrettyFormat {
|
||||
equalSign = " = "
|
||||
}
|
||||
|
||||
// Use buffer to make sure target is safe until finish encoding.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
for i, sname := range f.sectionList {
|
||||
sec := f.Section(sname)
|
||||
if len(sec.Comment) > 0 {
|
||||
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
|
||||
sec.Comment = "; " + sec.Comment
|
||||
}
|
||||
if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if i > 0 || DefaultHeader {
|
||||
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// Write nothing if default section is empty
|
||||
if len(sec.keyList) == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if sec.isRawSection {
|
||||
if _, err := buf.WriteString(sec.rawBody); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Count and generate alignment length and buffer spaces using the
|
||||
// longest key. Keys may be modifed if they contain certain characters so
|
||||
// we need to take that into account in our calculation.
|
||||
alignLength := 0
|
||||
if PrettyFormat {
|
||||
for _, kname := range sec.keyList {
|
||||
keyLength := len(kname)
|
||||
// First case will surround key by ` and second by """
|
||||
if strings.ContainsAny(kname, "\"=:") {
|
||||
keyLength += 2
|
||||
} else if strings.Contains(kname, "`") {
|
||||
keyLength += 6
|
||||
}
|
||||
|
||||
if keyLength > alignLength {
|
||||
alignLength = keyLength
|
||||
}
|
||||
}
|
||||
}
|
||||
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
|
||||
|
||||
KEY_LIST:
|
||||
for _, kname := range sec.keyList {
|
||||
key := sec.Key(kname)
|
||||
if len(key.Comment) > 0 {
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
if key.Comment[0] != '#' && key.Comment[0] != ';' {
|
||||
key.Comment = "; " + key.Comment
|
||||
}
|
||||
if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
|
||||
switch {
|
||||
case key.isAutoIncrement:
|
||||
kname = "-"
|
||||
case strings.ContainsAny(kname, "\"=:"):
|
||||
kname = "`" + kname + "`"
|
||||
case strings.Contains(kname, "`"):
|
||||
kname = `"""` + kname + `"""`
|
||||
}
|
||||
|
||||
for _, val := range key.ValueWithShadows() {
|
||||
if _, err := buf.WriteString(kname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if key.isBooleanType {
|
||||
if kname != sec.keyList[len(sec.keyList)-1] {
|
||||
buf.WriteString(LineBreak)
|
||||
}
|
||||
continue KEY_LIST
|
||||
}
|
||||
|
||||
// Write out alignment spaces before "=" sign
|
||||
if PrettyFormat {
|
||||
buf.Write(alignSpaces[:alignLength-len(kname)])
|
||||
}
|
||||
|
||||
// In case key value contains "\n", "`", "\"", "#" or ";"
|
||||
if strings.ContainsAny(val, "\n`") {
|
||||
val = `"""` + val + `"""`
|
||||
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
|
||||
val = "`" + val + "`"
|
||||
}
|
||||
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if PrettySection {
|
||||
// Put a line between sections
|
||||
if _, err := buf.WriteString(LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// WriteToIndent writes content into io.Writer with given indention.
|
||||
// If PrettyFormat has been set to be true,
|
||||
// it will align "=" sign with spaces under each section.
|
||||
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
|
||||
buf, err := f.writeToBuffer(indent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return buf.WriteTo(w)
|
||||
}
|
||||
|
||||
// WriteTo writes file content into io.Writer.
|
||||
func (f *File) WriteTo(w io.Writer) (int64, error) {
|
||||
return f.WriteToIndent(w, "")
|
||||
}
|
||||
|
||||
// SaveToIndent writes content to file system with given value indention.
|
||||
func (f *File) SaveToIndent(filename, indent string) error {
|
||||
// Note: Because we are truncating with os.Create,
|
||||
// so it's safer to save to a temporary file location and rename afte done.
|
||||
buf, err := f.writeToBuffer(indent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
|
||||
}
|
||||
|
||||
// SaveTo writes content to file system.
|
||||
func (f *File) SaveTo(filename string) error {
|
||||
return f.SaveToIndent(filename, "")
|
||||
}
|
||||
|
||||
64 vendor/github.com/go-ini/ini/key.go generated vendored
@@ -15,6 +15,7 @@
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
// Key represents a key under a section.
|
||||
type Key struct {
|
||||
s *Section
|
||||
Comment string
|
||||
name string
|
||||
value string
|
||||
isAutoIncrement bool
|
||||
@@ -33,7 +35,7 @@ type Key struct {
|
||||
isShadow bool
|
||||
shadows []*Key
|
||||
|
||||
Comment string
|
||||
nestedValues []string
|
||||
}
|
||||
|
||||
// newKey simply return a key object with given values.
|
||||
@@ -66,6 +68,22 @@ func (k *Key) AddShadow(val string) error {
|
||||
return k.addShadow(val)
|
||||
}
|
||||
|
||||
func (k *Key) addNestedValue(val string) error {
|
||||
if k.isAutoIncrement || k.isBooleanType {
|
||||
return errors.New("cannot add nested value to auto-increment or boolean key")
|
||||
}
|
||||
|
||||
k.nestedValues = append(k.nestedValues, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Key) AddNestedValue(val string) error {
|
||||
if !k.s.f.options.AllowNestedValues {
|
||||
return errors.New("nested value is not allowed")
|
||||
}
|
||||
return k.addNestedValue(val)
|
||||
}
|
||||
|
||||
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
|
||||
type ValueMapper func(string) string
|
||||
|
||||
@@ -92,6 +110,12 @@ func (k *Key) ValueWithShadows() []string {
|
||||
return vals
|
||||
}
|
||||
|
||||
// NestedValues returns nested values stored in the key.
|
||||
// It is possible returned value is nil if no nested values stored in the key.
|
||||
func (k *Key) NestedValues() []string {
|
||||
return k.nestedValues
|
||||
}
|
||||
|
||||
// transformValue takes a raw value and transforms to its final string.
|
||||
func (k *Key) transformValue(val string) string {
|
||||
if k.s.f.ValueMapper != nil {
|
||||
@@ -114,7 +138,7 @@ func (k *Key) transformValue(val string) string {
|
||||
|
||||
// Search in the same section.
|
||||
nk, err := k.s.GetKey(noption)
|
||||
if err != nil {
|
||||
if err != nil || k == nk {
|
||||
// Search again in default section.
|
||||
nk, _ = k.s.f.Section("").GetKey(noption)
|
||||
}
|
||||
@@ -444,11 +468,39 @@ func (k *Key) Strings(delim string) []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
vals := strings.Split(str, delim)
|
||||
for i := range vals {
|
||||
// vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
|
||||
vals[i] = strings.TrimSpace(vals[i])
|
||||
runes := []rune(str)
|
||||
vals := make([]string, 0, 2)
|
||||
var buf bytes.Buffer
|
||||
escape := false
|
||||
idx := 0
|
||||
for {
|
||||
if escape {
|
||||
escape = false
|
||||
if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
|
||||
buf.WriteRune('\\')
|
||||
}
|
||||
buf.WriteRune(runes[idx])
|
||||
} else {
|
||||
if runes[idx] == '\\' {
|
||||
escape = true
|
||||
} else if strings.HasPrefix(string(runes[idx:]), delim) {
|
||||
idx += len(delim) - 1
|
||||
vals = append(vals, strings.TrimSpace(buf.String()))
|
||||
buf.Reset()
|
||||
} else {
|
||||
buf.WriteRune(runes[idx])
|
||||
}
|
||||
}
|
||||
idx += 1
|
||||
if idx == len(runes) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if buf.Len() > 0 {
|
||||
vals = append(vals, strings.TrimSpace(buf.String()))
|
||||
}
|
||||
|
||||
return vals
|
||||
}
|
||||
|
||||
|
||||
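The Key.Strings rework above replaces a plain strings.Split with an escape-aware scan, so a delimiter written as `\,` stays inside the value. A standalone sketch of that scanning technique (this is not the vendored function, just the same loop in isolation):

```go
package main

import (
	"fmt"
	"strings"
)

// splitEscaped splits s on delim, treating a backslash-escaped delimiter as
// part of the value, mirroring the loop added to Key.Strings above.
func splitEscaped(s, delim string) []string {
	runes := []rune(s)
	vals := make([]string, 0, 2)
	var buf strings.Builder
	escape := false
	for idx := 0; idx < len(runes); idx++ {
		switch {
		case escape:
			escape = false
			// Keep the backslash unless it escaped a backslash or the delimiter.
			if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
				buf.WriteRune('\\')
			}
			buf.WriteRune(runes[idx])
		case runes[idx] == '\\':
			escape = true
		case strings.HasPrefix(string(runes[idx:]), delim):
			idx += len(delim) - 1
			vals = append(vals, strings.TrimSpace(buf.String()))
			buf.Reset()
		default:
			buf.WriteRune(runes[idx])
		}
	}
	if buf.Len() > 0 {
		vals = append(vals, strings.TrimSpace(buf.String()))
	}
	return vals
}

func main() {
	fmt.Printf("%q\n", splitEscaped(`alpha, beta\, gamma, delta`, ","))
	// ["alpha" "beta, gamma" "delta"]
}
```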
171 vendor/github.com/go-ini/ini/parser.go generated vendored
@@ -19,11 +19,14 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)")
|
||||
|
||||
type tokenType int
|
||||
|
||||
const (
|
||||
@@ -193,7 +196,10 @@ func hasSurroundedQuote(in string, quote byte) bool {
|
||||
strings.IndexByte(in[1:], quote) == len(in)-2
|
||||
}
|
||||
|
||||
func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
|
||||
func (p *parser) readValue(in []byte,
|
||||
parserBufferSize int,
|
||||
ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) {
|
||||
|
||||
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
|
||||
if len(line) == 0 {
|
||||
return "", nil
|
||||
@@ -204,6 +210,8 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo
|
||||
valQuote = `"""`
|
||||
} else if line[0] == '`' {
|
||||
valQuote = "`"
|
||||
} else if unescapeValueDoubleQuotes && line[0] == '"' {
|
||||
valQuote = `"`
|
||||
}
|
||||
|
||||
if len(valQuote) > 0 {
|
||||
@@ -214,31 +222,97 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo
|
||||
return p.readMultilines(line, line[startIdx:], valQuote)
|
||||
}
|
||||
|
||||
if unescapeValueDoubleQuotes && valQuote == `"` {
|
||||
return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
|
||||
}
|
||||
return line[startIdx : pos+startIdx], nil
|
||||
}
|
||||
|
||||
lastChar := line[len(line)-1]
|
||||
// Won't be able to reach here if value only contains whitespace
|
||||
line = strings.TrimSpace(line)
|
||||
trimmedLastChar := line[len(line)-1]
|
||||
|
||||
// Check continuation lines when desired
|
||||
if !ignoreContinuation && line[len(line)-1] == '\\' {
|
||||
if !ignoreContinuation && trimmedLastChar == '\\' {
|
||||
return p.readContinuationLines(line[:len(line)-1])
|
||||
}
|
||||
|
||||
// Check if ignore inline comment
|
||||
if !ignoreInlineComment {
|
||||
i := strings.IndexAny(line, "#;")
|
||||
var i int
|
||||
if spaceBeforeInlineComment {
|
||||
i = strings.Index(line, " #")
|
||||
if i == -1 {
|
||||
i = strings.Index(line, " ;")
|
||||
}
|
||||
|
||||
} else {
|
||||
i = strings.IndexAny(line, "#;")
|
||||
}
|
||||
|
||||
if i > -1 {
|
||||
p.comment.WriteString(line[i:])
|
||||
line = strings.TrimSpace(line[:i])
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Trim single quotes
|
||||
// Trim single and double quotes
|
||||
if hasSurroundedQuote(line, '\'') ||
|
||||
hasSurroundedQuote(line, '"') {
|
||||
line = line[1 : len(line)-1]
|
||||
} else if len(valQuote) == 0 && unescapeValueCommentSymbols {
|
||||
if strings.Contains(line, `\;`) {
|
||||
line = strings.Replace(line, `\;`, ";", -1)
|
||||
}
|
||||
if strings.Contains(line, `\#`) {
|
||||
line = strings.Replace(line, `\#`, "#", -1)
|
||||
}
|
||||
} else if allowPythonMultilines && lastChar == '\n' {
|
||||
parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize)
|
||||
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
|
||||
|
||||
identSize := -1
|
||||
val := line
|
||||
|
||||
for {
|
||||
peekData, peekErr := peekBuffer.ReadBytes('\n')
|
||||
if peekErr != nil {
|
||||
if peekErr == io.EOF {
|
||||
return val, nil
|
||||
}
|
||||
return "", peekErr
|
||||
}
|
||||
|
||||
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
|
||||
if len(peekMatches) != 3 {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
currentIdentSize := len(peekMatches[1])
|
||||
// NOTE: Return if not a python-ini multi-line value.
|
||||
if currentIdentSize < 0 {
|
||||
return val, nil
|
||||
}
|
||||
identSize = currentIdentSize
|
||||
|
||||
// NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
|
||||
_, err := p.readUntil('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
val += fmt.Sprintf("\n%s", peekMatches[2])
|
||||
}
|
||||
|
||||
// NOTE: If it was a Python multi-line value,
|
||||
// return the appended value.
|
||||
if identSize > 0 {
|
||||
return val, nil
|
||||
}
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
||||
|
||||
@@ -250,16 +324,54 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
}
|
||||
|
||||
// Ignore error because default section name is never empty string.
|
||||
section, _ := f.NewSection(DEFAULT_SECTION)
|
||||
name := DEFAULT_SECTION
|
||||
if f.options.Insensitive {
|
||||
name = strings.ToLower(DEFAULT_SECTION)
|
||||
}
|
||||
section, _ := f.NewSection(name)
|
||||
|
||||
// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
|
||||
var isLastValueEmpty bool
|
||||
var lastRegularKey *Key
|
||||
|
||||
var line []byte
|
||||
var inUnparseableSection bool
|
||||
|
||||
// NOTE: Iterate and increase `currentPeekSize` until
|
||||
// the size of the parser buffer is found.
|
||||
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
|
||||
parserBufferSize := 0
|
||||
// NOTE: Peek 1kb at a time.
|
||||
currentPeekSize := 1024
|
||||
|
||||
if f.options.AllowPythonMultilineValues {
|
||||
for {
|
||||
peekBytes, _ := p.buf.Peek(currentPeekSize)
|
||||
peekBytesLength := len(peekBytes)
|
||||
|
||||
if parserBufferSize >= peekBytesLength {
|
||||
break
|
||||
}
|
||||
|
||||
currentPeekSize *= 2
|
||||
parserBufferSize = peekBytesLength
|
||||
}
|
||||
}
|
||||
|
||||
for !p.isEOF {
|
||||
line, err = p.readUntil('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if f.options.AllowNestedValues &&
|
||||
isLastValueEmpty && len(line) > 0 {
|
||||
if line[0] == ' ' || line[0] == '\t' {
|
||||
lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
@@ -277,8 +389,7 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
// Section
|
||||
if line[0] == '[' {
|
||||
// Read to the next ']' (TODO: support quoted strings)
|
||||
// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
|
||||
closeIdx := bytes.LastIndex(line, []byte("]"))
|
||||
closeIdx := bytes.LastIndexByte(line, ']')
|
||||
if closeIdx == -1 {
|
||||
return fmt.Errorf("unclosed section: %s", line)
|
||||
}
|
||||
@@ -320,18 +431,31 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
kname, offset, err := readKeyName(line)
|
||||
if err != nil {
|
||||
// Treat as boolean key when desired, and whole line is key name.
|
||||
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
|
||||
kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
|
||||
if err != nil {
|
||||
return err
|
||||
if IsErrDelimiterNotFound(err) {
|
||||
switch {
|
||||
case f.options.AllowBooleanKeys:
|
||||
kname, err := p.readValue(line,
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
f.options.IgnoreInlineComment,
|
||||
f.options.UnescapeValueDoubleQuotes,
|
||||
f.options.UnescapeValueCommentSymbols,
|
||||
f.options.AllowPythonMultilineValues,
|
||||
f.options.SpaceBeforeInlineComment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key, err := section.NewBooleanKey(kname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
continue
|
||||
|
||||
case f.options.SkipUnrecognizableLines:
|
||||
continue
|
||||
}
|
||||
key, err := section.NewBooleanKey(kname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -344,10 +468,18 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
p.count++
|
||||
}
|
||||
|
||||
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
|
||||
value, err := p.readValue(line[offset:],
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
f.options.IgnoreInlineComment,
|
||||
f.options.UnescapeValueDoubleQuotes,
|
||||
f.options.UnescapeValueCommentSymbols,
|
||||
f.options.AllowPythonMultilineValues,
|
||||
f.options.SpaceBeforeInlineComment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isLastValueEmpty = len(value) == 0
|
||||
|
||||
key, err := section.NewKey(kname, value)
|
||||
if err != nil {
|
||||
@@ -356,6 +488,7 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
key.isAutoIncrement = isAutoIncr
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
lastRegularKey = key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
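The readValue changes above let the parser peek ahead for Python-style indented continuation lines, sizing its peek buffer up front because bufio.Reader.Size is unavailable before Go 1.10. A sketch of what that option accepts, assuming the LoadSources and LoadOptions API shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte(`[message]
body = first line
    indented lines are folded
    into the same value
`)
	// AllowPythonMultilineValues is the option wired through readValue above.
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, data)
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println(cfg.Section("message").Key("body").String())
}
```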
10 vendor/github.com/go-ini/ini/section.go generated vendored
@@ -54,6 +54,14 @@ func (s *Section) Body() string {
    return strings.TrimSpace(s.rawBody)
}

// SetBody updates body content only if section is raw.
func (s *Section) SetBody(body string) {
    if !s.isRawSection {
        return
    }
    s.rawBody = body
}

// NewKey creates a new key to given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
    if len(name) == 0 {
@@ -74,6 +82,7 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
        }
    } else {
        s.keys[name].value = val
        s.keysHash[name] = val
    }
    return s.keys[name], nil
}
@@ -136,6 +145,7 @@ func (s *Section) HasKey(name string) bool {
}

// Haskey is a backwards-compatible name for HasKey.
// TODO: delete me in v2
func (s *Section) Haskey(name string) bool {
    return s.HasKey(name)
}

16 vendor/github.com/go-ini/ini/struct.go generated vendored
@@ -113,7 +113,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh
|
||||
default:
|
||||
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
|
||||
}
|
||||
if isStrict {
|
||||
if err != nil && isStrict {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -166,7 +166,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
durationVal, err := key.Duration()
|
||||
// Skip zero value
|
||||
if err == nil && int(durationVal) > 0 {
|
||||
if err == nil && int64(durationVal) > 0 {
|
||||
field.Set(reflect.ValueOf(durationVal))
|
||||
return nil
|
||||
}
|
||||
@@ -450,6 +450,12 @@ func (s *Section) reflectFrom(val reflect.Value) error {
|
||||
// Note: fieldName can never be empty here, ignore error.
|
||||
sec, _ = s.f.NewSection(fieldName)
|
||||
}
|
||||
|
||||
// Add comment from comment tag
|
||||
if len(sec.Comment) == 0 {
|
||||
sec.Comment = tpField.Tag.Get("comment")
|
||||
}
|
||||
|
||||
if err = sec.reflectFrom(field); err != nil {
|
||||
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
|
||||
}
|
||||
@@ -461,6 +467,12 @@ func (s *Section) reflectFrom(val reflect.Value) error {
|
||||
if err != nil {
|
||||
key, _ = s.NewKey(fieldName, "")
|
||||
}
|
||||
|
||||
// Add comment from comment tag
|
||||
if len(key.Comment) == 0 {
|
||||
key.Comment = tpField.Tag.Get("comment")
|
||||
}
|
||||
|
||||
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
|
||||
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
|
||||
}
|
||||
|
||||
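The struct.go hunks above copy `comment` struct tags onto the sections and keys created while reflecting a Go struct into INI. A hedged sketch of that round trip, assuming the package-level ini.Empty and ini.ReflectFrom helpers:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-ini/ini"
)

type Server struct {
	Host string `ini:"host" comment:"Address the listener binds to"`
	Port int    `ini:"port" comment:"TCP port"`
}

type Config struct {
	Server Server `ini:"server" comment:"HTTP front end"`
}

func main() {
	cfg := ini.Empty()
	// reflectFrom (shown above) copies the comment tags onto the new
	// sections and keys before the values are written out.
	if err := ini.ReflectFrom(cfg, &Config{Server: Server{Host: "0.0.0.0", Port: 8080}}); err != nil {
		fmt.Println("reflect failed:", err)
		return
	}
	_, _ = cfg.WriteTo(os.Stdout)
}
```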
14 vendor/github.com/gogo/protobuf/AUTHORS generated vendored Normal file
@@ -0,0 +1,14 @@
# This is the official list of GoGo authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS file, which
# lists people. For example, employees are listed in CONTRIBUTORS,
# but not in AUTHORS, because the employer holds the copyright.

# Names should be added to this file as one of
# Organization's name
# Individual's name <submission email address>
# Individual's name <submission email address> <email2> <emailN>

# Please keep the list sorted.

Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>
18 vendor/github.com/gogo/protobuf/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,18 @@
Anton Povarov <anton.povarov@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
Dwayne Schultz <dschultz@pivotal.io>
Georg Apitz <gapitz@pivotal.io>
Gustav Paul <gustav.paul@gmail.com>
Johan Brandhorst <johan.brandhorst@gmail.com>
John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Walter Schulze <awalterschulze@gmail.com>
5 vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,5 @@
The contributors to the Go protobuf repository:

# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
2 vendor/github.com/jmespath/go-jmespath/api.go generated vendored
@@ -2,7 +2,7 @@ package jmespath

import "strconv"

// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is
// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is
// safe for concurrent use by multiple goroutines.
type JMESPath struct {
	ast ASTNode
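The api.go hunk above only renames the doc comment on the compiled-query type. As a point of reference, here is a small, hypothetical sketch of how go-jmespath is typically used (the JSON document and expression are invented, not taken from this commit):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	if err := json.Unmarshal([]byte(`{"foo": {"bar": [1, 2, 3]}}`), &data); err != nil {
		panic(err)
	}

	// Search compiles and evaluates the expression in one call; Compile /
	// MustCompile return the compiled JMESPath value documented above.
	result, err := jmespath.Search("foo.bar[1]", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 2
}
```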
15 vendor/github.com/mattn/go-isatty/isatty_appengine.go generated vendored Normal file
@@ -0,0 +1,15 @@
// +build appengine

package isatty

// IsTerminal returns true if the file descriptor is terminal which
// is always false on on appengine classic which is a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
	return false
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
6 vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored
@@ -16,9 +16,3 @@ func IsTerminal(fd uintptr) bool {
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
6 vendor/github.com/mattn/go-isatty/isatty_linux.go generated vendored
@@ -16,9 +16,3 @@ func IsTerminal(fd uintptr) bool {
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
6 vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go generated vendored
@@ -17,9 +17,3 @@ func IsTerminal(fd uintptr) bool {
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
11 vendor/github.com/mattn/go-isatty/isatty_others.go generated vendored
@@ -1,14 +1,9 @@
// +build appengine js
// +build !windows
// +build !appengine

package isatty

// IsTerminal returns true if the file descriptor is terminal which
// is always false on js and appengine classic which is a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
	return false
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
6 vendor/github.com/mattn/go-isatty/isatty_solaris.go generated vendored
@@ -14,9 +14,3 @@ func IsTerminal(fd uintptr) bool {
	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
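The go-isatty files above provide the same two entry points on every platform (with stubbed-out versions for appengine/js). For orientation only, a minimal sketch of how the package is called, not part of the vendored diff:

```go
package main

import (
	"fmt"
	"os"

	isatty "github.com/mattn/go-isatty"
)

func main() {
	// IsTerminal and IsCygwinTerminal are the two functions the per-platform
	// files above implement; on sandboxed platforms both simply return false.
	switch {
	case isatty.IsTerminal(os.Stdout.Fd()):
		fmt.Println("stdout is a terminal")
	case isatty.IsCygwinTerminal(os.Stdout.Fd()):
		fmt.Println("stdout is a Cygwin/MSYS2 terminal")
	default:
		fmt.Println("stdout is not a terminal")
	}
}
```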
1 vendor/github.com/pborman/uuid/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1 @@
Paul Borman <borman@google.com>
4 vendor/github.com/petar/GoLLRB/AUTHORS generated vendored Normal file
@@ -0,0 +1,4 @@
Petar Maymounkov <petar@5ttt.org>
Vadim Vygonets <vadik@vygo.net>
Ian Smith <iansmith@acm.org>
Martin Bruse
27 vendor/github.com/petar/GoLLRB/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2010, Petar Maymounkov
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
(*) Redistributions of source code must retain the above copyright notice, this list
|
||||
of conditions and the following disclaimer.
|
||||
|
||||
(*) Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
|
||||
used to endorse or promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
39 vendor/github.com/petar/GoLLRB/llrb/avgvar.go generated vendored Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package llrb
|
||||
|
||||
import "math"
|
||||
|
||||
// avgVar maintains the average and variance of a stream of numbers
|
||||
// in a space-efficient manner.
|
||||
type avgVar struct {
|
||||
count int64
|
||||
sum, sumsq float64
|
||||
}
|
||||
|
||||
func (av *avgVar) Init() {
|
||||
av.count = 0
|
||||
av.sum = 0.0
|
||||
av.sumsq = 0.0
|
||||
}
|
||||
|
||||
func (av *avgVar) Add(sample float64) {
|
||||
av.count++
|
||||
av.sum += sample
|
||||
av.sumsq += sample * sample
|
||||
}
|
||||
|
||||
func (av *avgVar) GetCount() int64 { return av.count }
|
||||
|
||||
func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }
|
||||
|
||||
func (av *avgVar) GetTotal() float64 { return av.sum }
|
||||
|
||||
func (av *avgVar) GetVar() float64 {
|
||||
a := av.GetAvg()
|
||||
return av.sumsq/float64(av.count) - a*a
|
||||
}
|
||||
|
||||
func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }
|
||||
93 vendor/github.com/petar/GoLLRB/llrb/iterator.go generated vendored Normal file
@@ -0,0 +1,93 @@
|
||||
package llrb
|
||||
|
||||
type ItemIterator func(i Item) bool
|
||||
|
||||
//func (t *Tree) Ascend(iterator ItemIterator) {
|
||||
// t.AscendGreaterOrEqual(Inf(-1), iterator)
|
||||
//}
|
||||
|
||||
func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
|
||||
t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
|
||||
}
|
||||
|
||||
func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if !less(h.Item, sup) {
|
||||
return t.ascendRange(h.Left, inf, sup, iterator)
|
||||
}
|
||||
if less(h.Item, inf) {
|
||||
return t.ascendRange(h.Right, inf, sup, iterator)
|
||||
}
|
||||
|
||||
if !t.ascendRange(h.Left, inf, sup, iterator) {
|
||||
return false
|
||||
}
|
||||
if !iterator(h.Item) {
|
||||
return false
|
||||
}
|
||||
return t.ascendRange(h.Right, inf, sup, iterator)
|
||||
}
|
||||
|
||||
// AscendGreaterOrEqual will call iterator once for each element greater or equal to
|
||||
// pivot in ascending order. It will stop whenever the iterator returns false.
|
||||
func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
|
||||
t.ascendGreaterOrEqual(t.root, pivot, iterator)
|
||||
}
|
||||
|
||||
func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if !less(h.Item, pivot) {
|
||||
if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
|
||||
return false
|
||||
}
|
||||
if !iterator(h.Item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
|
||||
}
|
||||
|
||||
func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
|
||||
t.ascendLessThan(t.root, pivot, iterator)
|
||||
}
|
||||
|
||||
func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if !t.ascendLessThan(h.Left, pivot, iterator) {
|
||||
return false
|
||||
}
|
||||
if !iterator(h.Item) {
|
||||
return false
|
||||
}
|
||||
if less(h.Item, pivot) {
|
||||
return t.ascendLessThan(h.Left, pivot, iterator)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// DescendLessOrEqual will call iterator once for each element less than the
|
||||
// pivot in descending order. It will stop whenever the iterator returns false.
|
||||
func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
|
||||
t.descendLessOrEqual(t.root, pivot, iterator)
|
||||
}
|
||||
|
||||
func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if less(h.Item, pivot) || !less(pivot, h.Item) {
|
||||
if !t.descendLessOrEqual(h.Right, pivot, iterator) {
|
||||
return false
|
||||
}
|
||||
if !iterator(h.Item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return t.descendLessOrEqual(h.Left, pivot, iterator)
|
||||
}
|
||||
46 vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go generated vendored Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package llrb
|
||||
|
||||
// GetHeight() returns an item in the tree with key @key, and it's height in the tree
|
||||
func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
|
||||
return t.getHeight(t.root, key)
|
||||
}
|
||||
|
||||
func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
|
||||
if h == nil {
|
||||
return nil, 0
|
||||
}
|
||||
if less(item, h.Item) {
|
||||
result, depth := t.getHeight(h.Left, item)
|
||||
return result, depth + 1
|
||||
}
|
||||
if less(h.Item, item) {
|
||||
result, depth := t.getHeight(h.Right, item)
|
||||
return result, depth + 1
|
||||
}
|
||||
return h.Item, 0
|
||||
}
|
||||
|
||||
// HeightStats() returns the average and standard deviation of the height
|
||||
// of elements in the tree
|
||||
func (t *LLRB) HeightStats() (avg, stddev float64) {
|
||||
av := &avgVar{}
|
||||
heightStats(t.root, 0, av)
|
||||
return av.GetAvg(), av.GetStdDev()
|
||||
}
|
||||
|
||||
func heightStats(h *Node, d int, av *avgVar) {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
av.Add(float64(d))
|
||||
if h.Left != nil {
|
||||
heightStats(h.Left, d+1, av)
|
||||
}
|
||||
if h.Right != nil {
|
||||
heightStats(h.Right, d+1, av)
|
||||
}
|
||||
}
|
||||
456 vendor/github.com/petar/GoLLRB/llrb/llrb.go generated vendored Normal file
@@ -0,0 +1,456 @@
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
|
||||
// based on the following work:
|
||||
//
|
||||
// http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
|
||||
// http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
|
||||
// http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
|
||||
//
|
||||
// 2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
|
||||
// algoritms found in implementations of Python, Java, and other libraries. The LLRB
|
||||
// implementation of 2-3 trees is a recent improvement on the traditional implementation,
|
||||
// observed and documented by Robert Sedgewick.
|
||||
//
|
||||
package llrb
|
||||
|
||||
// Tree is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees
|
||||
type LLRB struct {
|
||||
count int
|
||||
root *Node
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
Item
|
||||
Left, Right *Node // Pointers to left and right child nodes
|
||||
Black bool // If set, the color of the link (incoming from the parent) is black
|
||||
// In the LLRB, new nodes are always red, hence the zero-value for node
|
||||
}
|
||||
|
||||
type Item interface {
|
||||
Less(than Item) bool
|
||||
}
|
||||
|
||||
//
|
||||
func less(x, y Item) bool {
|
||||
if x == pinf {
|
||||
return false
|
||||
}
|
||||
if x == ninf {
|
||||
return true
|
||||
}
|
||||
return x.Less(y)
|
||||
}
|
||||
|
||||
// Inf returns an Item that is "bigger than" any other item, if sign is positive.
|
||||
// Otherwise it returns an Item that is "smaller than" any other item.
|
||||
func Inf(sign int) Item {
|
||||
if sign == 0 {
|
||||
panic("sign")
|
||||
}
|
||||
if sign > 0 {
|
||||
return pinf
|
||||
}
|
||||
return ninf
|
||||
}
|
||||
|
||||
var (
|
||||
ninf = nInf{}
|
||||
pinf = pInf{}
|
||||
)
|
||||
|
||||
type nInf struct{}
|
||||
|
||||
func (nInf) Less(Item) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type pInf struct{}
|
||||
|
||||
func (pInf) Less(Item) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// New() allocates a new tree
|
||||
func New() *LLRB {
|
||||
return &LLRB{}
|
||||
}
|
||||
|
||||
// SetRoot sets the root node of the tree.
|
||||
// It is intended to be used by functions that deserialize the tree.
|
||||
func (t *LLRB) SetRoot(r *Node) {
|
||||
t.root = r
|
||||
}
|
||||
|
||||
// Root returns the root node of the tree.
|
||||
// It is intended to be used by functions that serialize the tree.
|
||||
func (t *LLRB) Root() *Node {
|
||||
return t.root
|
||||
}
|
||||
|
||||
// Len returns the number of nodes in the tree.
|
||||
func (t *LLRB) Len() int { return t.count }
|
||||
|
||||
// Has returns true if the tree contains an element whose order is the same as that of key.
|
||||
func (t *LLRB) Has(key Item) bool {
|
||||
return t.Get(key) != nil
|
||||
}
|
||||
|
||||
// Get retrieves an element from the tree whose order is the same as that of key.
|
||||
func (t *LLRB) Get(key Item) Item {
|
||||
h := t.root
|
||||
for h != nil {
|
||||
switch {
|
||||
case less(key, h.Item):
|
||||
h = h.Left
|
||||
case less(h.Item, key):
|
||||
h = h.Right
|
||||
default:
|
||||
return h.Item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Min returns the minimum element in the tree.
|
||||
func (t *LLRB) Min() Item {
|
||||
h := t.root
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
for h.Left != nil {
|
||||
h = h.Left
|
||||
}
|
||||
return h.Item
|
||||
}
|
||||
|
||||
// Max returns the maximum element in the tree.
|
||||
func (t *LLRB) Max() Item {
|
||||
h := t.root
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
for h.Right != nil {
|
||||
h = h.Right
|
||||
}
|
||||
return h.Item
|
||||
}
|
||||
|
||||
func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
|
||||
for _, i := range items {
|
||||
t.ReplaceOrInsert(i)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
|
||||
for _, i := range items {
|
||||
t.InsertNoReplace(i)
|
||||
}
|
||||
}
|
||||
|
||||
// ReplaceOrInsert inserts item into the tree. If an existing
|
||||
// element has the same order, it is removed from the tree and returned.
|
||||
func (t *LLRB) ReplaceOrInsert(item Item) Item {
|
||||
if item == nil {
|
||||
panic("inserting nil item")
|
||||
}
|
||||
var replaced Item
|
||||
t.root, replaced = t.replaceOrInsert(t.root, item)
|
||||
t.root.Black = true
|
||||
if replaced == nil {
|
||||
t.count++
|
||||
}
|
||||
return replaced
|
||||
}
|
||||
|
||||
func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
|
||||
if h == nil {
|
||||
return newNode(item), nil
|
||||
}
|
||||
|
||||
h = walkDownRot23(h)
|
||||
|
||||
var replaced Item
|
||||
if less(item, h.Item) { // BUG
|
||||
h.Left, replaced = t.replaceOrInsert(h.Left, item)
|
||||
} else if less(h.Item, item) {
|
||||
h.Right, replaced = t.replaceOrInsert(h.Right, item)
|
||||
} else {
|
||||
replaced, h.Item = h.Item, item
|
||||
}
|
||||
|
||||
h = walkUpRot23(h)
|
||||
|
||||
return h, replaced
|
||||
}
|
||||
|
||||
// InsertNoReplace inserts item into the tree. If an existing
|
||||
// element has the same order, both elements remain in the tree.
|
||||
func (t *LLRB) InsertNoReplace(item Item) {
|
||||
if item == nil {
|
||||
panic("inserting nil item")
|
||||
}
|
||||
t.root = t.insertNoReplace(t.root, item)
|
||||
t.root.Black = true
|
||||
t.count++
|
||||
}
|
||||
|
||||
func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
|
||||
if h == nil {
|
||||
return newNode(item)
|
||||
}
|
||||
|
||||
h = walkDownRot23(h)
|
||||
|
||||
if less(item, h.Item) {
|
||||
h.Left = t.insertNoReplace(h.Left, item)
|
||||
} else {
|
||||
h.Right = t.insertNoReplace(h.Right, item)
|
||||
}
|
||||
|
||||
return walkUpRot23(h)
|
||||
}
|
||||
|
||||
// Rotation driver routines for 2-3 algorithm
|
||||
|
||||
func walkDownRot23(h *Node) *Node { return h }
|
||||
|
||||
func walkUpRot23(h *Node) *Node {
|
||||
if isRed(h.Right) && !isRed(h.Left) {
|
||||
h = rotateLeft(h)
|
||||
}
|
||||
|
||||
if isRed(h.Left) && isRed(h.Left.Left) {
|
||||
h = rotateRight(h)
|
||||
}
|
||||
|
||||
if isRed(h.Left) && isRed(h.Right) {
|
||||
flip(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// Rotation driver routines for 2-3-4 algorithm
|
||||
|
||||
func walkDownRot234(h *Node) *Node {
|
||||
if isRed(h.Left) && isRed(h.Right) {
|
||||
flip(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func walkUpRot234(h *Node) *Node {
|
||||
if isRed(h.Right) && !isRed(h.Left) {
|
||||
h = rotateLeft(h)
|
||||
}
|
||||
|
||||
if isRed(h.Left) && isRed(h.Left.Left) {
|
||||
h = rotateRight(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// DeleteMin deletes the minimum element in the tree and returns the
|
||||
// deleted item or nil otherwise.
|
||||
func (t *LLRB) DeleteMin() Item {
|
||||
var deleted Item
|
||||
t.root, deleted = deleteMin(t.root)
|
||||
if t.root != nil {
|
||||
t.root.Black = true
|
||||
}
|
||||
if deleted != nil {
|
||||
t.count--
|
||||
}
|
||||
return deleted
|
||||
}
|
||||
|
||||
// deleteMin code for LLRB 2-3 trees
|
||||
func deleteMin(h *Node) (*Node, Item) {
|
||||
if h == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if h.Left == nil {
|
||||
return nil, h.Item
|
||||
}
|
||||
|
||||
if !isRed(h.Left) && !isRed(h.Left.Left) {
|
||||
h = moveRedLeft(h)
|
||||
}
|
||||
|
||||
var deleted Item
|
||||
h.Left, deleted = deleteMin(h.Left)
|
||||
|
||||
return fixUp(h), deleted
|
||||
}
|
||||
|
||||
// DeleteMax deletes the maximum element in the tree and returns
|
||||
// the deleted item or nil otherwise
|
||||
func (t *LLRB) DeleteMax() Item {
|
||||
var deleted Item
|
||||
t.root, deleted = deleteMax(t.root)
|
||||
if t.root != nil {
|
||||
t.root.Black = true
|
||||
}
|
||||
if deleted != nil {
|
||||
t.count--
|
||||
}
|
||||
return deleted
|
||||
}
|
||||
|
||||
func deleteMax(h *Node) (*Node, Item) {
|
||||
if h == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if isRed(h.Left) {
|
||||
h = rotateRight(h)
|
||||
}
|
||||
if h.Right == nil {
|
||||
return nil, h.Item
|
||||
}
|
||||
if !isRed(h.Right) && !isRed(h.Right.Left) {
|
||||
h = moveRedRight(h)
|
||||
}
|
||||
var deleted Item
|
||||
h.Right, deleted = deleteMax(h.Right)
|
||||
|
||||
return fixUp(h), deleted
|
||||
}
|
||||
|
||||
// Delete deletes an item from the tree whose key equals key.
|
||||
// The deleted item is return, otherwise nil is returned.
|
||||
func (t *LLRB) Delete(key Item) Item {
|
||||
var deleted Item
|
||||
t.root, deleted = t.delete(t.root, key)
|
||||
if t.root != nil {
|
||||
t.root.Black = true
|
||||
}
|
||||
if deleted != nil {
|
||||
t.count--
|
||||
}
|
||||
return deleted
|
||||
}
|
||||
|
||||
func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
|
||||
var deleted Item
|
||||
if h == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if less(item, h.Item) {
|
||||
if h.Left == nil { // item not present. Nothing to delete
|
||||
return h, nil
|
||||
}
|
||||
if !isRed(h.Left) && !isRed(h.Left.Left) {
|
||||
h = moveRedLeft(h)
|
||||
}
|
||||
h.Left, deleted = t.delete(h.Left, item)
|
||||
} else {
|
||||
if isRed(h.Left) {
|
||||
h = rotateRight(h)
|
||||
}
|
||||
// If @item equals @h.Item and no right children at @h
|
||||
if !less(h.Item, item) && h.Right == nil {
|
||||
return nil, h.Item
|
||||
}
|
||||
// PETAR: Added 'h.Right != nil' below
|
||||
if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
|
||||
h = moveRedRight(h)
|
||||
}
|
||||
// If @item equals @h.Item, and (from above) 'h.Right != nil'
|
||||
if !less(h.Item, item) {
|
||||
var subDeleted Item
|
||||
h.Right, subDeleted = deleteMin(h.Right)
|
||||
if subDeleted == nil {
|
||||
panic("logic")
|
||||
}
|
||||
deleted, h.Item = h.Item, subDeleted
|
||||
} else { // Else, @item is bigger than @h.Item
|
||||
h.Right, deleted = t.delete(h.Right, item)
|
||||
}
|
||||
}
|
||||
|
||||
return fixUp(h), deleted
|
||||
}
|
||||
|
||||
// Internal node manipulation routines
|
||||
|
||||
func newNode(item Item) *Node { return &Node{Item: item} }
|
||||
|
||||
func isRed(h *Node) bool {
|
||||
if h == nil {
|
||||
return false
|
||||
}
|
||||
return !h.Black
|
||||
}
|
||||
|
||||
func rotateLeft(h *Node) *Node {
|
||||
x := h.Right
|
||||
if x.Black {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.Right = x.Left
|
||||
x.Left = h
|
||||
x.Black = h.Black
|
||||
h.Black = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rotateRight(h *Node) *Node {
|
||||
x := h.Left
|
||||
if x.Black {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.Left = x.Right
|
||||
x.Right = h
|
||||
x.Black = h.Black
|
||||
h.Black = false
|
||||
return x
|
||||
}
|
||||
|
||||
// REQUIRE: Left and Right children must be present
|
||||
func flip(h *Node) {
|
||||
h.Black = !h.Black
|
||||
h.Left.Black = !h.Left.Black
|
||||
h.Right.Black = !h.Right.Black
|
||||
}
|
||||
|
||||
// REQUIRE: Left and Right children must be present
|
||||
func moveRedLeft(h *Node) *Node {
|
||||
flip(h)
|
||||
if isRed(h.Right.Left) {
|
||||
h.Right = rotateRight(h.Right)
|
||||
h = rotateLeft(h)
|
||||
flip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// REQUIRE: Left and Right children must be present
|
||||
func moveRedRight(h *Node) *Node {
|
||||
flip(h)
|
||||
if isRed(h.Left.Left) {
|
||||
h = rotateRight(h)
|
||||
flip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func fixUp(h *Node) *Node {
|
||||
if isRed(h.Right) {
|
||||
h = rotateLeft(h)
|
||||
}
|
||||
|
||||
if isRed(h.Left) && isRed(h.Left.Left) {
|
||||
h = rotateRight(h)
|
||||
}
|
||||
|
||||
if isRed(h.Left) && isRed(h.Right) {
|
||||
flip(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
17 vendor/github.com/petar/GoLLRB/llrb/util.go generated vendored Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package llrb
|
||||
|
||||
type Int int
|
||||
|
||||
func (x Int) Less(than Item) bool {
|
||||
return x < than.(Int)
|
||||
}
|
||||
|
||||
type String string
|
||||
|
||||
func (x String) Less(than Item) bool {
|
||||
return x < than.(String)
|
||||
}
|
||||
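The GoLLRB files added above implement a Left-Leaning Red-Black tree keyed by anything that satisfies the Item interface (util.go supplies llrb.Int and llrb.String). As a quick illustration, and not part of the vendored diff, a small sketch using only functions that appear in llrb.go and iterator.go above:

```go
package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	// llrb.Int implements Item (its Less method is defined in util.go above).
	tree := llrb.New()
	for _, v := range []llrb.Int{5, 1, 3, 2, 4} {
		tree.ReplaceOrInsert(v)
	}

	fmt.Println(tree.Len())             // 5
	fmt.Println(tree.Min(), tree.Max()) // 1 5
	fmt.Println(tree.Has(llrb.Int(3)))  // true

	// In-order walk of everything >= 2, stopping once the iterator returns false.
	tree.AscendGreaterOrEqual(llrb.Int(2), func(i llrb.Item) bool {
		fmt.Println(i)
		return i.(llrb.Int) < 4
	})
}
```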
54 vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored
@@ -559,10 +559,14 @@ type UnifiedDiff struct {
|
||||
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
w := func(format string, args ...interface{}) error {
|
||||
wf := func(format string, args ...interface{}) error {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
return err
|
||||
}
|
||||
ws := func(s string) error {
|
||||
_, err := buf.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
@@ -581,26 +585,28 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
first, last := g[0], g[len(g)-1]
|
||||
range1 := formatRangeUnified(first.I1, last.I2)
|
||||
range2 := formatRangeUnified(first.J1, last.J2)
|
||||
if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range g {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
if c.Tag == 'e' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := w(" " + line); err != nil {
|
||||
if err := ws(" " + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -608,14 +614,14 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := w("-" + line); err != nil {
|
||||
if err := ws("-" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, line := range diff.B[j1:j2] {
|
||||
if err := w("+" + line); err != nil {
|
||||
if err := ws("+" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -669,12 +675,18 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
var diffErr error
|
||||
w := func(format string, args ...interface{}) {
|
||||
wf := func(format string, args ...interface{}) {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
ws := func(s string) {
|
||||
_, err := buf.WriteString(s)
|
||||
if diffErr == nil && err != nil {
|
||||
diffErr = err
|
||||
}
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
@@ -700,15 +712,17 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
w("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
}
|
||||
}
|
||||
|
||||
first, last := g[0], g[len(g)-1]
|
||||
w("***************" + diff.Eol)
|
||||
ws("***************" + diff.Eol)
|
||||
|
||||
range1 := formatRangeContext(first.I1, last.I2)
|
||||
w("*** %s ****%s", range1, diff.Eol)
|
||||
wf("*** %s ****%s", range1, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, cc := range g {
|
||||
@@ -716,7 +730,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.A[cc.I1:cc.I2] {
|
||||
w(prefix[cc.Tag] + line)
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
@@ -724,7 +738,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
}
|
||||
|
||||
range2 := formatRangeContext(first.J1, last.J2)
|
||||
w("--- %s ----%s", range2, diff.Eol)
|
||||
wf("--- %s ----%s", range2, diff.Eol)
|
||||
for _, c := range g {
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, cc := range g {
|
||||
@@ -732,7 +746,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
|
||||
continue
|
||||
}
|
||||
for _, line := range diff.B[cc.J1:cc.J2] {
|
||||
w(prefix[cc.Tag] + line)
|
||||
ws(prefix[cc.Tag] + line)
|
||||
}
|
||||
}
|
||||
break
|
||||
|
||||
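The difflib hunks above split the writer helper into wf/ws and skip the ---/+++ header when both file names are empty. For orientation, a hypothetical usage sketch of the entry point being refactored; it assumes difflib.SplitLines from the same upstream package and is not part of this commit:

```go
package main

import (
	"os"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	// UnifiedDiff/WriteUnifiedDiff are the functions touched above.
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  1,
	}
	if err := difflib.WriteUnifiedDiff(os.Stdout, diff); err != nil {
		panic(err)
	}
}
```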
@@ -1,3 +1,4 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
22 vendor/github.com/stretchr/testify/doc.go generated vendored
@@ -1,22 +0,0 @@
|
||||
// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend.
|
||||
//
|
||||
// testify contains the following packages:
|
||||
//
|
||||
// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system.
|
||||
//
|
||||
// The http package contains tools to make it easier to test http activity using the Go testing system.
|
||||
//
|
||||
// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected.
|
||||
//
|
||||
// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces.
|
||||
package testify
|
||||
|
||||
// blank imports help docs.
|
||||
import (
|
||||
// assert package
|
||||
_ "github.com/stretchr/testify/assert"
|
||||
// http package
|
||||
_ "github.com/stretchr/testify/http"
|
||||
// mock package
|
||||
_ "github.com/stretchr/testify/mock"
|
||||
)
|
||||
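The removed doc.go above describes testify's assert, http, mock, and suite packages. Purely as an illustration of the assert package it mentions (a throwaway _test.go sketch, not part of this commit):

```go
package mypkg

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestSomething shows the assertion style the removed doc.go documents.
func TestSomething(t *testing.T) {
	got := 2 + 2
	assert.Equal(t, 4, got, "they should be equal")
	assert.NotNil(t, &got)
}
```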
8 vendor/go4.org/AUTHORS generated vendored Normal file
@@ -0,0 +1,8 @@
# This is the official list of go4 authors for copyright purposes.
# This is distinct from the CONTRIBUTORS file, which is the list of
# people who have contributed, even if they don't own the copyright on
# their work.

Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
Daniel Theophanes <kardianos@gmail.com>
Google
3 vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
3 vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
3 vendor/golang.org/x/net/AUTHORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3 vendor/golang.org/x/net/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
78 vendor/golang.org/x/net/html/atom/atom.go generated vendored
@@ -1,78 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package atom provides integer codes (also known as atoms) for a fixed set of
|
||||
// frequently occurring HTML strings: tag names and attribute keys such as "p"
|
||||
// and "id".
|
||||
//
|
||||
// Sharing an atom's name between all elements with the same tag can result in
|
||||
// fewer string allocations when tokenizing and parsing HTML. Integer
|
||||
// comparisons are also generally faster than string comparisons.
|
||||
//
|
||||
// The value of an atom's particular code is not guaranteed to stay the same
|
||||
// between versions of this package. Neither is any ordering guaranteed:
|
||||
// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
|
||||
// be dense. The only guarantees are that e.g. looking up "div" will yield
|
||||
// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
|
||||
package atom // import "golang.org/x/net/html/atom"
|
||||
|
||||
// Atom is an integer code for a string. The zero value maps to "".
|
||||
type Atom uint32
|
||||
|
||||
// String returns the atom's name.
|
||||
func (a Atom) String() string {
|
||||
start := uint32(a >> 8)
|
||||
n := uint32(a & 0xff)
|
||||
if start+n > uint32(len(atomText)) {
|
||||
return ""
|
||||
}
|
||||
return atomText[start : start+n]
|
||||
}
|
||||
|
||||
func (a Atom) string() string {
|
||||
return atomText[a>>8 : a>>8+a&0xff]
|
||||
}
|
||||
|
||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
||||
func fnv(h uint32, s []byte) uint32 {
|
||||
for i := range s {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func match(s string, t []byte) bool {
|
||||
for i, c := range t {
|
||||
if s[i] != c {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup returns the atom whose name is s. It returns zero if there is no
|
||||
// such atom. The lookup is case sensitive.
|
||||
func Lookup(s []byte) Atom {
|
||||
if len(s) == 0 || len(s) > maxAtomLen {
|
||||
return 0
|
||||
}
|
||||
h := fnv(hash0, s)
|
||||
if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
|
||||
return a
|
||||
}
|
||||
if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
|
||||
return a
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// String returns a string whose contents are equal to s. In that sense, it is
|
||||
// equivalent to string(s) but may be more efficient.
|
||||
func String(s []byte) string {
|
||||
if a := Lookup(s); a != 0 {
|
||||
return a.String()
|
||||
}
|
||||
return string(s)
|
||||
}
|
||||
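The removed atom.go above documents the package's contract: tag and attribute names map to integer codes so comparisons avoid string allocations. A small illustrative sketch of that API (not part of the vendored diff), using only Lookup, the Div constant, and Atom.String as shown above:

```go
package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Lookup returns the atom for a known name and zero otherwise.
	a := atom.Lookup([]byte("div"))
	fmt.Println(a == atom.Div)                            // true
	fmt.Println(a.String())                               // "div"
	fmt.Println(atom.Lookup([]byte("not-an-atom")) == 0)  // true
}
```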
648 vendor/golang.org/x/net/html/atom/gen.go generated vendored
@@ -1,648 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// This program generates table.go and table_test.go.
|
||||
// Invoke as
|
||||
//
|
||||
// go run gen.go |gofmt >table.go
|
||||
// go run gen.go -test |gofmt >table_test.go
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// identifier converts s to a Go exported identifier.
|
||||
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
|
||||
func identifier(s string) string {
|
||||
b := make([]byte, 0, len(s))
|
||||
cap := true
|
||||
for _, c := range s {
|
||||
if c == '-' {
|
||||
cap = true
|
||||
continue
|
||||
}
|
||||
if cap && 'a' <= c && c <= 'z' {
|
||||
c -= 'a' - 'A'
|
||||
}
|
||||
cap = false
|
||||
b = append(b, byte(c))
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
var test = flag.Bool("test", false, "generate table_test.go")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var all []string
|
||||
all = append(all, elements...)
|
||||
all = append(all, attributes...)
|
||||
all = append(all, eventHandlers...)
|
||||
all = append(all, extra...)
|
||||
sort.Strings(all)
|
||||
|
||||
if *test {
|
||||
fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
|
||||
fmt.Printf("package atom\n\n")
|
||||
fmt.Printf("var testAtomList = []string{\n")
|
||||
for _, s := range all {
|
||||
fmt.Printf("\t%q,\n", s)
|
||||
}
|
||||
fmt.Printf("}\n")
|
||||
return
|
||||
}
|
||||
|
||||
// uniq - lists have dups
|
||||
// compute max len too
|
||||
maxLen := 0
|
||||
w := 0
|
||||
for _, s := range all {
|
||||
if w == 0 || all[w-1] != s {
|
||||
if maxLen < len(s) {
|
||||
maxLen = len(s)
|
||||
}
|
||||
all[w] = s
|
||||
w++
|
||||
}
|
||||
}
|
||||
all = all[:w]
|
||||
|
||||
// Find hash that minimizes table size.
|
||||
var best *table
|
||||
for i := 0; i < 1000000; i++ {
|
||||
if best != nil && 1<<(best.k-1) < len(all) {
|
||||
break
|
||||
}
|
||||
h := rand.Uint32()
|
||||
for k := uint(0); k <= 16; k++ {
|
||||
if best != nil && k >= best.k {
|
||||
break
|
||||
}
|
||||
var t table
|
||||
if t.init(h, k, all) {
|
||||
best = &t
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if best == nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Lay out strings, using overlaps when possible.
|
||||
layout := append([]string{}, all...)
|
||||
|
||||
// Remove strings that are substrings of other strings
|
||||
for changed := true; changed; {
|
||||
changed = false
|
||||
for i, s := range layout {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
for j, t := range layout {
|
||||
if i != j && t != "" && strings.Contains(s, t) {
|
||||
changed = true
|
||||
layout[j] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Join strings where one suffix matches another prefix.
|
||||
for {
|
||||
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
|
||||
// maximizing overlap length k.
|
||||
besti := -1
|
||||
bestj := -1
|
||||
bestk := 0
|
||||
for i, s := range layout {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
for j, t := range layout {
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
|
||||
if s[len(s)-k:] == t[:k] {
|
||||
besti = i
|
||||
bestj = j
|
||||
bestk = k
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestk > 0 {
|
||||
layout[besti] += layout[bestj][bestk:]
|
||||
layout[bestj] = ""
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
text := strings.Join(layout, "")
|
||||
|
||||
atom := map[string]uint32{}
|
||||
for _, s := range all {
|
||||
off := strings.Index(text, s)
|
||||
if off < 0 {
|
||||
panic("lost string " + s)
|
||||
}
|
||||
atom[s] = uint32(off<<8 | len(s))
|
||||
}
|
||||
|
||||
// Generate the Go code.
|
||||
fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
|
||||
fmt.Printf("package atom\n\nconst (\n")
|
||||
for _, s := range all {
|
||||
fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
|
||||
}
|
||||
fmt.Printf(")\n\n")
|
||||
|
||||
fmt.Printf("const hash0 = %#x\n\n", best.h0)
|
||||
fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
|
||||
|
||||
fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
|
||||
for i, s := range best.tab {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
|
||||
}
|
||||
fmt.Printf("}\n")
|
||||
datasize := (1 << best.k) * 4
|
||||
|
||||
fmt.Printf("const atomText =\n")
|
||||
textsize := len(text)
|
||||
for len(text) > 60 {
|
||||
fmt.Printf("\t%q +\n", text[:60])
|
||||
text = text[60:]
|
||||
}
|
||||
fmt.Printf("\t%q\n\n", text)
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
|
||||
}
|
||||
|
||||
type byLen []string
|
||||
|
||||
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
|
||||
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byLen) Len() int { return len(x) }
|
||||
|
||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
||||
func fnv(h uint32, s string) uint32 {
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// A table represents an attempt at constructing the lookup table.
|
||||
// The lookup table uses cuckoo hashing, meaning that each string
|
||||
// can be found in one of two positions.
|
||||
type table struct {
|
||||
h0 uint32
|
||||
k uint
|
||||
mask uint32
|
||||
tab []string
|
||||
}
|
||||
|
||||
// hash returns the two hashes for s.
|
||||
func (t *table) hash(s string) (h1, h2 uint32) {
|
||||
h := fnv(t.h0, s)
|
||||
h1 = h & t.mask
|
||||
h2 = (h >> 16) & t.mask
|
||||
return
|
||||
}
|
||||
|
||||
// init initializes the table with the given parameters.
|
||||
// h0 is the initial hash value,
|
||||
// k is the number of bits of hash value to use, and
|
||||
// x is the list of strings to store in the table.
|
||||
// init returns false if the table cannot be constructed.
|
||||
func (t *table) init(h0 uint32, k uint, x []string) bool {
|
||||
t.h0 = h0
|
||||
t.k = k
|
||||
t.tab = make([]string, 1<<k)
|
||||
t.mask = 1<<k - 1
|
||||
for _, s := range x {
|
||||
if !t.insert(s) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// insert inserts s in the table.
|
||||
func (t *table) insert(s string) bool {
|
||||
h1, h2 := t.hash(s)
|
||||
if t.tab[h1] == "" {
|
||||
t.tab[h1] = s
|
||||
return true
|
||||
}
|
||||
if t.tab[h2] == "" {
|
||||
t.tab[h2] = s
|
||||
return true
|
||||
}
|
||||
if t.push(h1, 0) {
|
||||
t.tab[h1] = s
|
||||
return true
|
||||
}
|
||||
if t.push(h2, 0) {
|
||||
t.tab[h2] = s
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// push attempts to push aside the entry in slot i.
|
||||
func (t *table) push(i uint32, depth int) bool {
|
||||
if depth > len(t.tab) {
|
||||
return false
|
||||
}
|
||||
s := t.tab[i]
|
||||
h1, h2 := t.hash(s)
|
||||
j := h1 + h2 - i
|
||||
if t.tab[j] != "" && !t.push(j, depth+1) {
|
||||
return false
|
||||
}
|
||||
t.tab[j] = s
|
||||
return true
|
||||
}
|
||||
|
||||
// The lists of element names and attribute keys were taken from
|
||||
// https://html.spec.whatwg.org/multipage/indices.html#index
|
||||
// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
|
||||
|
||||
var elements = []string{
|
||||
"a",
|
||||
"abbr",
|
||||
"address",
|
||||
"area",
|
||||
"article",
|
||||
"aside",
|
||||
"audio",
|
||||
"b",
|
||||
"base",
|
||||
"bdi",
|
||||
"bdo",
|
||||
"blockquote",
|
||||
"body",
|
||||
"br",
|
||||
"button",
|
||||
"canvas",
|
||||
"caption",
|
||||
"cite",
|
||||
"code",
|
||||
"col",
|
||||
"colgroup",
|
||||
"command",
|
||||
"data",
|
||||
"datalist",
|
||||
"dd",
|
||||
"del",
|
||||
"details",
|
||||
"dfn",
|
||||
"dialog",
|
||||
"div",
|
||||
"dl",
|
||||
"dt",
|
||||
"em",
|
||||
"embed",
|
||||
"fieldset",
|
||||
"figcaption",
|
||||
"figure",
|
||||
"footer",
|
||||
"form",
|
||||
"h1",
|
||||
"h2",
|
||||
"h3",
|
||||
"h4",
|
||||
"h5",
|
||||
"h6",
|
||||
"head",
|
||||
"header",
|
||||
"hgroup",
|
||||
"hr",
|
||||
"html",
|
||||
"i",
|
||||
"iframe",
|
||||
"img",
|
||||
"input",
|
||||
"ins",
|
||||
"kbd",
|
||||
"keygen",
|
||||
"label",
|
||||
"legend",
|
||||
"li",
|
||||
"link",
|
||||
"map",
|
||||
"mark",
|
||||
"menu",
|
||||
"menuitem",
|
||||
"meta",
|
||||
"meter",
|
||||
"nav",
|
||||
"noscript",
|
||||
"object",
|
||||
"ol",
|
||||
"optgroup",
|
||||
"option",
|
||||
"output",
|
||||
"p",
|
||||
"param",
|
||||
"pre",
|
||||
"progress",
|
||||
"q",
|
||||
"rp",
|
||||
"rt",
|
||||
"ruby",
|
||||
"s",
|
||||
"samp",
|
||||
"script",
|
||||
"section",
|
||||
"select",
|
||||
"small",
|
||||
"source",
|
||||
"span",
|
||||
"strong",
|
||||
"style",
|
||||
"sub",
|
||||
"summary",
|
||||
"sup",
|
||||
"table",
|
||||
"tbody",
|
||||
"td",
|
||||
"template",
|
||||
"textarea",
|
||||
"tfoot",
|
||||
"th",
|
||||
"thead",
|
||||
"time",
|
||||
"title",
|
||||
"tr",
|
||||
"track",
|
||||
"u",
|
||||
"ul",
|
||||
"var",
|
||||
"video",
|
||||
"wbr",
|
||||
}
|
||||
|
||||
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
|
||||
|
||||
var attributes = []string{
|
||||
"abbr",
|
||||
"accept",
|
||||
"accept-charset",
|
||||
"accesskey",
|
||||
"action",
|
||||
"alt",
|
||||
"async",
|
||||
"autocomplete",
|
||||
"autofocus",
|
||||
"autoplay",
|
||||
"challenge",
|
||||
"charset",
|
||||
"checked",
|
||||
"cite",
|
||||
"class",
|
||||
"cols",
|
||||
"colspan",
|
||||
"command",
|
||||
"content",
|
||||
"contenteditable",
|
||||
"contextmenu",
|
||||
"controls",
|
||||
"coords",
|
||||
"crossorigin",
|
||||
"data",
|
||||
"datetime",
|
||||
"default",
|
||||
"defer",
|
||||
"dir",
|
||||
"dirname",
|
||||
"disabled",
|
||||
"download",
|
||||
"draggable",
|
||||
"dropzone",
|
||||
"enctype",
|
||||
"for",
|
||||
"form",
|
||||
"formaction",
|
||||
"formenctype",
|
||||
"formmethod",
|
||||
"formnovalidate",
|
||||
"formtarget",
|
||||
"headers",
|
||||
"height",
|
||||
"hidden",
|
||||
"high",
|
||||
"href",
|
||||
"hreflang",
|
||||
"http-equiv",
|
||||
"icon",
|
||||
"id",
|
||||
"inputmode",
|
||||
"ismap",
|
||||
"itemid",
|
||||
"itemprop",
|
||||
"itemref",
|
||||
"itemscope",
|
||||
"itemtype",
|
||||
"keytype",
|
||||
"kind",
|
||||
"label",
|
||||
"lang",
|
||||
"list",
|
||||
"loop",
|
||||
"low",
|
||||
"manifest",
|
||||
"max",
|
||||
"maxlength",
|
||||
"media",
|
||||
"mediagroup",
|
||||
"method",
|
||||
"min",
|
||||
"minlength",
|
||||
"multiple",
|
||||
"muted",
|
||||
"name",
|
||||
"novalidate",
|
||||
"open",
|
||||
"optimum",
|
||||
"pattern",
|
||||
"ping",
|
||||
"placeholder",
|
||||
"poster",
|
||||
"preload",
|
||||
"radiogroup",
|
||||
"readonly",
|
||||
"rel",
|
||||
"required",
|
||||
"reversed",
|
||||
"rows",
|
||||
"rowspan",
|
||||
"sandbox",
|
||||
"spellcheck",
|
||||
"scope",
|
||||
"scoped",
|
||||
"seamless",
|
||||
"selected",
|
||||
"shape",
|
||||
"size",
|
||||
"sizes",
|
||||
"sortable",
|
||||
"sorted",
|
||||
"span",
|
||||
"src",
|
||||
"srcdoc",
|
||||
"srclang",
|
||||
"start",
|
||||
"step",
|
||||
"style",
|
||||
"tabindex",
|
||||
"target",
|
||||
"title",
|
||||
"translate",
|
||||
"type",
|
||||
"typemustmatch",
|
||||
"usemap",
|
||||
"value",
|
||||
"width",
|
||||
"wrap",
|
||||
}
|
||||
|
||||
var eventHandlers = []string{
|
||||
"onabort",
|
||||
"onautocomplete",
|
||||
"onautocompleteerror",
|
||||
"onafterprint",
|
||||
"onbeforeprint",
|
||||
"onbeforeunload",
|
||||
"onblur",
|
||||
"oncancel",
|
||||
"oncanplay",
|
||||
"oncanplaythrough",
|
||||
"onchange",
|
||||
"onclick",
|
||||
"onclose",
|
||||
"oncontextmenu",
|
||||
"oncuechange",
|
||||
"ondblclick",
|
||||
"ondrag",
|
||||
"ondragend",
|
||||
"ondragenter",
|
||||
"ondragleave",
|
||||
"ondragover",
|
||||
"ondragstart",
|
||||
"ondrop",
|
||||
"ondurationchange",
|
||||
"onemptied",
|
||||
"onended",
|
||||
"onerror",
|
||||
"onfocus",
|
||||
"onhashchange",
|
||||
"oninput",
|
||||
"oninvalid",
|
||||
"onkeydown",
|
||||
"onkeypress",
|
||||
"onkeyup",
|
||||
"onlanguagechange",
|
||||
"onload",
|
||||
"onloadeddata",
|
||||
"onloadedmetadata",
|
||||
"onloadstart",
|
||||
"onmessage",
|
||||
"onmousedown",
|
||||
"onmousemove",
|
||||
"onmouseout",
|
||||
"onmouseover",
|
||||
"onmouseup",
|
||||
"onmousewheel",
|
||||
"onoffline",
|
||||
"ononline",
|
||||
"onpagehide",
|
||||
"onpageshow",
|
||||
"onpause",
|
||||
"onplay",
|
||||
"onplaying",
|
||||
"onpopstate",
|
||||
"onprogress",
|
||||
"onratechange",
|
||||
"onreset",
|
||||
"onresize",
|
||||
"onscroll",
|
||||
"onseeked",
|
||||
"onseeking",
|
||||
"onselect",
|
||||
"onshow",
|
||||
"onsort",
|
||||
"onstalled",
|
||||
"onstorage",
|
||||
"onsubmit",
|
||||
"onsuspend",
|
||||
"ontimeupdate",
|
||||
"ontoggle",
|
||||
"onunload",
|
||||
"onvolumechange",
|
||||
"onwaiting",
|
||||
}
|
||||
|
||||
// extra are ad-hoc values not covered by any of the lists above.
|
||||
var extra = []string{
|
||||
"align",
|
||||
"annotation",
|
||||
"annotation-xml",
|
||||
"applet",
|
||||
"basefont",
|
||||
"bgsound",
|
||||
"big",
|
||||
"blink",
|
||||
"center",
|
||||
"color",
|
||||
"desc",
|
||||
"face",
|
||||
"font",
|
||||
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
|
||||
"foreignobject",
|
||||
"frame",
|
||||
"frameset",
|
||||
"image",
|
||||
"isindex",
|
||||
"listing",
|
||||
"malignmark",
|
||||
"marquee",
|
||||
"math",
|
||||
"mglyph",
|
||||
"mi",
|
||||
"mn",
|
||||
"mo",
|
||||
"ms",
|
||||
"mtext",
|
||||
"nobr",
|
||||
"noembed",
|
||||
"noframes",
|
||||
"plaintext",
|
||||
"prompt",
|
||||
"public",
|
||||
"spacer",
|
||||
"strike",
|
||||
"svg",
|
||||
"system",
|
||||
"tt",
|
||||
"xmp",
|
||||
}
|
||||
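gen.go above builds table.go with cuckoo hashing: each name hashes (via FNV) to two candidate slots, so Lookup probes at most two entries, and the generator searches for a seed and table size where every name fits, evicting entries on collision. A toy sketch of that two-slot idea follows; the seed and table size are made up, and this is not the vendored generator itself:

```go
package main

import "fmt"

// fnv mirrors the hash used by gen.go/atom.go above: FNV with an arbitrary
// starting value h.
func fnv(h uint32, s string) uint32 {
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	return h
}

func main() {
	const h0 uint32 = 0x12345678 // hypothetical seed (the real one is searched for)
	const mask = 1<<4 - 1        // hypothetical 16-slot table

	// Each name gets two candidate slots; the real generator inserts names
	// into one of the two, pushing existing entries aside when both are taken.
	slots := func(s string) (uint32, uint32) {
		h := fnv(h0, s)
		return h & mask, (h >> 16) & mask
	}

	for _, s := range []string{"div", "span", "href"} {
		a, b := slots(s)
		fmt.Printf("%q -> slot %d or %d\n", s, a, b)
	}
}
```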
713 vendor/golang.org/x/net/html/atom/table.go generated vendored
@@ -1,713 +0,0 @@
|
||||
// generated by go run gen.go; DO NOT EDIT
|
||||
|
||||
package atom
|
||||
|
||||
const (
|
||||
A Atom = 0x1
|
||||
Abbr Atom = 0x4
|
||||
Accept Atom = 0x2106
|
||||
AcceptCharset Atom = 0x210e
|
||||
Accesskey Atom = 0x3309
|
||||
Action Atom = 0x1f606
|
||||
Address Atom = 0x4f307
|
||||
Align Atom = 0x1105
|
||||
Alt Atom = 0x4503
|
||||
Annotation Atom = 0x1670a
|
||||
AnnotationXml Atom = 0x1670e
|
||||
Applet Atom = 0x2b306
|
||||
Area Atom = 0x2fa04
|
||||
Article Atom = 0x38807
|
||||
Aside Atom = 0x8305
|
||||
Async Atom = 0x7b05
|
||||
Audio Atom = 0xa605
|
||||
Autocomplete Atom = 0x1fc0c
|
||||
Autofocus Atom = 0xb309
|
||||
Autoplay Atom = 0xce08
|
||||
B Atom = 0x101
|
||||
Base Atom = 0xd604
|
||||
Basefont Atom = 0xd608
|
||||
Bdi Atom = 0x1a03
|
||||
Bdo Atom = 0xe703
|
||||
Bgsound Atom = 0x11807
|
||||
Big Atom = 0x12403
|
||||
Blink Atom = 0x12705
|
||||
Blockquote Atom = 0x12c0a
|
||||
Body Atom = 0x2f04
|
||||
Br Atom = 0x202
|
||||
Button Atom = 0x13606
|
||||
Canvas Atom = 0x7f06
|
||||
Caption Atom = 0x1bb07
|
||||
Center Atom = 0x5b506
|
||||
Challenge Atom = 0x21f09
|
||||
Charset Atom = 0x2807
|
||||
Checked Atom = 0x32807
|
||||
Cite Atom = 0x3c804
|
||||
Class Atom = 0x4de05
|
||||
Code Atom = 0x14904
|
||||
Col Atom = 0x15003
|
||||
Colgroup Atom = 0x15008
|
||||
Color Atom = 0x15d05
|
||||
Cols Atom = 0x16204
|
||||
Colspan Atom = 0x16207
|
||||
Command Atom = 0x17507
|
||||
Content Atom = 0x42307
|
||||
Contenteditable Atom = 0x4230f
|
||||
Contextmenu Atom = 0x3310b
|
||||
Controls Atom = 0x18808
|
||||
Coords Atom = 0x19406
|
||||
Crossorigin Atom = 0x19f0b
|
||||
Data Atom = 0x44a04
|
||||
Datalist Atom = 0x44a08
|
||||
Datetime Atom = 0x23c08
|
||||
Dd Atom = 0x26702
|
||||
Default Atom = 0x8607
|
||||
Defer Atom = 0x14b05
|
||||
Del Atom = 0x3ef03
|
||||
Desc Atom = 0x4db04
|
||||
Details Atom = 0x4807
|
||||
Dfn Atom = 0x6103
|
||||
Dialog Atom = 0x1b06
|
||||
Dir Atom = 0x6903
|
||||
Dirname Atom = 0x6907
|
||||
Disabled Atom = 0x10c08
|
||||
Div Atom = 0x11303
|
||||
Dl Atom = 0x11e02
|
||||
Download Atom = 0x40008
|
||||
Draggable Atom = 0x17b09
|
||||
Dropzone Atom = 0x39108
|
||||
Dt Atom = 0x50902
|
||||
Em Atom = 0x6502
|
||||
Embed Atom = 0x6505
|
||||
Enctype Atom = 0x21107
|
||||
Face Atom = 0x5b304
|
||||
Fieldset Atom = 0x1b008
|
||||
Figcaption Atom = 0x1b80a
|
||||
Figure Atom = 0x1cc06
|
||||
Font Atom = 0xda04
|
||||
Footer Atom = 0x8d06
|
||||
For Atom = 0x1d803
|
||||
ForeignObject Atom = 0x1d80d
|
||||
Foreignobject Atom = 0x1e50d
|
||||
Form Atom = 0x1f204
|
||||
Formaction Atom = 0x1f20a
|
||||
Formenctype Atom = 0x20d0b
|
||||
Formmethod Atom = 0x2280a
|
||||
Formnovalidate Atom = 0x2320e
|
||||
Formtarget Atom = 0x2470a
|
||||
Frame Atom = 0x9a05
|
||||
Frameset Atom = 0x9a08
|
||||
H1 Atom = 0x26e02
|
||||
H2 Atom = 0x29402
|
||||
H3 Atom = 0x2a702
|
||||
H4 Atom = 0x2e902
|
||||
H5 Atom = 0x2f302
|
||||
H6 Atom = 0x50b02
|
||||
Head Atom = 0x2d504
|
||||
Header Atom = 0x2d506
|
||||
Headers Atom = 0x2d507
|
||||
Height Atom = 0x25106
|
||||
Hgroup Atom = 0x25906
|
||||
Hidden Atom = 0x26506
|
||||
High Atom = 0x26b04
|
||||
Hr Atom = 0x27002
|
||||
Href Atom = 0x27004
|
||||
Hreflang Atom = 0x27008
|
||||
Html Atom = 0x25504
|
||||
HttpEquiv Atom = 0x2780a
|
||||
I Atom = 0x601
|
||||
Icon Atom = 0x42204
|
||||
Id Atom = 0x8502
|
||||
Iframe Atom = 0x29606
|
||||
Image Atom = 0x29c05
|
||||
Img Atom = 0x2a103
|
||||
Input Atom = 0x3e805
|
||||
Inputmode Atom = 0x3e809
|
||||
Ins Atom = 0x1a803
|
||||
Isindex Atom = 0x2a907
|
||||
Ismap Atom = 0x2b005
|
||||
Itemid Atom = 0x33c06
|
||||
Itemprop Atom = 0x3c908
|
||||
Itemref Atom = 0x5ad07
|
||||
Itemscope Atom = 0x2b909
|
||||
Itemtype Atom = 0x2c308
|
||||
Kbd Atom = 0x1903
|
||||
Keygen Atom = 0x3906
|
||||
Keytype Atom = 0x53707
|
||||
Kind Atom = 0x10904
|
||||
Label Atom = 0xf005
|
||||
Lang Atom = 0x27404
|
||||
Legend Atom = 0x18206
|
||||
Li Atom = 0x1202
|
||||
Link Atom = 0x12804
|
||||
List Atom = 0x44e04
|
||||
Listing Atom = 0x44e07
|
||||
Loop Atom = 0xf404
|
||||
Low Atom = 0x11f03
|
||||
Malignmark Atom = 0x100a
|
||||
Manifest Atom = 0x5f108
|
||||
Map Atom = 0x2b203
|
||||
Mark Atom = 0x1604
|
||||
Marquee Atom = 0x2cb07
|
||||
Math Atom = 0x2d204
|
||||
Max Atom = 0x2e103
|
||||
Maxlength Atom = 0x2e109
|
||||
Media Atom = 0x6e05
|
||||
Mediagroup Atom = 0x6e0a
|
||||
Menu Atom = 0x33804
|
||||
Menuitem Atom = 0x33808
|
||||
Meta Atom = 0x45d04
|
||||
Meter Atom = 0x24205
|
||||
Method Atom = 0x22c06
|
||||
Mglyph Atom = 0x2a206
|
||||
Mi Atom = 0x2eb02
|
||||
Min Atom = 0x2eb03
|
||||
Minlength Atom = 0x2eb09
|
||||
Mn Atom = 0x23502
|
||||
Mo Atom = 0x3ed02
|
||||
Ms Atom = 0x2bc02
|
||||
Mtext Atom = 0x2f505
|
||||
Multiple Atom = 0x30308
|
||||
Muted Atom = 0x30b05
|
||||
Name Atom = 0x6c04
|
||||
Nav Atom = 0x3e03
|
||||
Nobr Atom = 0x5704
|
||||
Noembed Atom = 0x6307
|
||||
Noframes Atom = 0x9808
|
||||
Noscript Atom = 0x3d208
|
||||
Novalidate Atom = 0x2360a
|
||||
Object Atom = 0x1ec06
|
||||
Ol Atom = 0xc902
|
||||
Onabort Atom = 0x13a07
|
||||
Onafterprint Atom = 0x1c00c
|
||||
Onautocomplete Atom = 0x1fa0e
|
||||
Onautocompleteerror Atom = 0x1fa13
|
||||
Onbeforeprint Atom = 0x6040d
|
||||
Onbeforeunload Atom = 0x4e70e
|
||||
Onblur Atom = 0xaa06
|
||||
Oncancel Atom = 0xe908
|
||||
Oncanplay Atom = 0x28509
|
||||
Oncanplaythrough Atom = 0x28510
|
||||
Onchange Atom = 0x3a708
|
||||
Onclick Atom = 0x31007
|
||||
Onclose Atom = 0x31707
|
||||
Oncontextmenu Atom = 0x32f0d
|
||||
Oncuechange Atom = 0x3420b
|
||||
Ondblclick Atom = 0x34d0a
|
||||
Ondrag Atom = 0x35706
|
||||
Ondragend Atom = 0x35709
|
||||
Ondragenter Atom = 0x3600b
|
||||
Ondragleave Atom = 0x36b0b
|
||||
Ondragover Atom = 0x3760a
|
||||
Ondragstart Atom = 0x3800b
|
||||
Ondrop Atom = 0x38f06
|
||||
Ondurationchange Atom = 0x39f10
|
||||
Onemptied Atom = 0x39609
|
||||
Onended Atom = 0x3af07
|
||||
Onerror Atom = 0x3b607
|
||||
Onfocus Atom = 0x3bd07
|
||||
Onhashchange Atom = 0x3da0c
|
||||
Oninput Atom = 0x3e607
|
||||
Oninvalid Atom = 0x3f209
|
||||
Onkeydown Atom = 0x3fb09
|
||||
Onkeypress Atom = 0x4080a
|
||||
Onkeyup Atom = 0x41807
|
||||
Onlanguagechange Atom = 0x43210
|
||||
Onload Atom = 0x44206
|
||||
Onloadeddata Atom = 0x4420c
|
||||
Onloadedmetadata Atom = 0x45510
|
||||
Onloadstart Atom = 0x46b0b
|
||||
Onmessage Atom = 0x47609
|
||||
Onmousedown Atom = 0x47f0b
|
||||
Onmousemove Atom = 0x48a0b
|
||||
Onmouseout Atom = 0x4950a
|
||||
Onmouseover Atom = 0x4a20b
|
||||
Onmouseup Atom = 0x4ad09
|
||||
Onmousewheel Atom = 0x4b60c
|
||||
Onoffline Atom = 0x4c209
|
||||
Ononline Atom = 0x4cb08
|
||||
Onpagehide Atom = 0x4d30a
|
||||
Onpageshow Atom = 0x4fe0a
|
||||
Onpause Atom = 0x50d07
|
||||
Onplay Atom = 0x51706
|
||||
Onplaying Atom = 0x51709
|
||||
Onpopstate Atom = 0x5200a
|
||||
Onprogress Atom = 0x52a0a
|
||||
Onratechange Atom = 0x53e0c
|
||||
Onreset Atom = 0x54a07
|
||||
Onresize Atom = 0x55108
|
||||
Onscroll Atom = 0x55f08
|
||||
Onseeked Atom = 0x56708
|
||||
Onseeking Atom = 0x56f09
|
||||
Onselect Atom = 0x57808
|
||||
Onshow Atom = 0x58206
|
||||
Onsort Atom = 0x58b06
|
||||
Onstalled Atom = 0x59509
|
||||
Onstorage Atom = 0x59e09
|
||||
Onsubmit Atom = 0x5a708
|
||||
Onsuspend Atom = 0x5bb09
|
||||
Ontimeupdate Atom = 0xdb0c
|
||||
Ontoggle Atom = 0x5c408
|
||||
Onunload Atom = 0x5cc08
|
||||
Onvolumechange Atom = 0x5d40e
|
||||
Onwaiting Atom = 0x5e209
|
||||
Open Atom = 0x3cf04
|
||||
Optgroup Atom = 0xf608
|
||||
Optimum Atom = 0x5eb07
|
||||
Option Atom = 0x60006
|
||||
Output Atom = 0x49c06
|
||||
P Atom = 0xc01
|
||||
Param Atom = 0xc05
|
||||
Pattern Atom = 0x5107
|
||||
Ping Atom = 0x7704
|
||||
Placeholder Atom = 0xc30b
|
||||
Plaintext Atom = 0xfd09
|
||||
Poster Atom = 0x15706
|
||||
Pre Atom = 0x25e03
|
||||
Preload Atom = 0x25e07
|
||||
Progress Atom = 0x52c08
|
||||
Prompt Atom = 0x5fa06
|
||||
Public Atom = 0x41e06
|
||||
Q Atom = 0x13101
|
||||
Radiogroup Atom = 0x30a
|
||||
Readonly Atom = 0x2fb08
|
||||
Rel Atom = 0x25f03
|
||||
Required Atom = 0x1d008
|
||||
Reversed Atom = 0x5a08
|
||||
Rows Atom = 0x9204
|
||||
Rowspan Atom = 0x9207
|
||||
Rp Atom = 0x1c602
|
||||
Rt Atom = 0x13f02
|
||||
Ruby Atom = 0xaf04
|
||||
S Atom = 0x2c01
|
||||
Samp Atom = 0x4e04
|
||||
Sandbox Atom = 0xbb07
|
||||
Scope Atom = 0x2bd05
|
||||
Scoped Atom = 0x2bd06
|
||||
Script Atom = 0x3d406
|
||||
Seamless Atom = 0x31c08
|
||||
Section Atom = 0x4e207
|
||||
Select Atom = 0x57a06
|
||||
Selected Atom = 0x57a08
|
||||
Shape Atom = 0x4f905
|
||||
Size Atom = 0x55504
|
||||
Sizes Atom = 0x55505
|
||||
Small Atom = 0x18f05
|
||||
Sortable Atom = 0x58d08
|
||||
Sorted Atom = 0x19906
|
||||
Source Atom = 0x1aa06
|
||||
Spacer Atom = 0x2db06
|
||||
Span Atom = 0x9504
|
||||
Spellcheck Atom = 0x3230a
|
||||
Src Atom = 0x3c303
|
||||
Srcdoc Atom = 0x3c306
|
||||
Srclang Atom = 0x41107
|
||||
Start Atom = 0x38605
|
||||
Step Atom = 0x5f704
|
||||
Strike Atom = 0x53306
|
||||
Strong Atom = 0x55906
|
||||
Style Atom = 0x61105
|
||||
Sub Atom = 0x5a903
|
||||
Summary Atom = 0x61607
|
||||
Sup Atom = 0x61d03
|
||||
Svg Atom = 0x62003
|
||||
System Atom = 0x62306
|
||||
Tabindex Atom = 0x46308
|
||||
Table Atom = 0x42d05
|
||||
Target Atom = 0x24b06
|
||||
Tbody Atom = 0x2e05
|
||||
Td Atom = 0x4702
|
||||
Template Atom = 0x62608
|
||||
Textarea Atom = 0x2f608
|
||||
Tfoot Atom = 0x8c05
|
||||
Th Atom = 0x22e02
|
||||
Thead Atom = 0x2d405
|
||||
Time Atom = 0xdd04
|
||||
Title Atom = 0xa105
|
||||
Tr Atom = 0x10502
|
||||
Track Atom = 0x10505
|
||||
Translate Atom = 0x14009
|
||||
Tt Atom = 0x5302
|
||||
Type Atom = 0x21404
|
||||
Typemustmatch Atom = 0x2140d
|
||||
U Atom = 0xb01
|
||||
Ul Atom = 0x8a02
|
||||
Usemap Atom = 0x51106
|
||||
Value Atom = 0x4005
|
||||
Var Atom = 0x11503
|
||||
Video Atom = 0x28105
|
||||
Wbr Atom = 0x12103
|
||||
Width Atom = 0x50705
|
||||
Wrap Atom = 0x58704
|
||||
Xmp Atom = 0xc103
|
||||
)
|
||||
|
||||
const hash0 = 0xc17da63e
|
||||
|
||||
const maxAtomLen = 19
|
||||
|
||||
var table = [1 << 9]Atom{
|
||||
0x1: 0x48a0b, // onmousemove
|
||||
0x2: 0x5e209, // onwaiting
|
||||
0x3: 0x1fa13, // onautocompleteerror
|
||||
0x4: 0x5fa06, // prompt
|
||||
0x7: 0x5eb07, // optimum
|
||||
0x8: 0x1604, // mark
|
||||
0xa: 0x5ad07, // itemref
|
||||
0xb: 0x4fe0a, // onpageshow
|
||||
0xc: 0x57a06, // select
|
||||
0xd: 0x17b09, // draggable
|
||||
0xe: 0x3e03, // nav
|
||||
0xf: 0x17507, // command
|
||||
0x11: 0xb01, // u
|
||||
0x14: 0x2d507, // headers
|
||||
0x15: 0x44a08, // datalist
|
||||
0x17: 0x4e04, // samp
|
||||
0x1a: 0x3fb09, // onkeydown
|
||||
0x1b: 0x55f08, // onscroll
|
||||
0x1c: 0x15003, // col
|
||||
0x20: 0x3c908, // itemprop
|
||||
0x21: 0x2780a, // http-equiv
|
||||
0x22: 0x61d03, // sup
|
||||
0x24: 0x1d008, // required
|
||||
0x2b: 0x25e07, // preload
|
||||
0x2c: 0x6040d, // onbeforeprint
|
||||
0x2d: 0x3600b, // ondragenter
|
||||
0x2e: 0x50902, // dt
|
||||
0x2f: 0x5a708, // onsubmit
|
||||
0x30: 0x27002, // hr
|
||||
0x31: 0x32f0d, // oncontextmenu
|
||||
0x33: 0x29c05, // image
|
||||
0x34: 0x50d07, // onpause
|
||||
0x35: 0x25906, // hgroup
|
||||
0x36: 0x7704, // ping
|
||||
0x37: 0x57808, // onselect
|
||||
0x3a: 0x11303, // div
|
||||
0x3b: 0x1fa0e, // onautocomplete
|
||||
0x40: 0x2eb02, // mi
|
||||
0x41: 0x31c08, // seamless
|
||||
0x42: 0x2807, // charset
|
||||
0x43: 0x8502, // id
|
||||
0x44: 0x5200a, // onpopstate
|
||||
0x45: 0x3ef03, // del
|
||||
0x46: 0x2cb07, // marquee
|
||||
0x47: 0x3309, // accesskey
|
||||
0x49: 0x8d06, // footer
|
||||
0x4a: 0x44e04, // list
|
||||
0x4b: 0x2b005, // ismap
|
||||
0x51: 0x33804, // menu
|
||||
0x52: 0x2f04, // body
|
||||
0x55: 0x9a08, // frameset
|
||||
0x56: 0x54a07, // onreset
|
||||
0x57: 0x12705, // blink
|
||||
0x58: 0xa105, // title
|
||||
0x59: 0x38807, // article
|
||||
0x5b: 0x22e02, // th
|
||||
0x5d: 0x13101, // q
|
||||
0x5e: 0x3cf04, // open
|
||||
0x5f: 0x2fa04, // area
|
||||
0x61: 0x44206, // onload
|
||||
0x62: 0xda04, // font
|
||||
0x63: 0xd604, // base
|
||||
0x64: 0x16207, // colspan
|
||||
0x65: 0x53707, // keytype
|
||||
0x66: 0x11e02, // dl
|
||||
0x68: 0x1b008, // fieldset
|
||||
0x6a: 0x2eb03, // min
|
||||
0x6b: 0x11503, // var
|
||||
0x6f: 0x2d506, // header
|
||||
0x70: 0x13f02, // rt
|
||||
0x71: 0x15008, // colgroup
|
||||
0x72: 0x23502, // mn
|
||||
0x74: 0x13a07, // onabort
|
||||
0x75: 0x3906, // keygen
|
||||
0x76: 0x4c209, // onoffline
|
||||
0x77: 0x21f09, // challenge
|
||||
0x78: 0x2b203, // map
|
||||
0x7a: 0x2e902, // h4
|
||||
0x7b: 0x3b607, // onerror
|
||||
0x7c: 0x2e109, // maxlength
|
||||
0x7d: 0x2f505, // mtext
|
||||
0x7e: 0xbb07, // sandbox
|
||||
0x7f: 0x58b06, // onsort
|
||||
0x80: 0x100a, // malignmark
|
||||
0x81: 0x45d04, // meta
|
||||
0x82: 0x7b05, // async
|
||||
0x83: 0x2a702, // h3
|
||||
0x84: 0x26702, // dd
|
||||
0x85: 0x27004, // href
|
||||
0x86: 0x6e0a, // mediagroup
|
||||
0x87: 0x19406, // coords
|
||||
0x88: 0x41107, // srclang
|
||||
0x89: 0x34d0a, // ondblclick
|
||||
0x8a: 0x4005, // value
|
||||
0x8c: 0xe908, // oncancel
|
||||
0x8e: 0x3230a, // spellcheck
|
||||
0x8f: 0x9a05, // frame
|
||||
0x91: 0x12403, // big
|
||||
0x94: 0x1f606, // action
|
||||
0x95: 0x6903, // dir
|
||||
0x97: 0x2fb08, // readonly
|
||||
0x99: 0x42d05, // table
|
||||
0x9a: 0x61607, // summary
|
||||
0x9b: 0x12103, // wbr
|
||||
0x9c: 0x30a, // radiogroup
|
||||
0x9d: 0x6c04, // name
|
||||
0x9f: 0x62306, // system
|
||||
0xa1: 0x15d05, // color
|
||||
0xa2: 0x7f06, // canvas
|
||||
0xa3: 0x25504, // html
|
||||
0xa5: 0x56f09, // onseeking
|
||||
0xac: 0x4f905, // shape
|
||||
0xad: 0x25f03, // rel
|
||||
0xae: 0x28510, // oncanplaythrough
|
||||
0xaf: 0x3760a, // ondragover
|
||||
0xb0: 0x62608, // template
|
||||
0xb1: 0x1d80d, // foreignObject
|
||||
0xb3: 0x9204, // rows
|
||||
0xb6: 0x44e07, // listing
|
||||
0xb7: 0x49c06, // output
|
||||
0xb9: 0x3310b, // contextmenu
|
||||
0xbb: 0x11f03, // low
|
||||
0xbc: 0x1c602, // rp
|
||||
0xbd: 0x5bb09, // onsuspend
|
||||
0xbe: 0x13606, // button
|
||||
0xbf: 0x4db04, // desc
|
||||
0xc1: 0x4e207, // section
|
||||
0xc2: 0x52a0a, // onprogress
|
||||
0xc3: 0x59e09, // onstorage
|
||||
0xc4: 0x2d204, // math
|
||||
0xc5: 0x4503, // alt
|
||||
0xc7: 0x8a02, // ul
|
||||
0xc8: 0x5107, // pattern
|
||||
0xc9: 0x4b60c, // onmousewheel
|
||||
0xca: 0x35709, // ondragend
|
||||
0xcb: 0xaf04, // ruby
|
||||
0xcc: 0xc01, // p
|
||||
0xcd: 0x31707, // onclose
|
||||
0xce: 0x24205, // meter
|
||||
0xcf: 0x11807, // bgsound
|
||||
0xd2: 0x25106, // height
|
||||
0xd4: 0x101, // b
|
||||
0xd5: 0x2c308, // itemtype
|
||||
0xd8: 0x1bb07, // caption
|
||||
0xd9: 0x10c08, // disabled
|
||||
0xdb: 0x33808, // menuitem
|
||||
0xdc: 0x62003, // svg
|
||||
0xdd: 0x18f05, // small
|
||||
0xde: 0x44a04, // data
|
||||
0xe0: 0x4cb08, // ononline
|
||||
0xe1: 0x2a206, // mglyph
|
||||
0xe3: 0x6505, // embed
|
||||
0xe4: 0x10502, // tr
|
||||
0xe5: 0x46b0b, // onloadstart
|
||||
0xe7: 0x3c306, // srcdoc
|
||||
0xeb: 0x5c408, // ontoggle
|
||||
0xed: 0xe703, // bdo
|
||||
0xee: 0x4702, // td
|
||||
0xef: 0x8305, // aside
|
||||
0xf0: 0x29402, // h2
|
||||
0xf1: 0x52c08, // progress
|
||||
0xf2: 0x12c0a, // blockquote
|
||||
0xf4: 0xf005, // label
|
||||
0xf5: 0x601, // i
|
||||
0xf7: 0x9207, // rowspan
|
||||
0xfb: 0x51709, // onplaying
|
||||
0xfd: 0x2a103, // img
|
||||
0xfe: 0xf608, // optgroup
|
||||
0xff: 0x42307, // content
|
||||
0x101: 0x53e0c, // onratechange
|
||||
0x103: 0x3da0c, // onhashchange
|
||||
0x104: 0x4807, // details
|
||||
0x106: 0x40008, // download
|
||||
0x109: 0x14009, // translate
|
||||
0x10b: 0x4230f, // contenteditable
|
||||
0x10d: 0x36b0b, // ondragleave
|
||||
0x10e: 0x2106, // accept
|
||||
0x10f: 0x57a08, // selected
|
||||
0x112: 0x1f20a, // formaction
|
||||
0x113: 0x5b506, // center
|
||||
0x115: 0x45510, // onloadedmetadata
|
||||
0x116: 0x12804, // link
|
||||
0x117: 0xdd04, // time
|
||||
0x118: 0x19f0b, // crossorigin
|
||||
0x119: 0x3bd07, // onfocus
|
||||
0x11a: 0x58704, // wrap
|
||||
0x11b: 0x42204, // icon
|
||||
0x11d: 0x28105, // video
|
||||
0x11e: 0x4de05, // class
|
||||
0x121: 0x5d40e, // onvolumechange
|
||||
0x122: 0xaa06, // onblur
|
||||
0x123: 0x2b909, // itemscope
|
||||
0x124: 0x61105, // style
|
||||
0x127: 0x41e06, // public
|
||||
0x129: 0x2320e, // formnovalidate
|
||||
0x12a: 0x58206, // onshow
|
||||
0x12c: 0x51706, // onplay
|
||||
0x12d: 0x3c804, // cite
|
||||
0x12e: 0x2bc02, // ms
|
||||
0x12f: 0xdb0c, // ontimeupdate
|
||||
0x130: 0x10904, // kind
|
||||
0x131: 0x2470a, // formtarget
|
||||
0x135: 0x3af07, // onended
|
||||
0x136: 0x26506, // hidden
|
||||
0x137: 0x2c01, // s
|
||||
0x139: 0x2280a, // formmethod
|
||||
0x13a: 0x3e805, // input
|
||||
0x13c: 0x50b02, // h6
|
||||
0x13d: 0xc902, // ol
|
||||
0x13e: 0x3420b, // oncuechange
|
||||
0x13f: 0x1e50d, // foreignobject
|
||||
0x143: 0x4e70e, // onbeforeunload
|
||||
0x144: 0x2bd05, // scope
|
||||
0x145: 0x39609, // onemptied
|
||||
0x146: 0x14b05, // defer
|
||||
0x147: 0xc103, // xmp
|
||||
0x148: 0x39f10, // ondurationchange
|
||||
0x149: 0x1903, // kbd
|
||||
0x14c: 0x47609, // onmessage
|
||||
0x14d: 0x60006, // option
|
||||
0x14e: 0x2eb09, // minlength
|
||||
0x14f: 0x32807, // checked
|
||||
0x150: 0xce08, // autoplay
|
||||
0x152: 0x202, // br
|
||||
0x153: 0x2360a, // novalidate
|
||||
0x156: 0x6307, // noembed
|
||||
0x159: 0x31007, // onclick
|
||||
0x15a: 0x47f0b, // onmousedown
|
||||
0x15b: 0x3a708, // onchange
|
||||
0x15e: 0x3f209, // oninvalid
|
||||
0x15f: 0x2bd06, // scoped
|
||||
0x160: 0x18808, // controls
|
||||
0x161: 0x30b05, // muted
|
||||
0x162: 0x58d08, // sortable
|
||||
0x163: 0x51106, // usemap
|
||||
0x164: 0x1b80a, // figcaption
|
||||
0x165: 0x35706, // ondrag
|
||||
0x166: 0x26b04, // high
|
||||
0x168: 0x3c303, // src
|
||||
0x169: 0x15706, // poster
|
||||
0x16b: 0x1670e, // annotation-xml
|
||||
0x16c: 0x5f704, // step
|
||||
0x16d: 0x4, // abbr
|
||||
0x16e: 0x1b06, // dialog
|
||||
0x170: 0x1202, // li
|
||||
0x172: 0x3ed02, // mo
|
||||
0x175: 0x1d803, // for
|
||||
0x176: 0x1a803, // ins
|
||||
0x178: 0x55504, // size
|
||||
0x179: 0x43210, // onlanguagechange
|
||||
0x17a: 0x8607, // default
|
||||
0x17b: 0x1a03, // bdi
|
||||
0x17c: 0x4d30a, // onpagehide
|
||||
0x17d: 0x6907, // dirname
|
||||
0x17e: 0x21404, // type
|
||||
0x17f: 0x1f204, // form
|
||||
0x181: 0x28509, // oncanplay
|
||||
0x182: 0x6103, // dfn
|
||||
0x183: 0x46308, // tabindex
|
||||
0x186: 0x6502, // em
|
||||
0x187: 0x27404, // lang
|
||||
0x189: 0x39108, // dropzone
|
||||
0x18a: 0x4080a, // onkeypress
|
||||
0x18b: 0x23c08, // datetime
|
||||
0x18c: 0x16204, // cols
|
||||
0x18d: 0x1, // a
|
||||
0x18e: 0x4420c, // onloadeddata
|
||||
0x190: 0xa605, // audio
|
||||
0x192: 0x2e05, // tbody
|
||||
0x193: 0x22c06, // method
|
||||
0x195: 0xf404, // loop
|
||||
0x196: 0x29606, // iframe
|
||||
0x198: 0x2d504, // head
|
||||
0x19e: 0x5f108, // manifest
|
||||
0x19f: 0xb309, // autofocus
|
||||
0x1a0: 0x14904, // code
|
||||
0x1a1: 0x55906, // strong
|
||||
0x1a2: 0x30308, // multiple
|
||||
0x1a3: 0xc05, // param
|
||||
0x1a6: 0x21107, // enctype
|
||||
0x1a7: 0x5b304, // face
|
||||
0x1a8: 0xfd09, // plaintext
|
||||
0x1a9: 0x26e02, // h1
|
||||
0x1aa: 0x59509, // onstalled
|
||||
0x1ad: 0x3d406, // script
|
||||
0x1ae: 0x2db06, // spacer
|
||||
0x1af: 0x55108, // onresize
|
||||
0x1b0: 0x4a20b, // onmouseover
|
||||
0x1b1: 0x5cc08, // onunload
|
||||
0x1b2: 0x56708, // onseeked
|
||||
0x1b4: 0x2140d, // typemustmatch
|
||||
0x1b5: 0x1cc06, // figure
|
||||
0x1b6: 0x4950a, // onmouseout
|
||||
0x1b7: 0x25e03, // pre
|
||||
0x1b8: 0x50705, // width
|
||||
0x1b9: 0x19906, // sorted
|
||||
0x1bb: 0x5704, // nobr
|
||||
0x1be: 0x5302, // tt
|
||||
0x1bf: 0x1105, // align
|
||||
0x1c0: 0x3e607, // oninput
|
||||
0x1c3: 0x41807, // onkeyup
|
||||
0x1c6: 0x1c00c, // onafterprint
|
||||
0x1c7: 0x210e, // accept-charset
|
||||
0x1c8: 0x33c06, // itemid
|
||||
0x1c9: 0x3e809, // inputmode
|
||||
0x1cb: 0x53306, // strike
|
||||
0x1cc: 0x5a903, // sub
|
||||
0x1cd: 0x10505, // track
|
||||
0x1ce: 0x38605, // start
|
||||
0x1d0: 0xd608, // basefont
|
||||
0x1d6: 0x1aa06, // source
|
||||
0x1d7: 0x18206, // legend
|
||||
0x1d8: 0x2d405, // thead
|
||||
0x1da: 0x8c05, // tfoot
|
||||
0x1dd: 0x1ec06, // object
|
||||
0x1de: 0x6e05, // media
|
||||
0x1df: 0x1670a, // annotation
|
||||
0x1e0: 0x20d0b, // formenctype
|
||||
0x1e2: 0x3d208, // noscript
|
||||
0x1e4: 0x55505, // sizes
|
||||
0x1e5: 0x1fc0c, // autocomplete
|
||||
0x1e6: 0x9504, // span
|
||||
0x1e7: 0x9808, // noframes
|
||||
0x1e8: 0x24b06, // target
|
||||
0x1e9: 0x38f06, // ondrop
|
||||
0x1ea: 0x2b306, // applet
|
||||
0x1ec: 0x5a08, // reversed
|
||||
0x1f0: 0x2a907, // isindex
|
||||
0x1f3: 0x27008, // hreflang
|
||||
0x1f5: 0x2f302, // h5
|
||||
0x1f6: 0x4f307, // address
|
||||
0x1fa: 0x2e103, // max
|
||||
0x1fb: 0xc30b, // placeholder
|
||||
0x1fc: 0x2f608, // textarea
|
||||
0x1fe: 0x4ad09, // onmouseup
|
||||
0x1ff: 0x3800b, // ondragstart
|
||||
}
|
||||
|
||||
const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
|
||||
"genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" +
|
||||
"ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" +
|
||||
"utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" +
|
||||
"labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" +
|
||||
"blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" +
|
||||
"nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" +
|
||||
"originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" +
|
||||
"bjectforeignobjectformactionautocompleteerrorformenctypemust" +
|
||||
"matchallengeformmethodformnovalidatetimeterformtargetheightm" +
|
||||
"lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" +
|
||||
"h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" +
|
||||
"eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" +
|
||||
"utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" +
|
||||
"hangeondblclickondragendondragenterondragleaveondragoverondr" +
|
||||
"agstarticleondropzonemptiedondurationchangeonendedonerroronf" +
|
||||
"ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" +
|
||||
"nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" +
|
||||
"uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" +
|
||||
"rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" +
|
||||
"ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" +
|
||||
"oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" +
|
||||
"teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" +
|
||||
"ollonseekedonseekingonselectedonshowraponsortableonstalledon" +
|
||||
"storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" +
|
||||
"changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" +
|
||||
"mmarysupsvgsystemplate"
|
||||
102 vendor/golang.org/x/net/html/const.go generated vendored
@@ -1,102 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package html
|
||||
|
||||
// Section 12.2.3.2 of the HTML5 specification says "The following elements
|
||||
// have varying levels of special parsing rules".
|
||||
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
|
||||
var isSpecialElementMap = map[string]bool{
|
||||
"address": true,
|
||||
"applet": true,
|
||||
"area": true,
|
||||
"article": true,
|
||||
"aside": true,
|
||||
"base": true,
|
||||
"basefont": true,
|
||||
"bgsound": true,
|
||||
"blockquote": true,
|
||||
"body": true,
|
||||
"br": true,
|
||||
"button": true,
|
||||
"caption": true,
|
||||
"center": true,
|
||||
"col": true,
|
||||
"colgroup": true,
|
||||
"dd": true,
|
||||
"details": true,
|
||||
"dir": true,
|
||||
"div": true,
|
||||
"dl": true,
|
||||
"dt": true,
|
||||
"embed": true,
|
||||
"fieldset": true,
|
||||
"figcaption": true,
|
||||
"figure": true,
|
||||
"footer": true,
|
||||
"form": true,
|
||||
"frame": true,
|
||||
"frameset": true,
|
||||
"h1": true,
|
||||
"h2": true,
|
||||
"h3": true,
|
||||
"h4": true,
|
||||
"h5": true,
|
||||
"h6": true,
|
||||
"head": true,
|
||||
"header": true,
|
||||
"hgroup": true,
|
||||
"hr": true,
|
||||
"html": true,
|
||||
"iframe": true,
|
||||
"img": true,
|
||||
"input": true,
|
||||
"isindex": true,
|
||||
"li": true,
|
||||
"link": true,
|
||||
"listing": true,
|
||||
"marquee": true,
|
||||
"menu": true,
|
||||
"meta": true,
|
||||
"nav": true,
|
||||
"noembed": true,
|
||||
"noframes": true,
|
||||
"noscript": true,
|
||||
"object": true,
|
||||
"ol": true,
|
||||
"p": true,
|
||||
"param": true,
|
||||
"plaintext": true,
|
||||
"pre": true,
|
||||
"script": true,
|
||||
"section": true,
|
||||
"select": true,
|
||||
"source": true,
|
||||
"style": true,
|
||||
"summary": true,
|
||||
"table": true,
|
||||
"tbody": true,
|
||||
"td": true,
|
||||
"template": true,
|
||||
"textarea": true,
|
||||
"tfoot": true,
|
||||
"th": true,
|
||||
"thead": true,
|
||||
"title": true,
|
||||
"tr": true,
|
||||
"track": true,
|
||||
"ul": true,
|
||||
"wbr": true,
|
||||
"xmp": true,
|
||||
}
|
||||
|
||||
func isSpecialElement(element *Node) bool {
|
||||
switch element.Namespace {
|
||||
case "", "html":
|
||||
return isSpecialElementMap[element.Data]
|
||||
case "svg":
|
||||
return element.Data == "foreignObject"
|
||||
}
|
||||
return false
|
||||
}
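A hypothetical in-package test sketching the two branches above; the node literals are assumptions chosen for illustration, not taken from the original change:

```go
package html

import "testing"

func TestIsSpecialElementSketch(t *testing.T) {
	// An empty Namespace means the ordinary HTML namespace, so the map applies.
	p := &Node{Type: ElementNode, Data: "p"}
	if !isSpecialElement(p) {
		t.Error(`"p" should be treated as a special element`)
	}
	// In the SVG namespace only foreignObject is special.
	circle := &Node{Type: ElementNode, Namespace: "svg", Data: "circle"}
	if isSpecialElement(circle) {
		t.Error(`svg "circle" should not be special`)
	}
}
```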
|
||||
106 vendor/golang.org/x/net/html/doc.go generated vendored
@@ -1,106 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package html implements an HTML5-compliant tokenizer and parser.
|
||||
|
||||
Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
|
||||
caller's responsibility to ensure that r provides UTF-8 encoded HTML.
|
||||
|
||||
z := html.NewTokenizer(r)
|
||||
|
||||
Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
|
||||
which parses the next token and returns its type, or an error:
|
||||
|
||||
for {
|
||||
tt := z.Next()
|
||||
if tt == html.ErrorToken {
|
||||
// ...
|
||||
return ...
|
||||
}
|
||||
// Process the current token.
|
||||
}
|
||||
|
||||
There are two APIs for retrieving the current token. The high-level API is to
|
||||
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
|
||||
allow optionally calling Raw after Next but before Token, Text, TagName, or
|
||||
TagAttr. In EBNF notation, the valid call sequence per token is:
|
||||
|
||||
Next {Raw} [ Token | Text | TagName {TagAttr} ]
|
||||
|
||||
Token returns an independent data structure that completely describes a token.
|
||||
Entities (such as "<") are unescaped, tag names and attribute keys are
|
||||
lower-cased, and attributes are collected into a []Attribute. For example:
|
||||
|
||||
for {
|
||||
if z.Next() == html.ErrorToken {
|
||||
// Returning io.EOF indicates success.
|
||||
return z.Err()
|
||||
}
|
||||
emitToken(z.Token())
|
||||
}
|
||||
|
||||
The low-level API performs fewer allocations and copies, but the contents of
|
||||
the []byte values returned by Text, TagName and TagAttr may change on the next
|
||||
call to Next. For example, to extract an HTML page's anchor text:
|
||||
|
||||
depth := 0
|
||||
for {
|
||||
tt := z.Next()
|
||||
switch tt {
|
||||
case ErrorToken:
|
||||
return z.Err()
|
||||
case TextToken:
|
||||
if depth > 0 {
|
||||
// emitBytes should copy the []byte it receives,
|
||||
// if it doesn't process it immediately.
|
||||
emitBytes(z.Text())
|
||||
}
|
||||
case StartTagToken, EndTagToken:
|
||||
tn, _ := z.TagName()
|
||||
if len(tn) == 1 && tn[0] == 'a' {
|
||||
if tt == StartTagToken {
|
||||
depth++
|
||||
} else {
|
||||
depth--
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Parsing is done by calling Parse with an io.Reader, which returns the root of
|
||||
the parse tree (the document element) as a *Node. It is the caller's
|
||||
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
|
||||
example, to process each anchor node in depth-first order:
|
||||
|
||||
doc, err := html.Parse(r)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
var f func(*html.Node)
|
||||
f = func(n *html.Node) {
|
||||
if n.Type == html.ElementNode && n.Data == "a" {
|
||||
// Do something with n...
|
||||
}
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
f(c)
|
||||
}
|
||||
}
|
||||
f(doc)
|
||||
|
||||
The relevant specifications include:
|
||||
https://html.spec.whatwg.org/multipage/syntax.html and
|
||||
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
|
||||
*/
|
||||
package html // import "golang.org/x/net/html"
|
||||
|
||||
// The tokenization algorithm implemented by this package is not a line-by-line
|
||||
// transliteration of the relatively verbose state-machine in the WHATWG
|
||||
// specification. A more direct approach is used instead, where the program
|
||||
// counter implies the state, such as whether it is tokenizing a tag or a text
|
||||
// node. Specification compliance is verified by checking expected and actual
|
||||
// outputs over a test suite rather than aiming for algorithmic fidelity.
|
||||
|
||||
// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
|
||||
// TODO(nigeltao): How does parsing interact with a JavaScript engine?
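The package comment shows the two APIs as fragments; stitched into a complete program (the package name, input string, and output handling here are arbitrary), the parsing example looks roughly like this:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const page = `<html><body><a href="https://example.com">example</a></body></html>`
	doc, err := html.Parse(strings.NewReader(page))
	if err != nil {
		panic(err)
	}
	// Walk the tree depth-first and print every anchor's href.
	var visit func(*html.Node)
	visit = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					fmt.Println(a.Val)
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			visit(c)
		}
	}
	visit(doc)
}
```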
|
||||
156 vendor/golang.org/x/net/html/doctype.go generated vendored
@@ -1,156 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package html
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// parseDoctype parses the data from a DoctypeToken into a name,
|
||||
// public identifier, and system identifier. It returns a Node whose Type
|
||||
// is DoctypeNode, whose Data is the name, and which has attributes
|
||||
// named "system" and "public" for the two identifiers if they were present.
|
||||
// quirks is whether the document should be parsed in "quirks mode".
|
||||
func parseDoctype(s string) (n *Node, quirks bool) {
|
||||
n = &Node{Type: DoctypeNode}
|
||||
|
||||
// Find the name.
|
||||
space := strings.IndexAny(s, whitespace)
|
||||
if space == -1 {
|
||||
space = len(s)
|
||||
}
|
||||
n.Data = s[:space]
|
||||
// The comparison to "html" is case-sensitive.
|
||||
if n.Data != "html" {
|
||||
quirks = true
|
||||
}
|
||||
n.Data = strings.ToLower(n.Data)
|
||||
s = strings.TrimLeft(s[space:], whitespace)
|
||||
|
||||
if len(s) < 6 {
|
||||
// It can't start with "PUBLIC" or "SYSTEM".
|
||||
// Ignore the rest of the string.
|
||||
return n, quirks || s != ""
|
||||
}
|
||||
|
||||
key := strings.ToLower(s[:6])
|
||||
s = s[6:]
|
||||
for key == "public" || key == "system" {
|
||||
s = strings.TrimLeft(s, whitespace)
|
||||
if s == "" {
|
||||
break
|
||||
}
|
||||
quote := s[0]
|
||||
if quote != '"' && quote != '\'' {
|
||||
break
|
||||
}
|
||||
s = s[1:]
|
||||
q := strings.IndexRune(s, rune(quote))
|
||||
var id string
|
||||
if q == -1 {
|
||||
id = s
|
||||
s = ""
|
||||
} else {
|
||||
id = s[:q]
|
||||
s = s[q+1:]
|
||||
}
|
||||
n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
|
||||
if key == "public" {
|
||||
key = "system"
|
||||
} else {
|
||||
key = ""
|
||||
}
|
||||
}
|
||||
|
||||
if key != "" || s != "" {
|
||||
quirks = true
|
||||
} else if len(n.Attr) > 0 {
|
||||
if n.Attr[0].Key == "public" {
|
||||
public := strings.ToLower(n.Attr[0].Val)
|
||||
switch public {
|
||||
case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
|
||||
quirks = true
|
||||
default:
|
||||
for _, q := range quirkyIDs {
|
||||
if strings.HasPrefix(public, q) {
|
||||
quirks = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// The following two public IDs only cause quirks mode if there is no system ID.
|
||||
if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
|
||||
strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
|
||||
quirks = true
|
||||
}
|
||||
}
|
||||
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
|
||||
strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
|
||||
quirks = true
|
||||
}
|
||||
}
|
||||
|
||||
return n, quirks
|
||||
}
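A hypothetical in-package test sketching what the function yields for a common HTML 4.01 doctype; the expected values are read off the parsing loop above, not taken from the original change (parseDoctype is unexported, so this only compiles inside package html):

```go
package html

import "testing"

func TestParseDoctypeSketch(t *testing.T) {
	n, quirks := parseDoctype(`html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"`)
	if n.Data != "html" || quirks {
		t.Fatalf("got name %q, quirks %v; want %q, false", n.Data, quirks, "html")
	}
	// The identifiers come back as "public" and "system" attributes, in order.
	if len(n.Attr) != 2 || n.Attr[0].Key != "public" || n.Attr[1].Key != "system" {
		t.Fatalf("unexpected attributes: %+v", n.Attr)
	}
}
```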
|
||||
|
||||
// quirkyIDs is a list of public doctype identifiers that cause a document
|
||||
// to be interpreted in quirks mode. The identifiers should be in lower case.
|
||||
var quirkyIDs = []string{
|
||||
"+//silmaril//dtd html pro v0r11 19970101//",
|
||||
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
|
||||
"-//as//dtd html 3.0 aswedit + extensions//",
|
||||
"-//ietf//dtd html 2.0 level 1//",
|
||||
"-//ietf//dtd html 2.0 level 2//",
|
||||
"-//ietf//dtd html 2.0 strict level 1//",
|
||||
"-//ietf//dtd html 2.0 strict level 2//",
|
||||
"-//ietf//dtd html 2.0 strict//",
|
||||
"-//ietf//dtd html 2.0//",
|
||||
"-//ietf//dtd html 2.1e//",
|
||||
"-//ietf//dtd html 3.0//",
|
||||
"-//ietf//dtd html 3.2 final//",
|
||||
"-//ietf//dtd html 3.2//",
|
||||
"-//ietf//dtd html 3//",
|
||||
"-//ietf//dtd html level 0//",
|
||||
"-//ietf//dtd html level 1//",
|
||||
"-//ietf//dtd html level 2//",
|
||||
"-//ietf//dtd html level 3//",
|
||||
"-//ietf//dtd html strict level 0//",
|
||||
"-//ietf//dtd html strict level 1//",
|
||||
"-//ietf//dtd html strict level 2//",
|
||||
"-//ietf//dtd html strict level 3//",
|
||||
"-//ietf//dtd html strict//",
|
||||
"-//ietf//dtd html//",
|
||||
"-//metrius//dtd metrius presentational//",
|
||||
"-//microsoft//dtd internet explorer 2.0 html strict//",
|
||||
"-//microsoft//dtd internet explorer 2.0 html//",
|
||||
"-//microsoft//dtd internet explorer 2.0 tables//",
|
||||
"-//microsoft//dtd internet explorer 3.0 html strict//",
|
||||
"-//microsoft//dtd internet explorer 3.0 html//",
|
||||
"-//microsoft//dtd internet explorer 3.0 tables//",
|
||||
"-//netscape comm. corp.//dtd html//",
|
||||
"-//netscape comm. corp.//dtd strict html//",
|
||||
"-//o'reilly and associates//dtd html 2.0//",
|
||||
"-//o'reilly and associates//dtd html extended 1.0//",
|
||||
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
|
||||
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
|
||||
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
|
||||
"-//spyglass//dtd html 2.0 extended//",
|
||||
"-//sq//dtd html 2.0 hotmetal + extensions//",
|
||||
"-//sun microsystems corp.//dtd hotjava html//",
|
||||
"-//sun microsystems corp.//dtd hotjava strict html//",
|
||||
"-//w3c//dtd html 3 1995-03-24//",
|
||||
"-//w3c//dtd html 3.2 draft//",
|
||||
"-//w3c//dtd html 3.2 final//",
|
||||
"-//w3c//dtd html 3.2//",
|
||||
"-//w3c//dtd html 3.2s draft//",
|
||||
"-//w3c//dtd html 4.0 frameset//",
|
||||
"-//w3c//dtd html 4.0 transitional//",
|
||||
"-//w3c//dtd html experimental 19960712//",
|
||||
"-//w3c//dtd html experimental 970421//",
|
||||
"-//w3c//dtd w3 html//",
|
||||
"-//w3o//dtd w3 html 3.0//",
|
||||
"-//webtechs//dtd mozilla html 2.0//",
|
||||
"-//webtechs//dtd mozilla html//",
|
||||
}
|
||||
2253 vendor/golang.org/x/net/html/entity.go generated vendored
File diff suppressed because it is too large
258 vendor/golang.org/x/net/html/escape.go generated vendored
@@ -1,258 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package html
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// These replacements permit compatibility with old numeric entities that
|
||||
// assumed Windows-1252 encoding.
|
||||
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
|
||||
var replacementTable = [...]rune{
|
||||
'\u20AC', // First entry is what 0x80 should be replaced with.
|
||||
'\u0081',
|
||||
'\u201A',
|
||||
'\u0192',
|
||||
'\u201E',
|
||||
'\u2026',
|
||||
'\u2020',
|
||||
'\u2021',
|
||||
'\u02C6',
|
||||
'\u2030',
|
||||
'\u0160',
|
||||
'\u2039',
|
||||
'\u0152',
|
||||
'\u008D',
|
||||
'\u017D',
|
||||
'\u008F',
|
||||
'\u0090',
|
||||
'\u2018',
|
||||
'\u2019',
|
||||
'\u201C',
|
||||
'\u201D',
|
||||
'\u2022',
|
||||
'\u2013',
|
||||
'\u2014',
|
||||
'\u02DC',
|
||||
'\u2122',
|
||||
'\u0161',
|
||||
'\u203A',
|
||||
'\u0153',
|
||||
'\u009D',
|
||||
'\u017E',
|
||||
'\u0178', // Last entry is 0x9F.
|
||||
// 0x00->'\uFFFD' is handled programmatically.
|
||||
// 0x0D->'\u000D' is a no-op.
|
||||
}
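The effect of this table is visible through the exported UnescapeString: numeric references in the range 0x80 through 0x9F come back as their Windows-1252 meanings rather than as C1 control characters. A small sketch, with the expected values read off the table above:

```go
package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	fmt.Println(html.UnescapeString("&#x80;")) // "€" (U+20AC), not the C1 control 0x80
	fmt.Println(html.UnescapeString("&#x99;")) // "™" (U+2122)
}
```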
|
||||
|
||||
// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the
|
||||
// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
|
||||
// Precondition: b[src] == '&' && dst <= src.
|
||||
// attribute should be true if parsing an attribute value.
|
||||
func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
|
||||
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
|
||||
|
||||
// i starts at 1 because we already know that s[0] == '&'.
|
||||
i, s := 1, b[src:]
|
||||
|
||||
if len(s) <= 1 {
|
||||
b[dst] = b[src]
|
||||
return dst + 1, src + 1
|
||||
}
|
||||
|
||||
if s[i] == '#' {
|
||||
if len(s) <= 3 { // We need to have at least "&#.".
|
||||
b[dst] = b[src]
|
||||
return dst + 1, src + 1
|
||||
}
|
||||
i++
|
||||
c := s[i]
|
||||
hex := false
|
||||
if c == 'x' || c == 'X' {
|
||||
hex = true
|
||||
i++
|
||||
}
|
||||
|
||||
x := '\x00'
|
||||
for i < len(s) {
|
||||
c = s[i]
|
||||
i++
|
||||
if hex {
|
||||
if '0' <= c && c <= '9' {
|
||||
x = 16*x + rune(c) - '0'
|
||||
continue
|
||||
} else if 'a' <= c && c <= 'f' {
|
||||
x = 16*x + rune(c) - 'a' + 10
|
||||
continue
|
||||
} else if 'A' <= c && c <= 'F' {
|
||||
x = 16*x + rune(c) - 'A' + 10
|
||||
continue
|
||||
}
|
||||
} else if '0' <= c && c <= '9' {
|
||||
x = 10*x + rune(c) - '0'
|
||||
continue
|
||||
}
|
||||
if c != ';' {
|
||||
i--
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if i <= 3 { // No characters matched.
|
||||
b[dst] = b[src]
|
||||
return dst + 1, src + 1
|
||||
}
|
||||
|
||||
if 0x80 <= x && x <= 0x9F {
|
||||
// Replace characters from Windows-1252 with UTF-8 equivalents.
|
||||
x = replacementTable[x-0x80]
|
||||
} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
|
||||
// Replace invalid characters with the replacement character.
|
||||
x = '\uFFFD'
|
||||
}
|
||||
|
||||
return dst + utf8.EncodeRune(b[dst:], x), src + i
|
||||
}
|
||||
|
||||
// Consume the maximum number of characters possible, with the
|
||||
// consumed characters matching one of the named references.
|
||||
|
||||
for i < len(s) {
|
||||
c := s[i]
|
||||
i++
|
||||
// Lower-cased characters are more common in entities, so we check for them first.
|
||||
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
|
||||
continue
|
||||
}
|
||||
if c != ';' {
|
||||
i--
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
entityName := string(s[1:i])
|
||||
if entityName == "" {
|
||||
// No-op.
|
||||
} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
|
||||
// No-op.
|
||||
} else if x := entity[entityName]; x != 0 {
|
||||
return dst + utf8.EncodeRune(b[dst:], x), src + i
|
||||
} else if x := entity2[entityName]; x[0] != 0 {
|
||||
dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
|
||||
return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
|
||||
} else if !attribute {
|
||||
maxLen := len(entityName) - 1
|
||||
if maxLen > longestEntityWithoutSemicolon {
|
||||
maxLen = longestEntityWithoutSemicolon
|
||||
}
|
||||
for j := maxLen; j > 1; j-- {
|
||||
if x := entity[entityName[:j]]; x != 0 {
|
||||
return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dst1, src1 = dst+i, src+i
|
||||
copy(b[dst:dst1], b[src:src1])
|
||||
return dst1, src1
|
||||
}
|
||||
|
||||
// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b".
|
||||
// attribute should be true if parsing an attribute value.
|
||||
func unescape(b []byte, attribute bool) []byte {
|
||||
for i, c := range b {
|
||||
if c == '&' {
|
||||
dst, src := unescapeEntity(b, i, i, attribute)
|
||||
for src < len(b) {
|
||||
c := b[src]
|
||||
if c == '&' {
|
||||
dst, src = unescapeEntity(b, dst, src, attribute)
|
||||
} else {
|
||||
b[dst] = c
|
||||
dst, src = dst+1, src+1
|
||||
}
|
||||
}
|
||||
return b[0:dst]
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
|
||||
func lower(b []byte) []byte {
|
||||
for i, c := range b {
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
b[i] = c + 'a' - 'A'
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
const escapedChars = "&'<>\"\r"
|
||||
|
||||
func escape(w writer, s string) error {
|
||||
i := strings.IndexAny(s, escapedChars)
|
||||
for i != -1 {
|
||||
if _, err := w.WriteString(s[:i]); err != nil {
|
||||
return err
|
||||
}
|
||||
var esc string
|
||||
switch s[i] {
case '&':
esc = "&amp;"
case '\'':
// "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
esc = "&#39;"
case '<':
esc = "&lt;"
case '>':
esc = "&gt;"
case '"':
// "&#34;" is shorter than "&quot;".
esc = "&#34;"
case '\r':
esc = "&#13;"
default:
panic("unrecognized escape character")
}
|
||||
s = s[i+1:]
|
||||
if _, err := w.WriteString(esc); err != nil {
|
||||
return err
|
||||
}
|
||||
i = strings.IndexAny(s, escapedChars)
|
||||
}
|
||||
_, err := w.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
// EscapeString escapes special characters like "<" to become "&lt;". It
|
||||
// escapes only five such characters: <, >, &, ' and ".
|
||||
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
|
||||
// always true.
|
||||
func EscapeString(s string) string {
|
||||
if strings.IndexAny(s, escapedChars) == -1 {
|
||||
return s
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
escape(&buf, s)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a
// larger range of entities than EscapeString escapes. For example, "&aacute;"
// unescapes to "á", as does "&#225;" and "&#xE1;".
|
||||
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
|
||||
// always true.
|
||||
func UnescapeString(s string) string {
|
||||
for _, c := range s {
|
||||
if c == '&' {
|
||||
return string(unescape([]byte(s), false))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
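A short sketch of the asymmetric round-trip guarantee documented above; the input strings are arbitrary:

```go
package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	s := `a<b & c"d`
	e := html.EscapeString(s)
	fmt.Println(e)                           // a&lt;b &amp; c&#34;d
	fmt.Println(html.UnescapeString(e) == s) // true: Unescape(Escape(s)) == s

	// The converse need not hold: "&aacute;" unescapes to "á", and escaping
	// "á" does not bring the named entity back.
	fmt.Println(html.EscapeString(html.UnescapeString("&aacute;"))) // á
}
```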
|
||||
226 vendor/golang.org/x/net/html/foreign.go generated vendored
@@ -1,226 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package html
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
|
||||
for i := range aa {
|
||||
if newName, ok := nameMap[aa[i].Key]; ok {
|
||||
aa[i].Key = newName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func adjustForeignAttributes(aa []Attribute) {
|
||||
for i, a := range aa {
|
||||
if a.Key == "" || a.Key[0] != 'x' {
|
||||
continue
|
||||
}
|
||||
switch a.Key {
|
||||
case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
|
||||
"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
|
||||
j := strings.Index(a.Key, ":")
|
||||
aa[i].Namespace = a.Key[:j]
|
||||
aa[i].Key = a.Key[j+1:]
|
||||
}
|
||||
}
|
||||
}
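A hypothetical in-package helper illustrating the adjustment above; the attribute values are made up for the example:

```go
// exampleAdjustForeignAttributes shows how recognised xlink/xml/xmlns
// attributes are split into Namespace and Key while others are left alone.
// Illustration only; not part of the package.
func exampleAdjustForeignAttributes() []Attribute {
	attrs := []Attribute{
		{Key: "xlink:href", Val: "#target"}, // recognised: split into namespace + key
		{Key: "fill", Val: "red"},           // not in the list: left untouched
	}
	adjustForeignAttributes(attrs)
	// attrs[0] is now Attribute{Namespace: "xlink", Key: "href", Val: "#target"}.
	return attrs
}
```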
|
||||
|
||||
func htmlIntegrationPoint(n *Node) bool {
|
||||
if n.Type != ElementNode {
|
||||
return false
|
||||
}
|
||||
switch n.Namespace {
|
||||
case "math":
|
||||
if n.Data == "annotation-xml" {
|
||||
for _, a := range n.Attr {
|
||||
if a.Key == "encoding" {
|
||||
val := strings.ToLower(a.Val)
|
||||
if val == "text/html" || val == "application/xhtml+xml" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "svg":
|
||||
switch n.Data {
|
||||
case "desc", "foreignObject", "title":
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func mathMLTextIntegrationPoint(n *Node) bool {
|
||||
if n.Namespace != "math" {
|
||||
return false
|
||||
}
|
||||
switch n.Data {
|
||||
case "mi", "mo", "mn", "ms", "mtext":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Section 12.2.5.5.
|
||||
var breakout = map[string]bool{
|
||||
"b": true,
|
||||
"big": true,
|
||||
"blockquote": true,
|
||||
"body": true,
|
||||
"br": true,
|
||||
"center": true,
|
||||
"code": true,
|
||||
"dd": true,
|
||||
"div": true,
|
||||
"dl": true,
|
||||
"dt": true,
|
||||
"em": true,
|
||||
"embed": true,
|
||||
"h1": true,
|
||||
"h2": true,
|
||||
"h3": true,
|
||||
"h4": true,
|
||||
"h5": true,
|
||||
"h6": true,
|
||||
"head": true,
|
||||
"hr": true,
|
||||
"i": true,
|
||||
"img": true,
|
||||
"li": true,
|
||||
"listing": true,
|
||||
"menu": true,
|
||||
"meta": true,
|
||||
"nobr": true,
|
||||
"ol": true,
|
||||
"p": true,
|
||||
"pre": true,
|
||||
"ruby": true,
|
||||
"s": true,
|
||||
"small": true,
|
||||
"span": true,
|
||||
"strong": true,
|
||||
"strike": true,
|
||||
"sub": true,
|
||||
"sup": true,
|
||||
"table": true,
|
||||
"tt": true,
|
||||
"u": true,
|
||||
"ul": true,
|
||||
"var": true,
|
||||
}
|
||||
|
||||
// Section 12.2.5.5.
|
||||
var svgTagNameAdjustments = map[string]string{
|
||||
"altglyph": "altGlyph",
|
||||
"altglyphdef": "altGlyphDef",
|
||||
"altglyphitem": "altGlyphItem",
|
||||
"animatecolor": "animateColor",
|
||||
"animatemotion": "animateMotion",
|
||||
"animatetransform": "animateTransform",
|
||||
"clippath": "clipPath",
|
||||
"feblend": "feBlend",
|
||||
"fecolormatrix": "feColorMatrix",
|
||||
"fecomponenttransfer": "feComponentTransfer",
|
||||
"fecomposite": "feComposite",
|
||||
"feconvolvematrix": "feConvolveMatrix",
|
||||
"fediffuselighting": "feDiffuseLighting",
|
||||
"fedisplacementmap": "feDisplacementMap",
|
||||
"fedistantlight": "feDistantLight",
|
||||
"feflood": "feFlood",
|
||||
"fefunca": "feFuncA",
|
||||
"fefuncb": "feFuncB",
|
||||
"fefuncg": "feFuncG",
|
||||
"fefuncr": "feFuncR",
|
||||
"fegaussianblur": "feGaussianBlur",
|
||||
"feimage": "feImage",
|
||||
"femerge": "feMerge",
|
||||
"femergenode": "feMergeNode",
|
||||
"femorphology": "feMorphology",
|
||||
"feoffset": "feOffset",
|
||||
"fepointlight": "fePointLight",
|
||||
"fespecularlighting": "feSpecularLighting",
|
||||
"fespotlight": "feSpotLight",
|
||||
"fetile": "feTile",
|
||||
"feturbulence": "feTurbulence",
|
||||
"foreignobject": "foreignObject",
|
||||
"glyphref": "glyphRef",
|
||||
"lineargradient": "linearGradient",
|
||||
"radialgradient": "radialGradient",
|
||||
"textpath": "textPath",
|
||||
}
|
||||
|
||||
// Section 12.2.5.1
|
||||
var mathMLAttributeAdjustments = map[string]string{
|
||||
"definitionurl": "definitionURL",
|
||||
}
|
||||
|
||||
var svgAttributeAdjustments = map[string]string{
|
||||
"attributename": "attributeName",
|
||||
"attributetype": "attributeType",
|
||||
"basefrequency": "baseFrequency",
|
||||
"baseprofile": "baseProfile",
|
||||
"calcmode": "calcMode",
|
||||
"clippathunits": "clipPathUnits",
|
||||
"contentscripttype": "contentScriptType",
|
||||
"contentstyletype": "contentStyleType",
|
||||
"diffuseconstant": "diffuseConstant",
|
||||
"edgemode": "edgeMode",
|
||||
"externalresourcesrequired": "externalResourcesRequired",
|
||||
"filterres": "filterRes",
|
||||
"filterunits": "filterUnits",
|
||||
"glyphref": "glyphRef",
|
||||
"gradienttransform": "gradientTransform",
|
||||
"gradientunits": "gradientUnits",
|
||||
"kernelmatrix": "kernelMatrix",
|
||||
"kernelunitlength": "kernelUnitLength",
|
||||
"keypoints": "keyPoints",
|
||||
"keysplines": "keySplines",
|
||||
"keytimes": "keyTimes",
|
||||
"lengthadjust": "lengthAdjust",
|
||||
"limitingconeangle": "limitingConeAngle",
|
||||
"markerheight": "markerHeight",
|
||||
"markerunits": "markerUnits",
|
||||
"markerwidth": "markerWidth",
|
||||
"maskcontentunits": "maskContentUnits",
|
||||
"maskunits": "maskUnits",
|
||||
"numoctaves": "numOctaves",
|
||||
"pathlength": "pathLength",
|
||||
"patterncontentunits": "patternContentUnits",
|
||||
"patterntransform": "patternTransform",
|
||||
"patternunits": "patternUnits",
|
||||
"pointsatx": "pointsAtX",
|
||||
"pointsaty": "pointsAtY",
|
||||
"pointsatz": "pointsAtZ",
|
||||
"preservealpha": "preserveAlpha",
|
||||
"preserveaspectratio": "preserveAspectRatio",
|
||||
"primitiveunits": "primitiveUnits",
|
||||
"refx": "refX",
|
||||
"refy": "refY",
|
||||
"repeatcount": "repeatCount",
|
||||
"repeatdur": "repeatDur",
|
||||
"requiredextensions": "requiredExtensions",
|
||||
"requiredfeatures": "requiredFeatures",
|
||||
"specularconstant": "specularConstant",
|
||||
"specularexponent": "specularExponent",
|
||||
"spreadmethod": "spreadMethod",
|
||||
"startoffset": "startOffset",
|
||||
"stddeviation": "stdDeviation",
|
||||
"stitchtiles": "stitchTiles",
|
||||
"surfacescale": "surfaceScale",
|
||||
"systemlanguage": "systemLanguage",
|
||||
"tablevalues": "tableValues",
|
||||
"targetx": "targetX",
|
||||
"targety": "targetY",
|
||||
"textlength": "textLength",
|
||||
"viewbox": "viewBox",
|
||||
"viewtarget": "viewTarget",
|
||||
"xchannelselector": "xChannelSelector",
|
||||
"ychannelselector": "yChannelSelector",
|
||||
"zoomandpan": "zoomAndPan",
|
||||
}
|
||||
193 vendor/golang.org/x/net/html/node.go generated vendored
@@ -1,193 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package html
|
||||
|
||||
import (
|
||||
"golang.org/x/net/html/atom"
|
||||
)
|
||||
|
||||
// A NodeType is the type of a Node.
|
||||
type NodeType uint32
|
||||
|
||||
const (
|
||||
ErrorNode NodeType = iota
|
||||
TextNode
|
||||
DocumentNode
|
||||
ElementNode
|
||||
CommentNode
|
||||
DoctypeNode
|
||||
scopeMarkerNode
|
||||
)
|
||||
|
||||
// Section 12.2.3.3 says "scope markers are inserted when entering applet
|
||||
// elements, buttons, object elements, marquees, table cells, and table
|
||||
// captions, and are used to prevent formatting from 'leaking'".
|
||||
var scopeMarker = Node{Type: scopeMarkerNode}
|
||||
|
||||
// A Node consists of a NodeType and some Data (tag name for element nodes,
|
||||
// content for text) and are part of a tree of Nodes. Element nodes may also
|
||||
// have a Namespace and contain a slice of Attributes. Data is unescaped, so
|
||||
// that it looks like "a<b" rather than "a&lt;b".
|
||||
// is the atom for Data, or zero if Data is not a known tag name.
|
||||
//
|
||||
// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
|
||||
// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
|
||||
// "svg" is short for "http://www.w3.org/2000/svg".
|
||||
type Node struct {
|
||||
Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
|
||||
|
||||
Type NodeType
|
||||
DataAtom atom.Atom
|
||||
Data string
|
||||
Namespace string
|
||||
Attr []Attribute
|
||||
}
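A minimal sketch of building a Node by hand, following the conventions in the comment above (an empty Namespace for ordinary HTML, DataAtom taken from golang.org/x/net/html/atom); the element and attribute chosen are arbitrary:

```go
package main

import (
	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	// A hand-built <div class="note"> element node. The empty Namespace means
	// the ordinary XHTML namespace; DataAtom caches the interned tag name.
	div := &html.Node{
		Type:     html.ElementNode,
		DataAtom: atom.Div,
		Data:     "div",
		Attr:     []html.Attribute{{Key: "class", Val: "note"}},
	}
	_ = div
}
```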
|
||||
|
||||
// InsertBefore inserts newChild as a child of n, immediately before oldChild
|
||||
// in the sequence of n's children. oldChild may be nil, in which case newChild
|
||||
// is appended to the end of n's children.
|
||||
//
|
||||
// It will panic if newChild already has a parent or siblings.
|
||||
func (n *Node) InsertBefore(newChild, oldChild *Node) {
|
||||
if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
|
||||
panic("html: InsertBefore called for an attached child Node")
|
||||
}
|
||||
var prev, next *Node
|
||||
if oldChild != nil {
|
||||
prev, next = oldChild.PrevSibling, oldChild
|
||||
} else {
|
||||
prev = n.LastChild
|
||||
}
|
||||
if prev != nil {
|
||||
prev.NextSibling = newChild
|
||||
} else {
|
||||
n.FirstChild = newChild
|
||||
}
|
||||
if next != nil {
|
||||
next.PrevSibling = newChild
|
||||
} else {
|
||||
n.LastChild = newChild
|
||||
}
|
||||
newChild.Parent = n
|
||||
newChild.PrevSibling = prev
|
||||
newChild.NextSibling = next
|
||||
}
|
||||
|
||||
// AppendChild adds a node c as a child of n.
|
||||
//
|
||||
// It will panic if c already has a parent or siblings.
|
||||
func (n *Node) AppendChild(c *Node) {
|
||||
if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
|
||||
panic("html: AppendChild called for an attached child Node")
|
||||
}
|
||||
last := n.LastChild
|
||||
if last != nil {
|
||||
last.NextSibling = c
|
||||
} else {
|
||||
n.FirstChild = c
|
||||
}
|
||||
n.LastChild = c
|
||||
c.Parent = n
|
||||
c.PrevSibling = last
|
||||
}
|
||||
|
||||
// RemoveChild removes a node c that is a child of n. Afterwards, c will have
|
||||
// no parent and no siblings.
|
||||
//
|
||||
// It will panic if c's parent is not n.
|
||||
func (n *Node) RemoveChild(c *Node) {
|
||||
if c.Parent != n {
|
||||
panic("html: RemoveChild called for a non-child Node")
|
||||
}
|
||||
if n.FirstChild == c {
|
||||
n.FirstChild = c.NextSibling
|
||||
}
|
||||
if c.NextSibling != nil {
|
||||
c.NextSibling.PrevSibling = c.PrevSibling
|
||||
}
|
||||
if n.LastChild == c {
|
||||
n.LastChild = c.PrevSibling
|
||||
}
|
||||
if c.PrevSibling != nil {
|
||||
c.PrevSibling.NextSibling = c.NextSibling
|
||||
}
|
||||
c.Parent = nil
|
||||
c.PrevSibling = nil
|
||||
c.NextSibling = nil
|
||||
}
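A small usage sketch of the tree-manipulation helpers above; the nodes are constructed by hand and the printed assertions restate the documented post-conditions:

```go
package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	p := &html.Node{Type: html.ElementNode, Data: "p"}
	text := &html.Node{Type: html.TextNode, Data: "hello"}

	p.AppendChild(text)               // p now has exactly one child
	fmt.Println(p.FirstChild == text) // true

	p.RemoveChild(text)                                  // detach it again
	fmt.Println(p.FirstChild == nil, text.Parent == nil) // true true
}
```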
|
||||
|
||||
// reparentChildren reparents all of src's child nodes to dst.
|
||||
func reparentChildren(dst, src *Node) {
|
||||
for {
|
||||
child := src.FirstChild
|
||||
if child == nil {
|
||||
break
|
||||
}
|
||||
src.RemoveChild(child)
|
||||
dst.AppendChild(child)
|
||||
}
|
||||
}
|
||||
|
||||
// clone returns a new node with the same type, data and attributes.
|
||||
// The clone has no parent, no siblings and no children.
|
||||
func (n *Node) clone() *Node {
|
||||
m := &Node{
|
||||
Type: n.Type,
|
||||
DataAtom: n.DataAtom,
|
||||
Data: n.Data,
|
||||
Attr: make([]Attribute, len(n.Attr)),
|
||||
}
|
||||
copy(m.Attr, n.Attr)
|
||||
return m
|
||||
}
|
||||
|
||||
// nodeStack is a stack of nodes.
|
||||
type nodeStack []*Node
|
||||
|
||||
// pop pops the stack. It will panic if s is empty.
|
||||
func (s *nodeStack) pop() *Node {
|
||||
i := len(*s)
|
||||
n := (*s)[i-1]
|
||||
*s = (*s)[:i-1]
|
||||
return n
|
||||
}
|
||||
|
||||
// top returns the most recently pushed node, or nil if s is empty.
|
||||
func (s *nodeStack) top() *Node {
|
||||
if i := len(*s); i > 0 {
|
||||
return (*s)[i-1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// index returns the index of the top-most occurrence of n in the stack, or -1
|
||||
// if n is not present.
|
||||
func (s *nodeStack) index(n *Node) int {
|
||||
for i := len(*s) - 1; i >= 0; i-- {
|
||||
if (*s)[i] == n {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// insert inserts a node at the given index.
|
||||
func (s *nodeStack) insert(i int, n *Node) {
|
||||
(*s) = append(*s, nil)
|
||||
copy((*s)[i+1:], (*s)[i:])
|
||||
(*s)[i] = n
|
||||
}
|
||||
|
||||
// remove removes a node from the stack. It is a no-op if n is not present.
|
||||
func (s *nodeStack) remove(n *Node) {
|
||||
i := s.index(n)
|
||||
if i == -1 {
|
||||
return
|
||||
}
|
||||
copy((*s)[i:], (*s)[i+1:])
|
||||
j := len(*s) - 1
|
||||
(*s)[j] = nil
|
||||
*s = (*s)[:j]
|
||||
}
|
||||
2094 vendor/golang.org/x/net/html/parse.go generated vendored
File diff suppressed because it is too large
271 vendor/golang.org/x/net/html/render.go generated vendored
@@ -1,271 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package html
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
io.ByteWriter
|
||||
WriteString(string) (int, error)
|
||||
}
|
||||
|
||||
// Render renders the parse tree n to the given writer.
|
||||
//
|
||||
// Rendering is done on a 'best effort' basis: calling Parse on the output of
|
||||
// Render will always result in something similar to the original tree, but it
|
||||
// is not necessarily an exact clone unless the original tree was 'well-formed'.
|
||||
// 'Well-formed' is not easily specified; the HTML5 specification is
|
||||
// complicated.
|
||||
//
|
||||
// Calling Parse on arbitrary input typically results in a 'well-formed' parse
|
||||
// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
|
||||
// For example, in a 'well-formed' parse tree, no <a> element is a child of
|
||||
// another <a> element: parsing "<a><a>" results in two sibling elements.
|
||||
// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
|
||||
// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
|
||||
// children; the <a> is reparented to the <table>'s parent. However, calling
|
||||
// Parse on "<a><table><a>" does not return an error, but the result has an <a>
|
||||
// element with an <a> child, and is therefore not 'well-formed'.
|
||||
//
|
||||
// Programmatically constructed trees are typically also 'well-formed', but it
|
||||
// is possible to construct a tree that looks innocuous but, when rendered and
|
||||
// re-parsed, results in a different tree. A simple example is that a solitary
|
||||
// text node would become a tree containing <html>, <head> and <body> elements.
|
||||
// Another example is that the programmatic equivalent of "a<head>b</head>c"
|
||||
// becomes "<html><head><head/><body>abc</body></html>".
|
||||
func Render(w io.Writer, n *Node) error {
|
||||
if x, ok := w.(writer); ok {
|
||||
return render(x, n)
|
||||
}
|
||||
buf := bufio.NewWriter(w)
|
||||
if err := render(buf, n); err != nil {
|
||||
return err
|
||||
}
|
||||
return buf.Flush()
|
||||
}
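A minimal round-trip sketch using Render together with Parse; the input fragment is arbitrary. As the 'best effort' caveat above notes, the output is a normalised tree rather than a byte-for-byte copy of the input:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader("<p>Hello, <b>world</b>!"))
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := html.Render(&buf, doc); err != nil {
		panic(err)
	}
	// The parser supplies the implied <html>, <head> and <body> elements,
	// so the rendered output is a normalised version of the input fragment.
	fmt.Println(buf.String())
}
```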
|
||||
|
||||
// plaintextAbort is returned from render1 when a <plaintext> element
|
||||
// has been rendered. No more end tags should be rendered after that.
|
||||
var plaintextAbort = errors.New("html: internal error (plaintext abort)")
|
||||
|
||||
func render(w writer, n *Node) error {
|
||||
err := render1(w, n)
|
||||
if err == plaintextAbort {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func render1(w writer, n *Node) error {
|
||||
// Render non-element nodes; these are the easy cases.
|
||||
switch n.Type {
|
||||
case ErrorNode:
|
||||
return errors.New("html: cannot render an ErrorNode node")
|
||||
case TextNode:
|
||||
return escape(w, n.Data)
|
||||
case DocumentNode:
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
if err := render1(w, c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case ElementNode:
|
||||
// No-op.
|
||||
case CommentNode:
|
||||
if _, err := w.WriteString("<!--"); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString(n.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString("-->"); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case DoctypeNode:
|
||||
if _, err := w.WriteString("<!DOCTYPE "); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString(n.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
if n.Attr != nil {
|
||||
var p, s string
|
||||
for _, a := range n.Attr {
|
||||
switch a.Key {
|
||||
case "public":
|
||||
p = a.Val
|
||||
case "system":
|
||||
s = a.Val
|
||||
}
|
||||
}
|
||||
if p != "" {
|
||||
if _, err := w.WriteString(" PUBLIC "); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeQuoted(w, p); err != nil {
|
||||
return err
|
||||
}
|
||||
if s != "" {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeQuoted(w, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if s != "" {
|
||||
if _, err := w.WriteString(" SYSTEM "); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeQuoted(w, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return w.WriteByte('>')
|
||||
default:
|
||||
return errors.New("html: unknown node type")
|
||||
}
|
||||
|
||||
// Render the <xxx> opening tag.
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString(n.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, a := range n.Attr {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
if a.Namespace != "" {
|
||||
if _, err := w.WriteString(a.Namespace); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := w.WriteString(a.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString(`="`); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := escape(w, a.Val); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if voidElements[n.Data] {
|
||||
if n.FirstChild != nil {
|
||||
return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
|
||||
}
|
||||
_, err := w.WriteString("/>")
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add initial newline where there is danger of a newline beging ignored.
|
||||
if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
|
||||
switch n.Data {
|
||||
case "pre", "listing", "textarea":
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Render any child nodes.
|
||||
switch n.Data {
|
||||
case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
if c.Type == TextNode {
|
||||
if _, err := w.WriteString(c.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := render1(w, c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if n.Data == "plaintext" {
|
||||
// Don't render anything else. <plaintext> must be the
|
||||
// last element in the file, with no closing tag.
|
||||
return plaintextAbort
|
||||
}
|
||||
default:
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
if err := render1(w, c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Render the </xxx> closing tag.
|
||||
if _, err := w.WriteString("</"); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString(n.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.WriteByte('>')
|
||||
}
|
||||
|
||||
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
|
||||
// quotes, but if s contains a double quote, it will use single quotes.
|
||||
// It is used for writing the identifiers in a doctype declaration.
|
||||
// In valid HTML, they can't contain both types of quotes.
|
||||
func writeQuoted(w writer, s string) error {
|
||||
var q byte = '"'
|
||||
if strings.Contains(s, `"`) {
|
||||
q = '\''
|
||||
}
|
||||
if err := w.WriteByte(q); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.WriteString(s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte(q); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
|
||||
// are those that can't have any contents.
|
||||
var voidElements = map[string]bool{
|
||||
"area": true,
|
||||
"base": true,
|
||||
"br": true,
|
||||
"col": true,
|
||||
"command": true,
|
||||
"embed": true,
|
||||
"hr": true,
|
||||
"img": true,
|
||||
"input": true,
|
||||
"keygen": true,
|
||||
"link": true,
|
||||
"meta": true,
|
||||
"param": true,
|
||||
"source": true,
|
||||
"track": true,
|
||||
"wbr": true,
|
||||
}
|
||||
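The Render contract above is easiest to see in a round trip. A minimal sketch using only the public Parse and Render API of this package (the input string is arbitrary):

```go
package main

import (
	"log"
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// Parsing a solitary text node still yields a full <html><head><body> tree,
	// so the rendered output differs from the input, exactly as the Render
	// documentation warns.
	doc, err := html.Parse(strings.NewReader("just some text"))
	if err != nil {
		log.Fatal(err)
	}
	if err := html.Render(os.Stdout, doc); err != nil {
		log.Fatal(err)
	}
	// Output: <html><head></head><body>just some text</body></html>
}
```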
1219
vendor/golang.org/x/net/html/token.go
generated
vendored
File diff suppressed because it is too large
106
vendor/golang.org/x/net/websocket/client.go
generated
vendored
@@ -1,106 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// DialError is an error that occurs while dialling a websocket server.
|
||||
type DialError struct {
|
||||
*Config
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *DialError) Error() string {
|
||||
return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
|
||||
}
|
||||
|
||||
// NewConfig creates a new WebSocket config for client connection.
|
||||
func NewConfig(server, origin string) (config *Config, err error) {
|
||||
config = new(Config)
|
||||
config.Version = ProtocolVersionHybi13
|
||||
config.Location, err = url.ParseRequestURI(server)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
config.Origin, err = url.ParseRequestURI(origin)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
config.Header = http.Header(make(map[string][]string))
|
||||
return
|
||||
}
|
||||
|
||||
// NewClient creates a new WebSocket client connection over rwc.
|
||||
func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
|
||||
br := bufio.NewReader(rwc)
|
||||
bw := bufio.NewWriter(rwc)
|
||||
err = hybiClientHandshake(config, br, bw)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
buf := bufio.NewReadWriter(br, bw)
|
||||
ws = newHybiClientConn(config, buf, rwc)
|
||||
return
|
||||
}
|
||||
|
||||
// Dial opens a new client connection to a WebSocket.
|
||||
func Dial(url_, protocol, origin string) (ws *Conn, err error) {
|
||||
config, err := NewConfig(url_, origin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if protocol != "" {
|
||||
config.Protocol = []string{protocol}
|
||||
}
|
||||
return DialConfig(config)
|
||||
}
|
||||
|
||||
var portMap = map[string]string{
|
||||
"ws": "80",
|
||||
"wss": "443",
|
||||
}
|
||||
|
||||
func parseAuthority(location *url.URL) string {
|
||||
if _, ok := portMap[location.Scheme]; ok {
|
||||
if _, _, err := net.SplitHostPort(location.Host); err != nil {
|
||||
return net.JoinHostPort(location.Host, portMap[location.Scheme])
|
||||
}
|
||||
}
|
||||
return location.Host
|
||||
}
|
||||
|
||||
// DialConfig opens a new client connection to a WebSocket with a config.
|
||||
func DialConfig(config *Config) (ws *Conn, err error) {
|
||||
var client net.Conn
|
||||
if config.Location == nil {
|
||||
return nil, &DialError{config, ErrBadWebSocketLocation}
|
||||
}
|
||||
if config.Origin == nil {
|
||||
return nil, &DialError{config, ErrBadWebSocketOrigin}
|
||||
}
|
||||
dialer := config.Dialer
|
||||
if dialer == nil {
|
||||
dialer = &net.Dialer{}
|
||||
}
|
||||
client, err = dialWithDialer(dialer, config)
|
||||
if err != nil {
|
||||
goto Error
|
||||
}
|
||||
ws, err = NewClient(config, client)
|
||||
if err != nil {
|
||||
client.Close()
|
||||
goto Error
|
||||
}
|
||||
return
|
||||
|
||||
Error:
|
||||
return nil, &DialError{config, err}
|
||||
}
|
||||
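As a usage sketch for Dial above, a client that writes one frame and reads the reply; the echo endpoint address is hypothetical:

```go
package main

import (
	"log"

	"golang.org/x/net/websocket"
)

func main() {
	// Dial performs the hybi handshake; the origin is required by NewConfig.
	ws, err := websocket.Dial("ws://localhost:12345/echo", "", "http://localhost/")
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()

	if _, err := ws.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	reply := make([]byte, 512)
	n, err := ws.Read(reply)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received: %s", reply[:n])
}
```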
24
vendor/golang.org/x/net/websocket/dial.go
generated
vendored
@@ -1,24 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"crypto/tls"
	"net"
)

func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
	switch config.Location.Scheme {
	case "ws":
		conn, err = dialer.Dial("tcp", parseAuthority(config.Location))

	case "wss":
		conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig)

	default:
		err = ErrBadScheme
	}
	return
}
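For a secure (wss) connection the same code path is reached through NewConfig and DialConfig, with Config.TlsConfig handed to tls.DialWithDialer above; a sketch with a hypothetical server name:

```go
package main

import (
	"crypto/tls"
	"log"

	"golang.org/x/net/websocket"
)

func main() {
	config, err := websocket.NewConfig("wss://example.com/echo", "https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	// TlsConfig is optional; dialWithDialer passes it to tls.DialWithDialer.
	config.TlsConfig = &tls.Config{ServerName: "example.com"}

	ws, err := websocket.DialConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()
}
```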
583
vendor/golang.org/x/net/websocket/hybi.go
generated
vendored
@@ -1,583 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
// This file implements a protocol of hybi draft.
|
||||
// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
|
||||
|
||||
closeStatusNormal = 1000
|
||||
closeStatusGoingAway = 1001
|
||||
closeStatusProtocolError = 1002
|
||||
closeStatusUnsupportedData = 1003
|
||||
closeStatusFrameTooLarge = 1004
|
||||
closeStatusNoStatusRcvd = 1005
|
||||
closeStatusAbnormalClosure = 1006
|
||||
closeStatusBadMessageData = 1007
|
||||
closeStatusPolicyViolation = 1008
|
||||
closeStatusTooBigData = 1009
|
||||
closeStatusExtensionMismatch = 1010
|
||||
|
||||
maxControlFramePayloadLength = 125
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBadMaskingKey = &ProtocolError{"bad masking key"}
|
||||
ErrBadPongMessage = &ProtocolError{"bad pong message"}
|
||||
ErrBadClosingStatus = &ProtocolError{"bad closing status"}
|
||||
ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
|
||||
ErrNotImplemented = &ProtocolError{"not implemented"}
|
||||
|
||||
handshakeHeader = map[string]bool{
|
||||
"Host": true,
|
||||
"Upgrade": true,
|
||||
"Connection": true,
|
||||
"Sec-Websocket-Key": true,
|
||||
"Sec-Websocket-Origin": true,
|
||||
"Sec-Websocket-Version": true,
|
||||
"Sec-Websocket-Protocol": true,
|
||||
"Sec-Websocket-Accept": true,
|
||||
}
|
||||
)
|
||||
|
||||
// A hybiFrameHeader is a frame header as defined in hybi draft.
|
||||
type hybiFrameHeader struct {
|
||||
Fin bool
|
||||
Rsv [3]bool
|
||||
OpCode byte
|
||||
Length int64
|
||||
MaskingKey []byte
|
||||
|
||||
data *bytes.Buffer
|
||||
}
|
||||
|
||||
// A hybiFrameReader is a reader for hybi frame.
|
||||
type hybiFrameReader struct {
|
||||
reader io.Reader
|
||||
|
||||
header hybiFrameHeader
|
||||
pos int64
|
||||
length int
|
||||
}
|
||||
|
||||
func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
|
||||
n, err = frame.reader.Read(msg)
|
||||
if frame.header.MaskingKey != nil {
|
||||
for i := 0; i < n; i++ {
|
||||
msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
|
||||
frame.pos++
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
|
||||
|
||||
func (frame *hybiFrameReader) HeaderReader() io.Reader {
|
||||
if frame.header.data == nil {
|
||||
return nil
|
||||
}
|
||||
if frame.header.data.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
return frame.header.data
|
||||
}
|
||||
|
||||
func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
|
||||
|
||||
func (frame *hybiFrameReader) Len() (n int) { return frame.length }
|
||||
|
||||
// A hybiFrameReaderFactory creates new frame reader based on its frame type.
|
||||
type hybiFrameReaderFactory struct {
|
||||
*bufio.Reader
|
||||
}
|
||||
|
||||
// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.
|
||||
// See Section 5.2 Base Framing protocol for detail.
|
||||
// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
|
||||
func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
|
||||
hybiFrame := new(hybiFrameReader)
|
||||
frame = hybiFrame
|
||||
var header []byte
|
||||
var b byte
|
||||
// First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
|
||||
b, err = buf.ReadByte()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
header = append(header, b)
|
||||
hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
|
||||
for i := 0; i < 3; i++ {
|
||||
j := uint(6 - i)
|
||||
hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
|
||||
}
|
||||
hybiFrame.header.OpCode = header[0] & 0x0f
|
||||
|
||||
// Second byte. Mask/Payload len(7bits)
|
||||
b, err = buf.ReadByte()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
header = append(header, b)
|
||||
mask := (b & 0x80) != 0
|
||||
b &= 0x7f
|
||||
lengthFields := 0
|
||||
switch {
|
||||
case b <= 125: // Payload length 7bits.
|
||||
hybiFrame.header.Length = int64(b)
|
||||
case b == 126: // Payload length 7+16bits
|
||||
lengthFields = 2
|
||||
case b == 127: // Payload length 7+64bits
|
||||
lengthFields = 8
|
||||
}
|
||||
for i := 0; i < lengthFields; i++ {
|
||||
b, err = buf.ReadByte()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
|
||||
b &= 0x7f
|
||||
}
|
||||
header = append(header, b)
|
||||
hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
|
||||
}
|
||||
if mask {
|
||||
// Masking key. 4 bytes.
|
||||
for i := 0; i < 4; i++ {
|
||||
b, err = buf.ReadByte()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
header = append(header, b)
|
||||
hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
|
||||
}
|
||||
}
|
||||
hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
|
||||
hybiFrame.header.data = bytes.NewBuffer(header)
|
||||
hybiFrame.length = len(header) + int(hybiFrame.header.Length)
|
||||
return
|
||||
}
|
||||
|
||||
// A HybiFrameWriter is a writer for hybi frame.
|
||||
type hybiFrameWriter struct {
|
||||
writer *bufio.Writer
|
||||
|
||||
header *hybiFrameHeader
|
||||
}
|
||||
|
||||
func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
|
||||
var header []byte
|
||||
var b byte
|
||||
if frame.header.Fin {
|
||||
b |= 0x80
|
||||
}
|
||||
for i := 0; i < 3; i++ {
|
||||
if frame.header.Rsv[i] {
|
||||
j := uint(6 - i)
|
||||
b |= 1 << j
|
||||
}
|
||||
}
|
||||
b |= frame.header.OpCode
|
||||
header = append(header, b)
|
||||
if frame.header.MaskingKey != nil {
|
||||
b = 0x80
|
||||
} else {
|
||||
b = 0
|
||||
}
|
||||
lengthFields := 0
|
||||
length := len(msg)
|
||||
switch {
|
||||
case length <= 125:
|
||||
b |= byte(length)
|
||||
case length < 65536:
|
||||
b |= 126
|
||||
lengthFields = 2
|
||||
default:
|
||||
b |= 127
|
||||
lengthFields = 8
|
||||
}
|
||||
header = append(header, b)
|
||||
for i := 0; i < lengthFields; i++ {
|
||||
j := uint((lengthFields - i - 1) * 8)
|
||||
b = byte((length >> j) & 0xff)
|
||||
header = append(header, b)
|
||||
}
|
||||
if frame.header.MaskingKey != nil {
|
||||
if len(frame.header.MaskingKey) != 4 {
|
||||
return 0, ErrBadMaskingKey
|
||||
}
|
||||
header = append(header, frame.header.MaskingKey...)
|
||||
frame.writer.Write(header)
|
||||
data := make([]byte, length)
|
||||
for i := range data {
|
||||
data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
|
||||
}
|
||||
frame.writer.Write(data)
|
||||
err = frame.writer.Flush()
|
||||
return length, err
|
||||
}
|
||||
frame.writer.Write(header)
|
||||
frame.writer.Write(msg)
|
||||
err = frame.writer.Flush()
|
||||
return length, err
|
||||
}
|
||||
|
||||
func (frame *hybiFrameWriter) Close() error { return nil }
|
||||
|
||||
type hybiFrameWriterFactory struct {
|
||||
*bufio.Writer
|
||||
needMaskingKey bool
|
||||
}
|
||||
|
||||
func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
|
||||
frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
|
||||
if buf.needMaskingKey {
|
||||
frameHeader.MaskingKey, err = generateMaskingKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
|
||||
}
|
||||
|
||||
type hybiFrameHandler struct {
|
||||
conn *Conn
|
||||
payloadType byte
|
||||
}
|
||||
|
||||
func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
|
||||
if handler.conn.IsServerConn() {
|
||||
// The client MUST mask all frames sent to the server.
|
||||
if frame.(*hybiFrameReader).header.MaskingKey == nil {
|
||||
handler.WriteClose(closeStatusProtocolError)
|
||||
return nil, io.EOF
|
||||
}
|
||||
} else {
|
||||
// The server MUST NOT mask any frames.
|
||||
if frame.(*hybiFrameReader).header.MaskingKey != nil {
|
||||
handler.WriteClose(closeStatusProtocolError)
|
||||
return nil, io.EOF
|
||||
}
|
||||
}
|
||||
if header := frame.HeaderReader(); header != nil {
|
||||
io.Copy(ioutil.Discard, header)
|
||||
}
|
||||
switch frame.PayloadType() {
|
||||
case ContinuationFrame:
|
||||
frame.(*hybiFrameReader).header.OpCode = handler.payloadType
|
||||
case TextFrame, BinaryFrame:
|
||||
handler.payloadType = frame.PayloadType()
|
||||
case CloseFrame:
|
||||
return nil, io.EOF
|
||||
case PingFrame, PongFrame:
|
||||
b := make([]byte, maxControlFramePayloadLength)
|
||||
n, err := io.ReadFull(frame, b)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, frame)
|
||||
if frame.PayloadType() == PingFrame {
|
||||
if _, err := handler.WritePong(b[:n]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
return frame, nil
|
||||
}
|
||||
|
||||
func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
|
||||
handler.conn.wio.Lock()
|
||||
defer handler.conn.wio.Unlock()
|
||||
w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(msg, uint16(status))
|
||||
_, err = w.Write(msg)
|
||||
w.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
|
||||
handler.conn.wio.Lock()
|
||||
defer handler.conn.wio.Unlock()
|
||||
w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err = w.Write(msg)
|
||||
w.Close()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
|
||||
func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
|
||||
if buf == nil {
|
||||
br := bufio.NewReader(rwc)
|
||||
bw := bufio.NewWriter(rwc)
|
||||
buf = bufio.NewReadWriter(br, bw)
|
||||
}
|
||||
ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
|
||||
frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
|
||||
frameWriterFactory: hybiFrameWriterFactory{
|
||||
buf.Writer, request == nil},
|
||||
PayloadType: TextFrame,
|
||||
defaultCloseStatus: closeStatusNormal}
|
||||
ws.frameHandler = &hybiFrameHandler{conn: ws}
|
||||
return ws
|
||||
}
|
||||
|
||||
// generateMaskingKey generates a masking key for a frame.
|
||||
func generateMaskingKey() (maskingKey []byte, err error) {
|
||||
maskingKey = make([]byte, 4)
|
||||
if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// generateNonce generates a nonce consisting of a randomly selected 16-byte
|
||||
// value that has been base64-encoded.
|
||||
func generateNonce() (nonce []byte) {
|
||||
key := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
nonce = make([]byte, 24)
|
||||
base64.StdEncoding.Encode(nonce, key)
|
||||
return
|
||||
}
|
||||
|
||||
// removeZone removes IPv6 zone identifier from host.
|
||||
// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
|
||||
func removeZone(host string) string {
|
||||
if !strings.HasPrefix(host, "[") {
|
||||
return host
|
||||
}
|
||||
i := strings.LastIndex(host, "]")
|
||||
if i < 0 {
|
||||
return host
|
||||
}
|
||||
j := strings.LastIndex(host[:i], "%")
|
||||
if j < 0 {
|
||||
return host
|
||||
}
|
||||
return host[:j] + host[i:]
|
||||
}
|
||||
|
||||
// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
|
||||
// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
|
||||
func getNonceAccept(nonce []byte) (expected []byte, err error) {
|
||||
h := sha1.New()
|
||||
if _, err = h.Write(nonce); err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = h.Write([]byte(websocketGUID)); err != nil {
|
||||
return
|
||||
}
|
||||
expected = make([]byte, 28)
|
||||
base64.StdEncoding.Encode(expected, h.Sum(nil))
|
||||
return
|
||||
}
|
||||
|
||||
// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
|
||||
func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
|
||||
bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
|
||||
|
||||
// According to RFC 6874, an HTTP client, proxy, or other
|
||||
// intermediary must remove any IPv6 zone identifier attached
|
||||
// to an outgoing URI.
|
||||
bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
|
||||
bw.WriteString("Upgrade: websocket\r\n")
|
||||
bw.WriteString("Connection: Upgrade\r\n")
|
||||
nonce := generateNonce()
|
||||
if config.handshakeData != nil {
|
||||
nonce = []byte(config.handshakeData["key"])
|
||||
}
|
||||
bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
|
||||
bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
|
||||
|
||||
if config.Version != ProtocolVersionHybi13 {
|
||||
return ErrBadProtocolVersion
|
||||
}
|
||||
|
||||
bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
|
||||
if len(config.Protocol) > 0 {
|
||||
bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
|
||||
}
|
||||
// TODO(ukai): send Sec-WebSocket-Extensions.
|
||||
err = config.Header.WriteSubset(bw, handshakeHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bw.WriteString("\r\n")
|
||||
if err = bw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 101 {
|
||||
return ErrBadStatus
|
||||
}
|
||||
if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
|
||||
strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
|
||||
return ErrBadUpgrade
|
||||
}
|
||||
expectedAccept, err := getNonceAccept(nonce)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
|
||||
return ErrChallengeResponse
|
||||
}
|
||||
if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
|
||||
return ErrUnsupportedExtensions
|
||||
}
|
||||
offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
|
||||
if offeredProtocol != "" {
|
||||
protocolMatched := false
|
||||
for i := 0; i < len(config.Protocol); i++ {
|
||||
if config.Protocol[i] == offeredProtocol {
|
||||
protocolMatched = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !protocolMatched {
|
||||
return ErrBadWebSocketProtocol
|
||||
}
|
||||
config.Protocol = []string{offeredProtocol}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// newHybiClientConn creates a client WebSocket connection after handshake.
|
||||
func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
|
||||
return newHybiConn(config, buf, rwc, nil)
|
||||
}
|
||||
|
||||
// A HybiServerHandshaker performs a server handshake using hybi draft protocol.
|
||||
type hybiServerHandshaker struct {
|
||||
*Config
|
||||
accept []byte
|
||||
}
|
||||
|
||||
func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
|
||||
c.Version = ProtocolVersionHybi13
|
||||
if req.Method != "GET" {
|
||||
return http.StatusMethodNotAllowed, ErrBadRequestMethod
|
||||
}
|
||||
// HTTP version can be safely ignored.
|
||||
|
||||
if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
|
||||
!strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
|
||||
return http.StatusBadRequest, ErrNotWebSocket
|
||||
}
|
||||
|
||||
key := req.Header.Get("Sec-Websocket-Key")
|
||||
if key == "" {
|
||||
return http.StatusBadRequest, ErrChallengeResponse
|
||||
}
|
||||
version := req.Header.Get("Sec-Websocket-Version")
|
||||
switch version {
|
||||
case "13":
|
||||
c.Version = ProtocolVersionHybi13
|
||||
default:
|
||||
return http.StatusBadRequest, ErrBadWebSocketVersion
|
||||
}
|
||||
var scheme string
|
||||
if req.TLS != nil {
|
||||
scheme = "wss"
|
||||
} else {
|
||||
scheme = "ws"
|
||||
}
|
||||
c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
|
||||
if err != nil {
|
||||
return http.StatusBadRequest, err
|
||||
}
|
||||
protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
|
||||
if protocol != "" {
|
||||
protocols := strings.Split(protocol, ",")
|
||||
for i := 0; i < len(protocols); i++ {
|
||||
c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
|
||||
}
|
||||
}
|
||||
c.accept, err = getNonceAccept([]byte(key))
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
return http.StatusSwitchingProtocols, nil
|
||||
}
|
||||
|
||||
// Origin parses the Origin header in req.
|
||||
// If the Origin header is not set, it returns nil and nil.
|
||||
func Origin(config *Config, req *http.Request) (*url.URL, error) {
|
||||
var origin string
|
||||
switch config.Version {
|
||||
case ProtocolVersionHybi13:
|
||||
origin = req.Header.Get("Origin")
|
||||
}
|
||||
if origin == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return url.ParseRequestURI(origin)
|
||||
}
|
||||
|
||||
func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
|
||||
if len(c.Protocol) > 0 {
|
||||
if len(c.Protocol) != 1 {
|
||||
// You need to choose a Protocol in the Handshake func in Server.
|
||||
return ErrBadWebSocketProtocol
|
||||
}
|
||||
}
|
||||
buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
|
||||
buf.WriteString("Upgrade: websocket\r\n")
|
||||
buf.WriteString("Connection: Upgrade\r\n")
|
||||
buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
|
||||
if len(c.Protocol) > 0 {
|
||||
buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
|
||||
}
|
||||
// TODO(ukai): send Sec-WebSocket-Extensions.
|
||||
if c.Header != nil {
|
||||
err := c.Header.WriteSubset(buf, handshakeHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
buf.WriteString("\r\n")
|
||||
return buf.Flush()
|
||||
}
|
||||
|
||||
func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
|
||||
return newHybiServerConn(c.Config, buf, rwc, request)
|
||||
}
|
||||
|
||||
// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
|
||||
func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
|
||||
return newHybiConn(config, buf, rwc, request)
|
||||
}
|
||||
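The getNonceAccept helper above is the standard RFC 6455 key derivation (SHA-1 of the client key concatenated with the WebSocket GUID, then base64). A standalone sketch using the sample key from RFC 6455, section 1.3:

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

const websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

func main() {
	// Sample Sec-WebSocket-Key value from RFC 6455.
	key := "dGhlIHNhbXBsZSBub25jZQ=="
	h := sha1.New()
	h.Write([]byte(key + websocketGUID))
	accept := base64.StdEncoding.EncodeToString(h.Sum(nil))
	fmt.Println(accept) // s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}
```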
113
vendor/golang.org/x/net/websocket/server.go
generated
vendored
@@ -1,113 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
|
||||
var hs serverHandshaker = &hybiServerHandshaker{Config: config}
|
||||
code, err := hs.ReadHandshake(buf.Reader, req)
|
||||
if err == ErrBadWebSocketVersion {
|
||||
fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
|
||||
fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
|
||||
buf.WriteString("\r\n")
|
||||
buf.WriteString(err.Error())
|
||||
buf.Flush()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
|
||||
buf.WriteString("\r\n")
|
||||
buf.WriteString(err.Error())
|
||||
buf.Flush()
|
||||
return
|
||||
}
|
||||
if handshake != nil {
|
||||
err = handshake(config, req)
|
||||
if err != nil {
|
||||
code = http.StatusForbidden
|
||||
fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
|
||||
buf.WriteString("\r\n")
|
||||
buf.Flush()
|
||||
return
|
||||
}
|
||||
}
|
||||
err = hs.AcceptHandshake(buf.Writer)
|
||||
if err != nil {
|
||||
code = http.StatusBadRequest
|
||||
fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
|
||||
buf.WriteString("\r\n")
|
||||
buf.Flush()
|
||||
return
|
||||
}
|
||||
conn = hs.NewServerConn(buf, rwc, req)
|
||||
return
|
||||
}
|
||||
|
||||
// Server represents a server of a WebSocket.
|
||||
type Server struct {
|
||||
// Config is a WebSocket configuration for new WebSocket connection.
|
||||
Config
|
||||
|
||||
// Handshake is an optional function in WebSocket handshake.
|
||||
// For example, you can check, or choose not to check, the Origin header.
|
||||
// As another example, you can select config.Protocol.
|
||||
Handshake func(*Config, *http.Request) error
|
||||
|
||||
// Handler handles a WebSocket connection.
|
||||
Handler
|
||||
}
|
||||
|
||||
// ServeHTTP implements the http.Handler interface for a WebSocket
|
||||
func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
s.serveWebSocket(w, req)
|
||||
}
|
||||
|
||||
func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
|
||||
rwc, buf, err := w.(http.Hijacker).Hijack()
|
||||
if err != nil {
|
||||
panic("Hijack failed: " + err.Error())
|
||||
}
|
||||
// The server should abort the WebSocket connection if it finds
|
||||
// the client did not send a handshake that matches with protocol
|
||||
// specification.
|
||||
defer rwc.Close()
|
||||
conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if conn == nil {
|
||||
panic("unexpected nil conn")
|
||||
}
|
||||
s.Handler(conn)
|
||||
}
|
||||
|
||||
// Handler is a simple interface to a WebSocket browser client.
|
||||
// It checks whether the Origin header is a valid URL by default.
|
||||
// You might want to verify websocket.Conn.Config().Origin in the func.
|
||||
// If you use Server instead of Handler, you could call websocket.Origin and
|
||||
// check the origin in your Handshake func. So, if you want to accept
|
||||
// non-browser clients, which do not send an Origin header, set a
|
||||
// Server.Handshake that does not check the origin.
|
||||
type Handler func(*Conn)
|
||||
|
||||
func checkOrigin(config *Config, req *http.Request) (err error) {
|
||||
config.Origin, err = Origin(config, req)
|
||||
if err == nil && config.Origin == nil {
|
||||
return fmt.Errorf("null origin")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ServeHTTP implements the http.Handler interface for a WebSocket
|
||||
func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
s := Server{Handler: h, Handshake: checkOrigin}
|
||||
s.serveWebSocket(w, req)
|
||||
}
|
||||
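A minimal echo server wired through the Handler type above; the port and path are arbitrary:

```go
package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

// echoServer copies every received frame straight back to the client.
func echoServer(ws *websocket.Conn) {
	io.Copy(ws, ws)
}

func main() {
	// websocket.Handler validates the Origin header via checkOrigin before
	// calling echoServer; use websocket.Server for a custom Handshake.
	http.Handle("/echo", websocket.Handler(echoServer))
	log.Fatal(http.ListenAndServe(":12345", nil))
}
```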
448
vendor/golang.org/x/net/websocket/websocket.go
generated
vendored
@@ -1,448 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements a client and server for the WebSocket protocol
|
||||
// as specified in RFC 6455.
|
||||
//
|
||||
// This package currently lacks some features found in an alternative
|
||||
// and more actively maintained WebSocket package:
|
||||
//
|
||||
// https://godoc.org/github.com/gorilla/websocket
|
||||
//
|
||||
package websocket // import "golang.org/x/net/websocket"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
ProtocolVersionHybi13 = 13
|
||||
ProtocolVersionHybi = ProtocolVersionHybi13
|
||||
SupportedProtocolVersion = "13"
|
||||
|
||||
ContinuationFrame = 0
|
||||
TextFrame = 1
|
||||
BinaryFrame = 2
|
||||
CloseFrame = 8
|
||||
PingFrame = 9
|
||||
PongFrame = 10
|
||||
UnknownFrame = 255
|
||||
|
||||
DefaultMaxPayloadBytes = 32 << 20 // 32MB
|
||||
)
|
||||
|
||||
// ProtocolError represents WebSocket protocol errors.
|
||||
type ProtocolError struct {
|
||||
ErrorString string
|
||||
}
|
||||
|
||||
func (err *ProtocolError) Error() string { return err.ErrorString }
|
||||
|
||||
var (
|
||||
ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
|
||||
ErrBadScheme = &ProtocolError{"bad scheme"}
|
||||
ErrBadStatus = &ProtocolError{"bad status"}
|
||||
ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
|
||||
ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
|
||||
ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
|
||||
ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
|
||||
ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
|
||||
ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
|
||||
ErrBadFrame = &ProtocolError{"bad frame"}
|
||||
ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
|
||||
ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
|
||||
ErrBadRequestMethod = &ProtocolError{"bad method"}
|
||||
ErrNotSupported = &ProtocolError{"not supported"}
|
||||
)
|
||||
|
||||
// ErrFrameTooLarge is returned by Codec's Receive method if the payload size
// exceeds the limit set by Conn.MaxPayloadBytes.
|
||||
var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
|
||||
|
||||
// Addr is an implementation of net.Addr for WebSocket.
|
||||
type Addr struct {
|
||||
*url.URL
|
||||
}
|
||||
|
||||
// Network returns the network type for a WebSocket, "websocket".
|
||||
func (addr *Addr) Network() string { return "websocket" }
|
||||
|
||||
// Config is a WebSocket configuration
|
||||
type Config struct {
|
||||
// A WebSocket server address.
|
||||
Location *url.URL
|
||||
|
||||
// A Websocket client origin.
|
||||
Origin *url.URL
|
||||
|
||||
// WebSocket subprotocols.
|
||||
Protocol []string
|
||||
|
||||
// WebSocket protocol version.
|
||||
Version int
|
||||
|
||||
// TLS config for secure WebSocket (wss).
|
||||
TlsConfig *tls.Config
|
||||
|
||||
// Additional header fields to be sent in WebSocket opening handshake.
|
||||
Header http.Header
|
||||
|
||||
// Dialer used when opening websocket connections.
|
||||
Dialer *net.Dialer
|
||||
|
||||
handshakeData map[string]string
|
||||
}
|
||||
|
||||
// serverHandshaker is an interface to handle WebSocket server side handshake.
|
||||
type serverHandshaker interface {
|
||||
// ReadHandshake reads handshake request message from client.
|
||||
// Returns http response code and error if any.
|
||||
ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
|
||||
|
||||
// AcceptHandshake accepts the client handshake request and sends
|
||||
// handshake response back to client.
|
||||
AcceptHandshake(buf *bufio.Writer) (err error)
|
||||
|
||||
// NewServerConn creates a new WebSocket connection.
|
||||
NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
|
||||
}
|
||||
|
||||
// frameReader is an interface to read a WebSocket frame.
|
||||
type frameReader interface {
|
||||
// Reader is to read payload of the frame.
|
||||
io.Reader
|
||||
|
||||
// PayloadType returns payload type.
|
||||
PayloadType() byte
|
||||
|
||||
// HeaderReader returns a reader to read header of the frame.
|
||||
HeaderReader() io.Reader
|
||||
|
||||
// TrailerReader returns a reader to read trailer of the frame.
|
||||
// If it returns nil, there is no trailer in the frame.
|
||||
TrailerReader() io.Reader
|
||||
|
||||
// Len returns total length of the frame, including header and trailer.
|
||||
Len() int
|
||||
}
|
||||
|
||||
// frameReaderFactory is an interface to create a new frame reader.
|
||||
type frameReaderFactory interface {
|
||||
NewFrameReader() (r frameReader, err error)
|
||||
}
|
||||
|
||||
// frameWriter is an interface to write a WebSocket frame.
|
||||
type frameWriter interface {
|
||||
// Writer is to write payload of the frame.
|
||||
io.WriteCloser
|
||||
}
|
||||
|
||||
// frameWriterFactory is an interface to create new frame writer.
|
||||
type frameWriterFactory interface {
|
||||
NewFrameWriter(payloadType byte) (w frameWriter, err error)
|
||||
}
|
||||
|
||||
type frameHandler interface {
|
||||
HandleFrame(frame frameReader) (r frameReader, err error)
|
||||
WriteClose(status int) (err error)
|
||||
}
|
||||
|
||||
// Conn represents a WebSocket connection.
|
||||
//
|
||||
// Multiple goroutines may invoke methods on a Conn simultaneously.
|
||||
type Conn struct {
|
||||
config *Config
|
||||
request *http.Request
|
||||
|
||||
buf *bufio.ReadWriter
|
||||
rwc io.ReadWriteCloser
|
||||
|
||||
rio sync.Mutex
|
||||
frameReaderFactory
|
||||
frameReader
|
||||
|
||||
wio sync.Mutex
|
||||
frameWriterFactory
|
||||
|
||||
frameHandler
|
||||
PayloadType byte
|
||||
defaultCloseStatus int
|
||||
|
||||
// MaxPayloadBytes limits the size of frame payload received over Conn
|
||||
// by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
|
||||
MaxPayloadBytes int
|
||||
}
|
||||
|
||||
// Read implements the io.Reader interface:
|
||||
// it reads data of a frame from the WebSocket connection.
|
||||
// If msg is not large enough for the frame data, it fills the msg and the next Read
|
||||
// will read the rest of the frame data.
|
||||
// It reads Text or Binary frames.
|
||||
func (ws *Conn) Read(msg []byte) (n int, err error) {
|
||||
ws.rio.Lock()
|
||||
defer ws.rio.Unlock()
|
||||
again:
|
||||
if ws.frameReader == nil {
|
||||
frame, err := ws.frameReaderFactory.NewFrameReader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if ws.frameReader == nil {
|
||||
goto again
|
||||
}
|
||||
}
|
||||
n, err = ws.frameReader.Read(msg)
|
||||
if err == io.EOF {
|
||||
if trailer := ws.frameReader.TrailerReader(); trailer != nil {
|
||||
io.Copy(ioutil.Discard, trailer)
|
||||
}
|
||||
ws.frameReader = nil
|
||||
goto again
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Write implements the io.Writer interface:
|
||||
// it writes data as a frame to the WebSocket connection.
|
||||
func (ws *Conn) Write(msg []byte) (n int, err error) {
|
||||
ws.wio.Lock()
|
||||
defer ws.wio.Unlock()
|
||||
w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err = w.Write(msg)
|
||||
w.Close()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close implements the io.Closer interface.
|
||||
func (ws *Conn) Close() error {
|
||||
err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
|
||||
err1 := ws.rwc.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err1
|
||||
}
|
||||
|
||||
func (ws *Conn) IsClientConn() bool { return ws.request == nil }
|
||||
func (ws *Conn) IsServerConn() bool { return ws.request != nil }
|
||||
|
||||
// LocalAddr returns the WebSocket Origin for the connection for client, or
|
||||
// the WebSocket location for server.
|
||||
func (ws *Conn) LocalAddr() net.Addr {
|
||||
if ws.IsClientConn() {
|
||||
return &Addr{ws.config.Origin}
|
||||
}
|
||||
return &Addr{ws.config.Location}
|
||||
}
|
||||
|
||||
// RemoteAddr returns the WebSocket location for the connection for client, or
|
||||
// the Websocket Origin for server.
|
||||
func (ws *Conn) RemoteAddr() net.Addr {
|
||||
if ws.IsClientConn() {
|
||||
return &Addr{ws.config.Location}
|
||||
}
|
||||
return &Addr{ws.config.Origin}
|
||||
}
|
||||
|
||||
var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
|
||||
|
||||
// SetDeadline sets the connection's network read & write deadlines.
|
||||
func (ws *Conn) SetDeadline(t time.Time) error {
|
||||
if conn, ok := ws.rwc.(net.Conn); ok {
|
||||
return conn.SetDeadline(t)
|
||||
}
|
||||
return errSetDeadline
|
||||
}
|
||||
|
||||
// SetReadDeadline sets the connection's network read deadline.
|
||||
func (ws *Conn) SetReadDeadline(t time.Time) error {
|
||||
if conn, ok := ws.rwc.(net.Conn); ok {
|
||||
return conn.SetReadDeadline(t)
|
||||
}
|
||||
return errSetDeadline
|
||||
}
|
||||
|
||||
// SetWriteDeadline sets the connection's network write deadline.
|
||||
func (ws *Conn) SetWriteDeadline(t time.Time) error {
|
||||
if conn, ok := ws.rwc.(net.Conn); ok {
|
||||
return conn.SetWriteDeadline(t)
|
||||
}
|
||||
return errSetDeadline
|
||||
}
|
||||
|
||||
// Config returns the WebSocket config.
|
||||
func (ws *Conn) Config() *Config { return ws.config }
|
||||
|
||||
// Request returns the http request upgraded to the WebSocket.
|
||||
// It is nil for client side.
|
||||
func (ws *Conn) Request() *http.Request { return ws.request }
|
||||
|
||||
// Codec represents a symmetric pair of functions that implement a codec.
|
||||
type Codec struct {
|
||||
Marshal func(v interface{}) (data []byte, payloadType byte, err error)
|
||||
Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
|
||||
}
|
||||
|
||||
// Send sends v marshaled by cd.Marshal as single frame to ws.
|
||||
func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
|
||||
data, payloadType, err := cd.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ws.wio.Lock()
|
||||
defer ws.wio.Unlock()
|
||||
w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(data)
|
||||
w.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// Receive receives a single frame from ws, unmarshals it with cd.Unmarshal, and stores
// the result in v. The whole frame payload is read into an in-memory buffer; the maximum
// payload size is defined by ws.MaxPayloadBytes. If the frame payload exceeds that
// limit, ErrFrameTooLarge is returned; in this case the frame is not read off the wire
// completely. The next call to Receive reads and discards the leftover data of the
// previous oversized frame before processing the next frame.
|
||||
func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
|
||||
ws.rio.Lock()
|
||||
defer ws.rio.Unlock()
|
||||
if ws.frameReader != nil {
|
||||
_, err = io.Copy(ioutil.Discard, ws.frameReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ws.frameReader = nil
|
||||
}
|
||||
again:
|
||||
frame, err := ws.frameReaderFactory.NewFrameReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
frame, err = ws.frameHandler.HandleFrame(frame)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if frame == nil {
|
||||
goto again
|
||||
}
|
||||
maxPayloadBytes := ws.MaxPayloadBytes
|
||||
if maxPayloadBytes == 0 {
|
||||
maxPayloadBytes = DefaultMaxPayloadBytes
|
||||
}
|
||||
if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
|
||||
// payload size exceeds limit, no need to call Unmarshal
|
||||
//
|
||||
// set frameReader to current oversized frame so that
|
||||
// the next call to this function can drain leftover
|
||||
// data before processing the next frame
|
||||
ws.frameReader = frame
|
||||
return ErrFrameTooLarge
|
||||
}
|
||||
payloadType := frame.PayloadType()
|
||||
data, err := ioutil.ReadAll(frame)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cd.Unmarshal(data, payloadType, v)
|
||||
}
|
||||
|
||||
func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
|
||||
switch data := v.(type) {
|
||||
case string:
|
||||
return []byte(data), TextFrame, nil
|
||||
case []byte:
|
||||
return data, BinaryFrame, nil
|
||||
}
|
||||
return nil, UnknownFrame, ErrNotSupported
|
||||
}
|
||||
|
||||
func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
|
||||
switch data := v.(type) {
|
||||
case *string:
|
||||
*data = string(msg)
|
||||
return nil
|
||||
case *[]byte:
|
||||
*data = msg
|
||||
return nil
|
||||
}
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
/*
|
||||
Message is a codec to send/receive text/binary data in a frame on WebSocket connection.
|
||||
To send/receive text frame, use string type.
|
||||
To send/receive binary frame, use []byte type.
|
||||
|
||||
Trivial usage:
|
||||
|
||||
import "websocket"
|
||||
|
||||
// receive text frame
|
||||
var message string
|
||||
websocket.Message.Receive(ws, &message)
|
||||
|
||||
// send text frame
|
||||
message = "hello"
|
||||
websocket.Message.Send(ws, message)
|
||||
|
||||
// receive binary frame
|
||||
var data []byte
|
||||
websocket.Message.Receive(ws, &data)
|
||||
|
||||
// send binary frame
|
||||
data = []byte{0, 1, 2}
|
||||
websocket.Message.Send(ws, data)
|
||||
|
||||
*/
|
||||
var Message = Codec{marshal, unmarshal}
|
||||
|
||||
func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
|
||||
msg, err = json.Marshal(v)
|
||||
return msg, TextFrame, err
|
||||
}
|
||||
|
||||
func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
|
||||
return json.Unmarshal(msg, v)
|
||||
}
|
||||
|
||||
/*
|
||||
JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
|
||||
|
||||
Trivial usage:
|
||||
|
||||
import "websocket"
|
||||
|
||||
type T struct {
|
||||
Msg string
|
||||
Count int
|
||||
}
|
||||
|
||||
// receive JSON type T
|
||||
var data T
|
||||
websocket.JSON.Receive(ws, &data)
|
||||
|
||||
// send JSON type T
|
||||
websocket.JSON.Send(ws, data)
|
||||
*/
|
||||
var JSON = Codec{jsonMarshal, jsonUnmarshal}
|
||||
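Message and JSON are just two instances of Codec; any symmetric Marshal/Unmarshal pair fits the same shape. A hedged sketch of a gob-based codec (the Gob name and wsgob package are ours, not part of this library):

```go
// Package wsgob shows a custom golang.org/x/net/websocket Codec built on encoding/gob.
package wsgob

import (
	"bytes"
	"encoding/gob"

	"golang.org/x/net/websocket"
)

func gobMarshal(v interface{}) ([]byte, byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(v); err != nil {
		return nil, websocket.UnknownFrame, err
	}
	return buf.Bytes(), websocket.BinaryFrame, nil
}

func gobUnmarshal(data []byte, payloadType byte, v interface{}) error {
	return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
}

// Gob sends and receives gob-encoded values in binary frames, mirroring
// how Message and JSON are declared above.
var Gob = websocket.Codec{Marshal: gobMarshal, Unmarshal: gobUnmarshal}
```

Usage then mirrors the built-in codecs: Gob.Send(ws, v) to write and Gob.Receive(ws, &v) to read.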
3
vendor/golang.org/x/sys/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3
vendor/golang.org/x/sys/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
3
vendor/golang.org/x/text/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
3
vendor/golang.org/x/text/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
13
vendor/golang.org/x/text/doc.go
generated
vendored
@@ -1,13 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run gen.go

// text is a repository of text-related packages related to internationalization
// (i18n) and localization (l10n), such as character encodings, text
// transformations, and locale-specific text handling.
package text

// TODO: more documentation on general concepts, such as Transformers, use
// of normalization, etc.
292
vendor/golang.org/x/text/gen.go
generated
vendored
@@ -1,292 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// gen runs go generate on Unicode- and CLDR-related package in the text
|
||||
// repositories, taking into account dependencies and versions.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
)
|
||||
|
||||
var (
|
||||
verbose = flag.Bool("v", false, "verbose output")
|
||||
force = flag.Bool("force", false, "ignore failing dependencies")
|
||||
doCore = flag.Bool("core", false, "force an update to core")
|
||||
excludeList = flag.String("exclude", "",
|
||||
"comma-separated list of packages to exclude")
|
||||
|
||||
// The user can specify a selection of packages to build on the command line.
|
||||
args []string
|
||||
)
|
||||
|
||||
func exclude(pkg string) bool {
|
||||
if len(args) > 0 {
|
||||
return !contains(args, pkg)
|
||||
}
|
||||
return contains(strings.Split(*excludeList, ","), pkg)
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// - Better version handling.
|
||||
// - Generate tables for the core unicode package?
|
||||
// - Add generation for encodings. This requires some retooling here and there.
|
||||
// - Running repo-wide "long" tests.
|
||||
|
||||
var vprintf = fmt.Printf
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
args = flag.Args()
|
||||
if !*verbose {
|
||||
// Set vprintf to a no-op.
|
||||
vprintf = func(string, ...interface{}) (int, error) { return 0, nil }
|
||||
}
|
||||
|
||||
// TODO: create temporary cache directory to load files and create and set
|
||||
// a "cache" option if the user did not specify the UNICODE_DIR environment
|
||||
// variable. This will prevent duplicate downloads and also will enable long
|
||||
// tests, which really need to be run after each generated package.
|
||||
|
||||
updateCore := *doCore
|
||||
if gen.UnicodeVersion() != unicode.Version {
|
||||
fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n",
|
||||
gen.UnicodeVersion(),
|
||||
unicode.Version)
|
||||
// TODO: use collate to compare. Simple comparison will work, though,
|
||||
// until Unicode reaches version 10. To avoid circular dependencies, we
|
||||
// could use the NumericWeighter without using package collate using a
|
||||
// trivial Weighter implementation.
|
||||
if gen.UnicodeVersion() < unicode.Version && !*force {
|
||||
os.Exit(2)
|
||||
}
|
||||
updateCore = true
|
||||
}
|
||||
|
||||
var unicode = &dependency{}
|
||||
if updateCore {
|
||||
fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion())
|
||||
unicode = generate("unicode")
|
||||
|
||||
// Test some users of the unicode packages, especially the ones that
|
||||
// keep a mirrored table. These may need to be corrected by hand.
|
||||
generate("regexp", unicode)
|
||||
generate("strconv", unicode) // mimics Unicode table
|
||||
generate("strings", unicode)
|
||||
generate("testing", unicode) // mimics Unicode table
|
||||
}
|
||||
|
||||
var (
|
||||
cldr = generate("./unicode/cldr", unicode)
|
||||
language = generate("./language", cldr)
|
||||
internal = generate("./internal", unicode, language)
|
||||
norm = generate("./unicode/norm", unicode)
|
||||
rangetable = generate("./unicode/rangetable", unicode)
|
||||
cases = generate("./cases", unicode, norm, language, rangetable)
|
||||
width = generate("./width", unicode)
|
||||
bidi = generate("./unicode/bidi", unicode, norm, rangetable)
|
||||
mib = generate("./encoding/internal/identifier", unicode)
|
||||
_ = generate("./encoding/htmlindex", unicode, language, mib)
|
||||
_ = generate("./encoding/ianaindex", unicode, language, mib)
|
||||
_ = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi)
|
||||
_ = generate("./currency", unicode, cldr, language, internal)
|
||||
_ = generate("./internal/number", unicode, cldr, language, internal)
|
||||
_ = generate("./feature/plural", unicode, cldr, language, internal)
|
||||
_ = generate("./internal/export/idna", unicode, bidi, norm)
|
||||
_ = generate("./language/display", unicode, cldr, language, internal)
|
||||
_ = generate("./collate", unicode, norm, cldr, language, rangetable)
|
||||
_ = generate("./search", unicode, norm, cldr, language, rangetable)
|
||||
)
|
||||
all.Wait()
|
||||
|
||||
// Copy exported packages to the destination golang.org repo.
|
||||
copyExported("golang.org/x/net/idna")
|
||||
|
||||
if updateCore {
|
||||
copyVendored()
|
||||
}
|
||||
|
||||
if hasErrors {
|
||||
fmt.Println("FAIL")
|
||||
os.Exit(1)
|
||||
}
|
||||
vprintf("SUCCESS\n")
|
||||
}
|
||||
|
||||
var (
|
||||
all sync.WaitGroup
|
||||
hasErrors bool
|
||||
)
|
||||
|
||||
type dependency struct {
|
||||
sync.WaitGroup
|
||||
hasErrors bool
|
||||
}
|
||||
|
||||
func generate(pkg string, deps ...*dependency) *dependency {
|
||||
var wg dependency
|
||||
if exclude(pkg) {
|
||||
return &wg
|
||||
}
|
||||
wg.Add(1)
|
||||
all.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer all.Done()
|
||||
// Wait for dependencies to finish.
|
||||
for _, d := range deps {
|
||||
d.Wait()
|
||||
if d.hasErrors && !*force {
|
||||
fmt.Printf("--- ABORT: %s\n", pkg)
|
||||
wg.hasErrors = true
|
||||
return
|
||||
}
|
||||
}
|
||||
vprintf("=== GENERATE %s\n", pkg)
|
||||
args := []string{"generate"}
|
||||
if *verbose {
|
||||
args = append(args, "-v")
|
||||
}
|
||||
args = append(args, pkg)
|
||||
cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
|
||||
w := &bytes.Buffer{}
|
||||
cmd.Stderr = w
|
||||
cmd.Stdout = w
|
||||
if err := cmd.Run(); err != nil {
|
||||
fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err)
|
||||
hasErrors = true
|
||||
wg.hasErrors = true
|
||||
return
|
||||
}
|
||||
|
||||
vprintf("=== TEST %s\n", pkg)
|
||||
args[0] = "test"
|
||||
cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
|
||||
wt := &bytes.Buffer{}
|
||||
cmd.Stderr = wt
|
||||
cmd.Stdout = wt
|
||||
if err := cmd.Run(); err != nil {
|
||||
fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err)
|
||||
hasErrors = true
|
||||
wg.hasErrors = true
|
||||
return
|
||||
}
|
||||
vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w))
|
||||
fmt.Print(wt.String())
|
||||
}()
|
||||
return &wg
|
||||
}
|
||||
|
||||
// copyExported copies a package in x/text/internal/export to the
|
||||
// destination repository.
|
||||
func copyExported(p string) {
|
||||
copyPackage(
|
||||
filepath.Join("internal", "export", path.Base(p)),
|
||||
filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])),
|
||||
"golang.org/x/text/internal/export/"+path.Base(p),
|
||||
p)
|
||||
}
|
||||
|
||||
// copyVendored copies packages used by Go core into the vendored directory.
|
||||
func copyVendored() {
|
||||
root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x"))
|
||||
|
||||
err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error {
|
||||
if err != nil || !info.IsDir() || root == dir {
|
||||
return err
|
||||
}
|
||||
src := dir[len(root)+1:]
|
||||
const slash = string(filepath.Separator)
|
||||
if c := strings.Split(src, slash); c[0] == "text" {
|
||||
// Copy a text repo package from its normal location.
|
||||
src = strings.Join(c[1:], slash)
|
||||
} else {
|
||||
// Copy the vendored package if it exists in the export directory.
|
||||
src = filepath.Join("internal", "export", filepath.Base(src))
|
||||
}
|
||||
copyPackage(src, dir, "golang.org", "golang_org")
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Seeding directory %s has failed %v:", root, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// goGenRE is used to remove go:generate lines.
|
||||
var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n")
|
||||
|
||||
// copyPackage copies relevant files from a directory in x/text to the
// destination package directory. The destination package is assumed to have
// the same name. For each copied file go:generate lines are removed and
// package comments are rewritten to the new path.
|
||||
func copyPackage(dirSrc, dirDst, search, replace string) {
|
||||
err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error {
|
||||
base := filepath.Base(file)
|
||||
if err != nil || info.IsDir() ||
|
||||
!strings.HasSuffix(base, ".go") ||
|
||||
strings.HasSuffix(base, "_test.go") && !strings.HasPrefix(base, "example") ||
|
||||
// Don't process subdirectories.
|
||||
filepath.Dir(file) != dirSrc {
|
||||
return nil
|
||||
}
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) {
|
||||
return err
|
||||
}
|
||||
// Fix paths.
|
||||
b = bytes.Replace(b, []byte(search), []byte(replace), -1)
|
||||
// Remove go:generate lines.
|
||||
b = goGenRE.ReplaceAllLiteral(b, nil)
|
||||
comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n"
|
||||
if *doCore {
|
||||
comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. DO NOT EDIT.\n\n"
|
||||
}
|
||||
if !bytes.HasPrefix(b, []byte(comment)) {
|
||||
b = append([]byte(comment), b...)
|
||||
}
|
||||
if b, err = format.Source(b); err != nil {
|
||||
fmt.Println("Failed to format file:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
file = filepath.Join(dirDst, base)
|
||||
vprintf("=== COPY %s\n", file)
|
||||
return ioutil.WriteFile(file, b, 0666)
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println("Copying exported files failed:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func contains(a []string, s string) bool {
|
||||
for _, e := range a {
|
||||
if s == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func indent(b *bytes.Buffer) string {
|
||||
return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1)
|
||||
}
|
||||
351
vendor/golang.org/x/text/internal/gen/code.go
generated
vendored
Normal file
@@ -0,0 +1,351 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// This file contains utilities for generating code.
|
||||
|
||||
// TODO: other write methods like:
|
||||
// - slices, maps, types, etc.
|
||||
|
||||
// CodeWriter is a utility for writing structured code. It computes the content
|
||||
// hash and size of written content. It ensures there are newlines between
|
||||
// written code blocks.
|
||||
type CodeWriter struct {
|
||||
buf bytes.Buffer
|
||||
Size int
|
||||
Hash hash.Hash32 // content hash
|
||||
gob *gob.Encoder
|
||||
// For comments we skip the usual one-line separator if they are followed by
|
||||
// a code block.
|
||||
skipSep bool
|
||||
}
|
||||
|
||||
func (w *CodeWriter) Write(p []byte) (n int, err error) {
|
||||
return w.buf.Write(p)
|
||||
}
|
||||
|
||||
// NewCodeWriter returns a new CodeWriter.
|
||||
func NewCodeWriter() *CodeWriter {
|
||||
h := fnv.New32()
|
||||
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
|
||||
}
|
||||
|
||||
// WriteGoFile appends the buffer with the total size of all created structures
// and writes it as a Go file to the given file with the given package name.
|
||||
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo appends the buffer with the total size of all created structures and
// writes it as a Go file to the given writer with the given package name.
|
||||
func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
|
||||
sz := w.Size
|
||||
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
|
||||
defer w.buf.Reset()
|
||||
return WriteGo(out, pkg, w.buf.Bytes())
|
||||
}
|
||||
|
||||
func (w *CodeWriter) printf(f string, x ...interface{}) {
|
||||
fmt.Fprintf(w, f, x...)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) insertSep() {
|
||||
if w.skipSep {
|
||||
w.skipSep = false
|
||||
return
|
||||
}
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n")
|
||||
}
|
||||
|
||||
// WriteComment writes a comment block. All line starts are prefixed with "//".
|
||||
// Initial empty lines are gobbled. The indentation for the first line is
|
||||
// stripped from consecutive lines.
|
||||
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
|
||||
s := fmt.Sprintf(comment, args...)
|
||||
s = strings.Trim(s, "\n")
|
||||
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n// ")
|
||||
w.skipSep = true
|
||||
|
||||
// strip first indent level.
|
||||
sep := "\n"
|
||||
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
|
||||
sep += s[:1]
|
||||
}
|
||||
|
||||
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
|
||||
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSizeInfo(size int) {
|
||||
w.printf("// Size: %d bytes\n", size)
|
||||
}
|
||||
|
||||
// WriteConst writes a constant of the given name and value.
|
||||
func (w *CodeWriter) WriteConst(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("const %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
w.printf("\n")
|
||||
default:
|
||||
w.printf("const %s = %#v\n", name, x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVar writes a variable of the given name and value.
|
||||
func (w *CodeWriter) WriteVar(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
oldSize := w.Size
|
||||
sz := int(v.Type().Size())
|
||||
w.Size += sz
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
case reflect.Struct:
|
||||
w.gob.Encode(x)
|
||||
fallthrough
|
||||
case reflect.Slice, reflect.Array:
|
||||
w.printf("var %s = ", name)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
default:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.gob.Encode(x)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
}
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeValue(v reflect.Value) {
|
||||
x := v.Interface()
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
w.WriteString(v.String())
|
||||
case reflect.Array:
|
||||
// Don't double count: callers of WriteArray count on the size being
|
||||
// added, so we need to discount it here.
|
||||
w.Size -= int(v.Type().Size())
|
||||
w.writeSlice(x, true)
|
||||
case reflect.Slice:
|
||||
w.writeSlice(x, false)
|
||||
case reflect.Struct:
|
||||
w.printf("%s{\n", typeName(v.Interface()))
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
w.printf("%s: ", t.Field(i).Name)
|
||||
w.writeValue(v.Field(i))
|
||||
w.printf(",\n")
|
||||
}
|
||||
w.printf("}")
|
||||
default:
|
||||
w.printf("%#v", x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteString writes a string literal.
|
||||
func (w *CodeWriter) WriteString(s string) {
|
||||
s = strings.Replace(s, `\`, `\\`, -1)
|
||||
io.WriteString(w.Hash, s) // content hash
|
||||
w.Size += len(s)
|
||||
|
||||
const maxInline = 40
|
||||
if len(s) <= maxInline {
|
||||
w.printf("%q", s)
|
||||
return
|
||||
}
|
||||
|
||||
// We will render the string as a multi-line string.
|
||||
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
|
||||
|
||||
// When starting on its own line, go fmt indents line 2+ an extra level.
|
||||
n, max := maxWidth, maxWidth-4
|
||||
|
||||
// As per https://golang.org/issue/18078, the compiler has trouble
|
||||
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
|
||||
// for large N. We insert redundant, explicit parentheses to work around
|
||||
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
|
||||
// ... + s127) + etc + (etc + ... + sN).
|
||||
explicitParens, extraComment := len(s) > 128*1024, ""
|
||||
if explicitParens {
|
||||
w.printf(`(`)
|
||||
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
|
||||
}
|
||||
|
||||
// Print "" +\n, if a string does not start on its own line.
|
||||
b := w.buf.Bytes()
|
||||
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
|
||||
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
|
||||
n, max = maxWidth, maxWidth
|
||||
}
|
||||
|
||||
w.printf(`"`)
|
||||
|
||||
for sz, p, nLines := 0, 0, 0; p < len(s); {
|
||||
var r rune
|
||||
r, sz = utf8.DecodeRuneInString(s[p:])
|
||||
out := s[p : p+sz]
|
||||
chars := 1
|
||||
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
|
||||
switch sz {
|
||||
case 1:
|
||||
out = fmt.Sprintf("\\x%02x", s[p])
|
||||
case 2, 3:
|
||||
out = fmt.Sprintf("\\u%04x", r)
|
||||
case 4:
|
||||
out = fmt.Sprintf("\\U%08x", r)
|
||||
}
|
||||
chars = len(out)
|
||||
}
|
||||
if n -= chars; n < 0 {
|
||||
nLines++
|
||||
if explicitParens && nLines&63 == 63 {
|
||||
w.printf("\") + (\"")
|
||||
}
|
||||
w.printf("\" +\n\"")
|
||||
n = max - len(out)
|
||||
}
|
||||
w.printf("%s", out)
|
||||
p += sz
|
||||
}
|
||||
w.printf(`"`)
|
||||
if explicitParens {
|
||||
w.printf(`)`)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteSlice writes a slice value.
|
||||
func (w *CodeWriter) WriteSlice(x interface{}) {
|
||||
w.writeSlice(x, false)
|
||||
}
|
||||
|
||||
// WriteArray writes an array value.
|
||||
func (w *CodeWriter) WriteArray(x interface{}) {
|
||||
w.writeSlice(x, true)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
|
||||
v := reflect.ValueOf(x)
|
||||
w.gob.Encode(v.Len())
|
||||
w.Size += v.Len() * int(v.Type().Elem().Size())
|
||||
name := typeName(x)
|
||||
if isArray {
|
||||
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
|
||||
}
|
||||
if isArray {
|
||||
w.printf("%s{\n", name)
|
||||
} else {
|
||||
w.printf("%s{ // %d elements\n", name, v.Len())
|
||||
}
|
||||
|
||||
switch kind := v.Type().Elem().Kind(); kind {
|
||||
case reflect.String:
|
||||
for _, s := range x.([]string) {
|
||||
w.WriteString(s)
|
||||
w.printf(",\n")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// nLine and nBlock are the number of elements per line and block.
|
||||
nLine, nBlock, format := 8, 64, "%d,"
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
format = "%#02x,"
|
||||
case reflect.Uint16:
|
||||
format = "%#04x,"
|
||||
case reflect.Uint32:
|
||||
nLine, nBlock, format = 4, 32, "%#08x,"
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
nLine, nBlock, format = 4, 32, "%#016x,"
|
||||
case reflect.Int8:
|
||||
nLine = 16
|
||||
}
|
||||
n := nLine
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i%nBlock == 0 && v.Len() > nBlock {
|
||||
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
|
||||
}
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.Encode(x)
|
||||
w.printf(format, x)
|
||||
if n--; n == 0 {
|
||||
n = nLine
|
||||
w.printf("\n")
|
||||
}
|
||||
}
|
||||
w.printf("\n")
|
||||
case reflect.Struct:
|
||||
zero := reflect.Zero(v.Type().Elem()).Interface()
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.EncodeValue(v)
|
||||
if !reflect.DeepEqual(zero, x) {
|
||||
line := fmt.Sprintf("%#v,\n", x)
|
||||
line = line[strings.IndexByte(line, '{'):]
|
||||
w.printf("%d: ", i)
|
||||
w.printf(line)
|
||||
}
|
||||
}
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
|
||||
}
|
||||
default:
|
||||
panic("gen: slice elem type not supported")
|
||||
}
|
||||
w.printf("}")
|
||||
}
|
||||
|
||||
// WriteType writes a definition of the type of the given value and returns the
|
||||
// type name.
|
||||
func (w *CodeWriter) WriteType(x interface{}) string {
|
||||
t := reflect.TypeOf(x)
|
||||
w.printf("type %s struct {\n", t.Name())
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
|
||||
}
|
||||
w.printf("}\n")
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
// typeName returns the name of the go type of x.
|
||||
func typeName(x interface{}) string {
|
||||
t := reflect.ValueOf(x).Type()
|
||||
return strings.Replace(fmt.Sprint(t), "main.", "", 1)
|
||||
}
|
||||
281
vendor/golang.org/x/text/internal/gen/gen.go
generated
vendored
Normal file
@@ -0,0 +1,281 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gen contains common code for the various code generation tools in the
|
||||
// text repository. Its usage ensures consistency between tools.
|
||||
//
|
||||
// This package defines command line flags that are common to most generation
|
||||
// tools. The flags allow for specifying specific Unicode and CLDR versions
|
||||
// in the public Unicode data repository (http://www.unicode.org/Public).
|
||||
//
|
||||
// A local Unicode data mirror can be set through the flag -local or the
|
||||
// environment variable UNICODE_DIR. The former takes precedence. The local
|
||||
// directory should follow the same structure as the public repository.
|
||||
//
|
||||
// IANA data can also optionally be mirrored by putting it in the iana directory
|
||||
// rooted at the top of the local mirror. Beware, though, that IANA data is not
|
||||
// versioned. So it is up to the developer to use the right version.
|
||||
package gen // import "golang.org/x/text/internal/gen"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
url = flag.String("url",
|
||||
"http://www.unicode.org/Public",
|
||||
"URL of Unicode database directory")
|
||||
iana = flag.String("iana",
|
||||
"http://www.iana.org",
|
||||
"URL of the IANA repository")
|
||||
unicodeVersion = flag.String("unicode",
|
||||
getEnv("UNICODE_VERSION", unicode.Version),
|
||||
"unicode version to use")
|
||||
cldrVersion = flag.String("cldr",
|
||||
getEnv("CLDR_VERSION", cldr.Version),
|
||||
"cldr version to use")
|
||||
)
|
||||
|
||||
func getEnv(name, def string) string {
|
||||
if v := os.Getenv(name); v != "" {
|
||||
return v
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// Init performs common initialization for a gen command. It parses the flags
|
||||
// and sets up the standard logging parameters.
|
||||
func Init() {
|
||||
log.SetPrefix("")
|
||||
log.SetFlags(log.Lshortfile)
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
package %s
|
||||
|
||||
`
|
||||
|
||||
// UnicodeVersion reports the requested Unicode version.
|
||||
func UnicodeVersion() string {
|
||||
return *unicodeVersion
|
||||
}
|
||||
|
||||
// CLDRVersion reports the requested CLDR version.
|
||||
func CLDRVersion() string {
|
||||
return *cldrVersion
|
||||
}
|
||||
|
||||
// IsLocal reports whether data files are available locally.
|
||||
func IsLocal() bool {
|
||||
dir, err := localReadmeFile()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, err = os.Stat(dir); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// OpenUCDFile opens the requested UCD file. The file is specified relative to
|
||||
// the public Unicode root directory. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenUCDFile(file string) io.ReadCloser {
|
||||
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
|
||||
}
|
||||
|
||||
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
|
||||
// are any errors.
|
||||
func OpenCLDRCoreZip() io.ReadCloser {
|
||||
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
|
||||
}
|
||||
|
||||
// OpenUnicodeFile opens the requested file of the requested category from the
|
||||
// root of the Unicode data archive. The file is specified relative to the
|
||||
// public Unicode root directory. If version is "", it will use the default
|
||||
// Unicode version. It will call log.Fatal if there are any errors.
|
||||
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
|
||||
if version == "" {
|
||||
version = UnicodeVersion()
|
||||
}
|
||||
return openUnicode(path.Join(category, version, file))
|
||||
}
|
||||
|
||||
// OpenIANAFile opens the requested IANA file. The file is specified relative
|
||||
// to the IANA root, which is typically either http://www.iana.org or the
|
||||
// iana directory in the local mirror. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenIANAFile(path string) io.ReadCloser {
|
||||
return Open(*iana, "iana", path)
|
||||
}
|
||||
|
||||
var (
|
||||
dirMutex sync.Mutex
|
||||
localDir string
|
||||
)
|
||||
|
||||
const permissions = 0755
|
||||
|
||||
func localReadmeFile() (string, error) {
|
||||
p, err := build.Import("golang.org/x/text", "", build.FindOnly)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Could not locate package: %v", err)
|
||||
}
|
||||
return filepath.Join(p.Dir, "DATA", "README"), nil
|
||||
}
|
||||
|
||||
func getLocalDir() string {
|
||||
dirMutex.Lock()
|
||||
defer dirMutex.Unlock()
|
||||
|
||||
readme, err := localReadmeFile()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
dir := filepath.Dir(readme)
|
||||
if _, err := os.Stat(readme); err != nil {
|
||||
if err := os.MkdirAll(dir, permissions); err != nil {
|
||||
log.Fatalf("Could not create directory: %v", err)
|
||||
}
|
||||
ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
|
||||
|
||||
This directory contains downloaded files used to generate the various tables
|
||||
in the golang.org/x/text subrepo.
|
||||
|
||||
Note that the language subtag repo (iana/assignments/language-subtag-registry)
|
||||
and all other files in the iana subdirectory are not versioned and will need
|
||||
to be periodically manually updated. The easiest way to do this is to remove
|
||||
the entire iana directory. This is mostly of concern when updating the language
|
||||
package.
|
||||
`
|
||||
|
||||
// Open opens subdir/path if a local directory is specified and the file exists,
|
||||
// where subdir is a directory relative to the local root, or fetches it from
|
||||
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
|
||||
func Open(urlRoot, subdir, path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
|
||||
return open(file, urlRoot, path)
|
||||
}
|
||||
|
||||
func openUnicode(path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
|
||||
return open(file, *url, path)
|
||||
}
|
||||
|
||||
// TODO: automatically periodically update non-versioned files.
|
||||
|
||||
func open(file, urlRoot, path string) io.ReadCloser {
|
||||
if f, err := os.Open(file); err == nil {
|
||||
return f
|
||||
}
|
||||
r := get(urlRoot, path)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not download file: %v", err)
|
||||
}
|
||||
os.MkdirAll(filepath.Dir(file), permissions)
|
||||
if err := ioutil.WriteFile(file, b, permissions); err != nil {
|
||||
log.Fatalf("Could not create file: %v", err)
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
func get(root, path string) io.ReadCloser {
|
||||
url := root + "/" + path
|
||||
fmt.Printf("Fetching %s...", url)
|
||||
defer fmt.Println(" done.")
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Fatalf("HTTP GET: %v", err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
|
||||
}
|
||||
return resp.Body
|
||||
}
|
||||
|
||||
// TODO: use Write*Version in all applicable packages.
|
||||
|
||||
// WriteUnicodeVersion writes a constant for the Unicode version from which the
|
||||
// tables are generated.
|
||||
func WriteUnicodeVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
|
||||
}
|
||||
|
||||
// WriteCLDRVersion writes a constant for the CLDR version from which the
|
||||
// tables are generated.
|
||||
func WriteCLDRVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
|
||||
}
|
||||
|
||||
// WriteGoFile prepends a standard file comment and package statement to the
|
||||
// given bytes, applies gofmt, and writes them to a file with the given name.
|
||||
// It will call log.Fatal if there are any errors.
|
||||
func WriteGoFile(filename, pkg string, b []byte) {
|
||||
w, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer w.Close()
|
||||
if _, err = WriteGo(w, pkg, b); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo prepends a standard file comment and package statement to the given
|
||||
// bytes, applies gofmt, and writes them to w.
|
||||
func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
|
||||
src := []byte(fmt.Sprintf(header, pkg))
|
||||
src = append(src, b...)
|
||||
formatted, err := format.Source(src)
|
||||
if err != nil {
|
||||
// Print the generated code even in case of an error so that the
|
||||
// returned error can be meaningfully interpreted.
|
||||
n, _ = w.Write(src)
|
||||
return n, err
|
||||
}
|
||||
return w.Write(formatted)
|
||||
}
|
||||
|
||||
// Repackage rewrites a Go file from belonging to package main to belonging to
|
||||
// the given package.
|
||||
func Repackage(inFile, outFile, pkg string) {
|
||||
src, err := ioutil.ReadFile(inFile)
|
||||
if err != nil {
|
||||
log.Fatalf("reading %s: %v", inFile, err)
|
||||
}
|
||||
const toDelete = "package main\n\n"
|
||||
i := bytes.Index(src, []byte(toDelete))
|
||||
if i < 0 {
|
||||
log.Fatalf("Could not find %q in %s.", toDelete, inFile)
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
w.Write(src[i+len(toDelete):])
|
||||
WriteGoFile(outFile, pkg, w.Bytes())
|
||||
}
|
||||
58
vendor/golang.org/x/text/internal/triegen/compact.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package triegen
|
||||
|
||||
// This file defines Compacter and its implementations.
|
||||
|
||||
import "io"
|
||||
|
||||
// A Compacter generates an alternative, more space-efficient way to store a
|
||||
// trie value block. A trie value block holds all possible values for the last
|
||||
// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
|
||||
// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
|
||||
type Compacter interface {
|
||||
// Size reports whether the Compacter could encode the given block, as well
// as its size if it can. len(v) is always 64.
|
||||
Size(v []uint64) (sz int, ok bool)
|
||||
|
||||
// Store stores the block using the Compacter's compression method.
|
||||
// It returns a handle with which the block can be retrieved.
|
||||
// len(v) is always 64.
|
||||
Store(v []uint64) uint32
|
||||
|
||||
// Print writes the data structures associated to the given store to w.
|
||||
Print(w io.Writer) error
|
||||
|
||||
// Handler returns the name of a function that gets called during trie
|
||||
// lookup for blocks generated by the Compacter. The function should be of
|
||||
// the form func (n uint32, b byte) uint64, where n is the index returned by
|
||||
// the Compacter's Store method and b is the last byte of the UTF-8
|
||||
// encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
|
||||
// block.
|
||||
Handler() string
|
||||
}
|
||||
|
||||
// simpleCompacter is the default Compacter used by builder. It implements a
|
||||
// normal trie block.
|
||||
type simpleCompacter builder
|
||||
|
||||
func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) {
|
||||
return blockSize * b.ValueSize, true
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Store(v []uint64) uint32 {
|
||||
h := uint32(len(b.ValueBlocks) - blockOffset)
|
||||
b.ValueBlocks = append(b.ValueBlocks, v)
|
||||
return h
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Print(io.Writer) error {
|
||||
// Structures are printed in print.go.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Handler() string {
|
||||
panic("Handler should be special-cased for this Compacter")
|
||||
}
|
||||
251
vendor/golang.org/x/text/internal/triegen/print.go
generated
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package triegen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// print writes all the data structures as well as the code necessary to use the
|
||||
// trie to w.
|
||||
func (b *builder) print(w io.Writer) error {
|
||||
b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize
|
||||
b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize
|
||||
b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize
|
||||
b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize
|
||||
b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize
|
||||
|
||||
// If we only have one root trie, all starter blocks are at position 0 and
|
||||
// we can access the arrays directly.
|
||||
if len(b.Trie) == 1 {
|
||||
// At this point we cannot refer to the generated tables directly.
|
||||
b.ASCIIBlock = b.Name + "Values"
|
||||
b.StarterBlock = b.Name + "Index"
|
||||
} else {
|
||||
// Otherwise we need to have explicit starter indexes in the trie
|
||||
// structure.
|
||||
b.ASCIIBlock = "t.ascii"
|
||||
b.StarterBlock = "t.utf8Start"
|
||||
}
|
||||
|
||||
b.SourceType = "[]byte"
|
||||
if err := lookupGen.Execute(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.SourceType = "string"
|
||||
if err := lookupGen.Execute(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := trieGen.Execute(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, c := range b.Compactions {
|
||||
if err := c.c.Print(w); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func printValues(n int, values []uint64) string {
|
||||
w := &bytes.Buffer{}
|
||||
boff := n * blockSize
|
||||
fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff)
|
||||
var newline bool
|
||||
for i, v := range values {
|
||||
if i%6 == 0 {
|
||||
newline = true
|
||||
}
|
||||
if v != 0 {
|
||||
if newline {
|
||||
fmt.Fprintf(w, "\n")
|
||||
newline = false
|
||||
}
|
||||
fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
func printIndex(b *builder, nr int, n *node) string {
|
||||
w := &bytes.Buffer{}
|
||||
boff := nr * blockSize
|
||||
fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff)
|
||||
var newline bool
|
||||
for i, c := range n.children {
|
||||
if i%8 == 0 {
|
||||
newline = true
|
||||
}
|
||||
if c != nil {
|
||||
v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index)
|
||||
if v != 0 {
|
||||
if newline {
|
||||
fmt.Fprintf(w, "\n")
|
||||
newline = false
|
||||
}
|
||||
fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
var (
|
||||
trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{
|
||||
"printValues": printValues,
|
||||
"printIndex": printIndex,
|
||||
"title": strings.Title,
|
||||
"dec": func(x int) int { return x - 1 },
|
||||
"psize": func(n int) string {
|
||||
return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024)
|
||||
},
|
||||
}).Parse(trieTemplate))
|
||||
lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate))
|
||||
)
|
||||
|
||||
// TODO: consider the return type of lookup. It could be uint64, even if the
|
||||
// internal value type is smaller. We will have to verify this with the
|
||||
// performance of unicode/norm, which is very sensitive to such changes.
|
||||
const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}}
|
||||
// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}.
|
||||
type {{.Name}}Trie struct { {{if $multi}}
|
||||
ascii []{{.ValueType}} // index for ASCII bytes
|
||||
utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0
|
||||
{{end}}}
|
||||
|
||||
func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}}
|
||||
h := {{.Name}}TrieHandles[i]
|
||||
return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] }
|
||||
}
|
||||
|
||||
type {{.Name}}TrieHandle struct {
|
||||
ascii, multi {{.IndexType}}
|
||||
}
|
||||
|
||||
// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes
|
||||
var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{
|
||||
{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}}
|
||||
{{end}}}{{else}}
|
||||
return &{{.Name}}Trie{}
|
||||
}
|
||||
{{end}}
|
||||
// lookupValue determines the type of block n and looks up the value for b.
|
||||
func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} {
|
||||
switch { {{range $i, $c := .Compactions}}
|
||||
{{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}}
|
||||
n -= {{$c.Offset}}{{end}}
|
||||
return {{print $b.ValueType}}({{$c.Handler}}){{end}}
|
||||
}
|
||||
}
|
||||
|
||||
// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes
|
||||
// The third block is the zero block.
|
||||
var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} {
|
||||
{{range $i, $v := .ValueBlocks}}{{printValues $i $v}}
|
||||
{{end}}}
|
||||
|
||||
// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes
|
||||
// Block 0 is the zero block.
|
||||
var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} {
|
||||
{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}}
|
||||
{{end}}}
|
||||
`
|
||||
|
||||
// TODO: consider allowing zero-length strings after evaluating performance with
|
||||
// unicode/norm.
|
||||
const lookupTemplate = `
|
||||
// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and
|
||||
// the width in bytes of this encoding. The size will be 0 if s does not
|
||||
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
|
||||
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) {
|
||||
c0 := s[0]
|
||||
switch {
|
||||
case c0 < 0x80: // is ASCII
|
||||
return {{.ASCIIBlock}}[c0], 1
|
||||
case c0 < 0xC2:
|
||||
return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
|
||||
case c0 < 0xE0: // 2-byte UTF-8
|
||||
if len(s) < 2 {
|
||||
return 0, 0
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
c1 := s[1]
|
||||
if c1 < 0x80 || 0xC0 <= c1 {
|
||||
return 0, 1 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
return t.lookupValue(uint32(i), c1), 2
|
||||
case c0 < 0xF0: // 3-byte UTF-8
|
||||
if len(s) < 3 {
|
||||
return 0, 0
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
c1 := s[1]
|
||||
if c1 < 0x80 || 0xC0 <= c1 {
|
||||
return 0, 1 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
o := uint32(i)<<6 + uint32(c1)
|
||||
i = {{.Name}}Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < 0x80 || 0xC0 <= c2 {
|
||||
return 0, 2 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
return t.lookupValue(uint32(i), c2), 3
|
||||
case c0 < 0xF8: // 4-byte UTF-8
|
||||
if len(s) < 4 {
|
||||
return 0, 0
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
c1 := s[1]
|
||||
if c1 < 0x80 || 0xC0 <= c1 {
|
||||
return 0, 1 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
o := uint32(i)<<6 + uint32(c1)
|
||||
i = {{.Name}}Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < 0x80 || 0xC0 <= c2 {
|
||||
return 0, 2 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
o = uint32(i)<<6 + uint32(c2)
|
||||
i = {{.Name}}Index[o]
|
||||
c3 := s[3]
|
||||
if c3 < 0x80 || 0xC0 <= c3 {
|
||||
return 0, 3 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
return t.lookupValue(uint32(i), c3), 4
|
||||
}
|
||||
// Illegal rune
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s.
|
||||
// s must start with a full and valid UTF-8 encoded rune.
|
||||
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} {
|
||||
c0 := s[0]
|
||||
if c0 < 0x80 { // is ASCII
|
||||
return {{.ASCIIBlock}}[c0]
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
if c0 < 0xE0 { // 2-byte UTF-8
|
||||
return t.lookupValue(uint32(i), s[1])
|
||||
}
|
||||
i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])]
|
||||
if c0 < 0xF0 { // 3-byte UTF-8
|
||||
return t.lookupValue(uint32(i), s[2])
|
||||
}
|
||||
i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])]
|
||||
if c0 < 0xF8 { // 4-byte UTF-8
|
||||
return t.lookupValue(uint32(i), s[3])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
`
|
||||
494
vendor/golang.org/x/text/internal/triegen/triegen.go
generated
vendored
Normal file
@@ -0,0 +1,494 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package triegen implements a code generator for a trie for associating
|
||||
// unsigned integer values with UTF-8 encoded runes.
|
||||
//
|
||||
// Many of the go.text packages use tries for storing per-rune information. A
|
||||
// trie is especially useful if many of the runes have the same value. If this
|
||||
// is the case, many blocks can be expected to be shared allowing for
|
||||
// information on many runes to be stored in little space.
|
||||
//
|
||||
// As most of the lookups are done directly on []byte slices, the tries use the
|
||||
// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
|
||||
// runes and contributes a little bit to better performance. It also naturally
|
||||
// provides a fast path for ASCII.
|
||||
//
|
||||
// Space is also an issue. There are many code points defined in Unicode and as
|
||||
// a result tables can get quite large. So every byte counts. The triegen
|
||||
// package automatically chooses the smallest integer values to represent the
|
||||
// tables. Compacters allow further compression of the trie by allowing for
|
||||
// alternative representations of individual trie blocks.
|
||||
//
|
||||
// triegen allows generating multiple tries as a single structure. This is
|
||||
// useful when, for example, one wants to generate tries for several languages
|
||||
// that have a lot of values in common. Some existing libraries for
|
||||
// internationalization store all per-language data as a dynamically loadable
|
||||
// chunk. The go.text packages are designed with the assumption that the user
|
||||
// typically wants to compile in support for all supported languages, in line
|
||||
// with the approach common to Go to create a single standalone binary. The
|
||||
// multi-root trie approach can give significant storage savings in this
|
||||
// scenario.
|
||||
//
|
||||
// triegen generates both tables and code. The code is optimized to use the
|
||||
// automatically chosen data types. The following code is generated for a Trie
|
||||
// or multiple Tries named "foo":
|
||||
// - type fooTrie
|
||||
// The trie type.
|
||||
//
|
||||
// - func newFooTrie(x int) *fooTrie
|
||||
// Trie constructor, where x is the index of the trie passed to Gen.
|
||||
//
|
||||
// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
|
||||
// The lookup method, where uintX is automatically chosen.
|
||||
//
|
||||
// - func lookupString, lookupUnsafe and lookupStringUnsafe
|
||||
// Variants of the above.
|
||||
//
|
||||
// - var fooValues and fooIndex and any tables generated by Compacters.
|
||||
// The core trie data.
|
||||
//
|
||||
// - var fooTrieHandles
|
||||
// Indexes of starter blocks in case of multiple trie roots.
|
||||
//
|
||||
// It is recommended that users test the generated trie by checking the returned
|
||||
// value for every rune. Such exhaustive tests are possible as the number of
|
||||
// runes in Unicode is limited.
|
||||
package triegen // import "golang.org/x/text/internal/triegen"
|
||||
|
||||
// TODO: Arguably, the internally optimized data types would not have to be
|
||||
// exposed in the generated API. We could also investigate not generating the
|
||||
// code, but using it through a package. We would have to investigate the impact
|
||||
// on performance of making such change, though. For packages like unicode/norm,
|
||||
// small changes like this could tank performance.
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"log"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// builder builds a set of tries for associating values with runes. The set of
|
||||
// tries can share common index and value blocks.
|
||||
type builder struct {
|
||||
Name string
|
||||
|
||||
// ValueType is the type of the trie values looked up.
|
||||
ValueType string
|
||||
|
||||
// ValueSize is the byte size of the ValueType.
|
||||
ValueSize int
|
||||
|
||||
// IndexType is the type of trie index values used for all UTF-8 bytes of
|
||||
// a rune except the last one.
|
||||
IndexType string
|
||||
|
||||
// IndexSize is the byte size of the IndexType.
|
||||
IndexSize int
|
||||
|
||||
// SourceType is used when generating the lookup functions. If the user
|
||||
// requests StringSupport, all lookup functions will be generated for
|
||||
// string input as well.
|
||||
SourceType string
|
||||
|
||||
Trie []*Trie
|
||||
|
||||
IndexBlocks []*node
|
||||
ValueBlocks [][]uint64
|
||||
Compactions []compaction
|
||||
Checksum uint64
|
||||
|
||||
ASCIIBlock string
|
||||
StarterBlock string
|
||||
|
||||
indexBlockIdx map[uint64]int
|
||||
valueBlockIdx map[uint64]nodeIndex
|
||||
asciiBlockIdx map[uint64]int
|
||||
|
||||
// Stats are used to fill out the template.
|
||||
Stats struct {
|
||||
NValueEntries int
|
||||
NValueBytes int
|
||||
NIndexEntries int
|
||||
NIndexBytes int
|
||||
NHandleBytes int
|
||||
}
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
// A nodeIndex encodes the index of a node, which is defined by the compaction
|
||||
// which stores it and an index within the compaction. For internal nodes, the
|
||||
// compaction is always 0.
|
||||
type nodeIndex struct {
|
||||
compaction int
|
||||
index int
|
||||
}
|
||||
|
||||
// compaction keeps track of stats used for the compaction.
|
||||
type compaction struct {
|
||||
c Compacter
|
||||
blocks []*node
|
||||
maxHandle uint32
|
||||
totalSize int
|
||||
|
||||
// Used by template-based generator and thus exported.
|
||||
Cutoff uint32
|
||||
Offset uint32
|
||||
Handler string
|
||||
}
|
||||
|
||||
func (b *builder) setError(err error) {
|
||||
if b.err == nil {
|
||||
b.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// An Option can be passed to Gen.
|
||||
type Option func(b *builder) error
|
||||
|
||||
// Compact configures the trie generator to use the given Compacter.
|
||||
func Compact(c Compacter) Option {
|
||||
return func(b *builder) error {
|
||||
b.Compactions = append(b.Compactions, compaction{
|
||||
c: c,
|
||||
Handler: c.Handler() + "(n, b)"})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Gen writes Go code for a shared trie lookup structure to w for the given
|
||||
// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will
|
||||
// return the *nameTrie for tries[x]. A value can be looked up by using one of
|
||||
// the various lookup methods defined on nameTrie. It returns the table size of
|
||||
// the generated trie.
|
||||
func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) {
|
||||
// The index contains two dummy blocks, followed by the zero block. The zero
|
||||
// block is at offset 0x80, so that the offset for the zero block for
|
||||
// continuation bytes is 0.
|
||||
b := &builder{
|
||||
Name: name,
|
||||
Trie: tries,
|
||||
IndexBlocks: []*node{{}, {}, {}},
|
||||
Compactions: []compaction{{
|
||||
Handler: name + "Values[n<<6+uint32(b)]",
|
||||
}},
|
||||
// The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero
|
||||
// block.
|
||||
indexBlockIdx: map[uint64]int{0: 0},
|
||||
valueBlockIdx: map[uint64]nodeIndex{0: {}},
|
||||
asciiBlockIdx: map[uint64]int{},
|
||||
}
|
||||
b.Compactions[0].c = (*simpleCompacter)(b)
|
||||
|
||||
for _, f := range opts {
|
||||
if err := f(b); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
b.build()
|
||||
if b.err != nil {
|
||||
return 0, b.err
|
||||
}
|
||||
if err = b.print(w); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return b.Size(), nil
|
||||
}
|
||||
|
||||
// A Trie represents a single root node of a trie. A builder may build several
|
||||
// overlapping tries at once.
|
||||
type Trie struct {
|
||||
root *node
|
||||
|
||||
hiddenTrie
|
||||
}
|
||||
|
||||
// hiddenTrie contains values we want to be visible to the template generator,
|
||||
// but hidden from the API documentation.
|
||||
type hiddenTrie struct {
|
||||
Name string
|
||||
Checksum uint64
|
||||
ASCIIIndex int
|
||||
StarterIndex int
|
||||
}
|
||||
|
||||
// NewTrie returns a new trie root.
|
||||
func NewTrie(name string) *Trie {
|
||||
return &Trie{
|
||||
&node{
|
||||
children: make([]*node, blockSize),
|
||||
values: make([]uint64, utf8.RuneSelf),
|
||||
},
|
||||
hiddenTrie{Name: name},
|
||||
}
|
||||
}
|
||||
|
||||
// Gen is a convenience wrapper around the Gen func, passing t as the only trie
// and using the name passed to NewTrie. It returns the size of the generated
// tables.
|
||||
func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
|
||||
return Gen(w, t.Name, []*Trie{t}, opts...)
|
||||
}
|
||||
|
||||
// node is a node of the intermediate trie structure.
|
||||
type node struct {
|
||||
// children holds this node's children. It is always of length 64.
|
||||
// A child node may be nil.
|
||||
children []*node
|
||||
|
||||
// values contains the values of this node. If it is non-nil, this node is
|
||||
// either a root or leaf node:
|
||||
// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
|
||||
// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
|
||||
values []uint64
|
||||
|
||||
index nodeIndex
|
||||
}
|
||||
|
||||
// Insert associates value with the given rune. Insert will panic if a non-zero
|
||||
// value is passed for an invalid rune.
|
||||
func (t *Trie) Insert(r rune, value uint64) {
|
||||
if value == 0 {
|
||||
return
|
||||
}
|
||||
s := string(r)
|
||||
if []rune(s)[0] != r && value != 0 {
|
||||
// Note: The UCD tables will always assign what amounts to a zero value
|
||||
// to a surrogate. Allowing a zero value for an illegal rune allows
|
||||
// users to iterate over [0..MaxRune] without having to explicitly
|
||||
// exclude surrogates, which would be tedious.
|
||||
panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
|
||||
}
|
||||
if len(s) == 1 {
|
||||
// It is a root node value (ASCII).
|
||||
t.root.values[s[0]] = value
|
||||
return
|
||||
}
|
||||
|
||||
n := t.root
|
||||
for ; len(s) > 1; s = s[1:] {
|
||||
if n.children == nil {
|
||||
n.children = make([]*node, blockSize)
|
||||
}
|
||||
p := s[0] % blockSize
|
||||
c := n.children[p]
|
||||
if c == nil {
|
||||
c = &node{}
|
||||
n.children[p] = c
|
||||
}
|
||||
if len(s) > 2 && c.values != nil {
|
||||
log.Fatalf("triegen: insert(%U): found internal node with values", r)
|
||||
}
|
||||
n = c
|
||||
}
|
||||
if n.values == nil {
|
||||
n.values = make([]uint64, blockSize)
|
||||
}
|
||||
if n.children != nil {
|
||||
log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
|
||||
}
|
||||
n.values[s[0]-0x80] = value
|
||||
}
|
||||
|
||||
// Size returns the number of bytes the generated trie will take to store. It
|
||||
// needs to be exported as it is used in the templates.
|
||||
func (b *builder) Size() int {
|
||||
// Index blocks.
|
||||
sz := len(b.IndexBlocks) * blockSize * b.IndexSize
|
||||
|
||||
// Skip the first compaction, which represents the normal value blocks, as
|
||||
// its totalSize does not account for the ASCII blocks, which are managed
|
||||
// separately.
|
||||
sz += len(b.ValueBlocks) * blockSize * b.ValueSize
|
||||
for _, c := range b.Compactions[1:] {
|
||||
sz += c.totalSize
|
||||
}
|
||||
|
||||
// TODO: this computation does not account for the fixed overhead of using
|
||||
// a compaction, either code or data. As for data, though, the typical
|
||||
// overhead of data is in the order of bytes (2 bytes for cases). Further,
|
||||
// the savings of using a compaction should anyway be substantial for it to
|
||||
// be worth it.
|
||||
|
||||
// For multi-root tries, we also need to account for the handles.
|
||||
if len(b.Trie) > 1 {
|
||||
sz += 2 * b.IndexSize * len(b.Trie)
|
||||
}
|
||||
return sz
|
||||
}
|
||||
|
||||
func (b *builder) build() {
|
||||
// Compute the sizes of the values.
|
||||
var vmax uint64
|
||||
for _, t := range b.Trie {
|
||||
vmax = maxValue(t.root, vmax)
|
||||
}
|
||||
b.ValueType, b.ValueSize = getIntType(vmax)
|
||||
|
||||
// Compute all block allocations.
|
||||
// TODO: first compute the ASCII blocks for all tries and then the other
|
||||
// nodes. ASCII blocks are more restricted in placement, as they require two
|
||||
// blocks to be placed consecutively. Processing them first may improve
|
||||
// sharing (at least one zero block can be expected to be saved.)
|
||||
for _, t := range b.Trie {
|
||||
b.Checksum += b.buildTrie(t)
|
||||
}
|
||||
|
||||
// Compute the offsets for all the Compacters.
|
||||
offset := uint32(0)
|
||||
for i := range b.Compactions {
|
||||
c := &b.Compactions[i]
|
||||
c.Offset = offset
|
||||
offset += c.maxHandle + 1
|
||||
c.Cutoff = offset
|
||||
}
|
||||
|
||||
// Compute the sizes of indexes.
|
||||
// TODO: different byte positions could have different sizes. So far we have
|
||||
// not found a case where this is beneficial.
|
||||
imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff)
|
||||
for _, ib := range b.IndexBlocks {
|
||||
if x := uint64(ib.index.index); x > imax {
|
||||
imax = x
|
||||
}
|
||||
}
|
||||
b.IndexType, b.IndexSize = getIntType(imax)
|
||||
}
|
||||
|
||||
func maxValue(n *node, max uint64) uint64 {
|
||||
if n == nil {
|
||||
return max
|
||||
}
|
||||
for _, c := range n.children {
|
||||
max = maxValue(c, max)
|
||||
}
|
||||
for _, v := range n.values {
|
||||
if max < v {
|
||||
max = v
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
func getIntType(v uint64) (string, int) {
|
||||
switch {
|
||||
case v < 1<<8:
|
||||
return "uint8", 1
|
||||
case v < 1<<16:
|
||||
return "uint16", 2
|
||||
case v < 1<<32:
|
||||
return "uint32", 4
|
||||
}
|
||||
return "uint64", 8
|
||||
}
|
||||
|
||||
const (
|
||||
blockSize = 64
|
||||
|
||||
// Subtract two blocks to offset 0x80, the first continuation byte.
|
||||
blockOffset = 2
|
||||
|
||||
// Subtract three blocks to offset 0xC0, the first non-ASCII starter.
|
||||
rootBlockOffset = 3
|
||||
)
|
||||
|
||||
var crcTable = crc64.MakeTable(crc64.ISO)
|
||||
|
||||
func (b *builder) buildTrie(t *Trie) uint64 {
|
||||
n := t.root
|
||||
|
||||
// Get the ASCII offset. For the first trie, the ASCII block will be at
|
||||
// position 0.
|
||||
hasher := crc64.New(crcTable)
|
||||
binary.Write(hasher, binary.BigEndian, n.values)
|
||||
hash := hasher.Sum64()
|
||||
|
||||
v, ok := b.asciiBlockIdx[hash]
|
||||
if !ok {
|
||||
v = len(b.ValueBlocks)
|
||||
b.asciiBlockIdx[hash] = v
|
||||
|
||||
b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:])
|
||||
if v == 0 {
|
||||
// Add the zero block at position 2 so that it will be assigned a
|
||||
// zero reference in the lookup blocks.
|
||||
// TODO: always do this? This would allow us to remove a check from
|
||||
// the trie lookup, but at the expense of extra space. Analyze
|
||||
// performance for unicode/norm.
|
||||
b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize))
|
||||
}
|
||||
}
|
||||
t.ASCIIIndex = v
|
||||
|
||||
// Compute remaining offsets.
|
||||
t.Checksum = b.computeOffsets(n, true)
|
||||
// We already subtracted the normal blockOffset from the index. Subtract the
|
||||
// difference for starter bytes.
|
||||
t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset)
|
||||
return t.Checksum
|
||||
}
|
||||
|
||||
func (b *builder) computeOffsets(n *node, root bool) uint64 {
|
||||
// For the first trie, the root lookup block will be at position 3, which is
|
||||
// the offset for UTF-8 non-ASCII starter bytes.
|
||||
first := len(b.IndexBlocks) == rootBlockOffset
|
||||
if first {
|
||||
b.IndexBlocks = append(b.IndexBlocks, n)
|
||||
}
|
||||
|
||||
// We special-case the cases where all values recursively are 0. This allows
|
||||
// for the use of a zero block to which all such values can be directed.
|
||||
hash := uint64(0)
|
||||
if n.children != nil || n.values != nil {
|
||||
hasher := crc64.New(crcTable)
|
||||
for _, c := range n.children {
|
||||
var v uint64
|
||||
if c != nil {
|
||||
v = b.computeOffsets(c, false)
|
||||
}
|
||||
binary.Write(hasher, binary.BigEndian, v)
|
||||
}
|
||||
binary.Write(hasher, binary.BigEndian, n.values)
|
||||
hash = hasher.Sum64()
|
||||
}
|
||||
|
||||
if first {
|
||||
b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
|
||||
}
|
||||
|
||||
// Compacters don't apply to internal nodes.
|
||||
if n.children != nil {
|
||||
v, ok := b.indexBlockIdx[hash]
|
||||
if !ok {
|
||||
v = len(b.IndexBlocks) - blockOffset
|
||||
b.IndexBlocks = append(b.IndexBlocks, n)
|
||||
b.indexBlockIdx[hash] = v
|
||||
}
|
||||
n.index = nodeIndex{0, v}
|
||||
} else {
|
||||
h, ok := b.valueBlockIdx[hash]
|
||||
if !ok {
|
||||
bestI, bestSize := 0, blockSize*b.ValueSize
|
||||
for i, c := range b.Compactions[1:] {
|
||||
if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
|
||||
bestI, bestSize = i+1, sz
|
||||
}
|
||||
}
|
||||
c := &b.Compactions[bestI]
|
||||
c.totalSize += bestSize
|
||||
v := c.c.Store(n.values)
|
||||
if c.maxHandle < v {
|
||||
c.maxHandle = v
|
||||
}
|
||||
h = nodeIndex{bestI, int(v)}
|
||||
b.valueBlockIdx[hash] = h
|
||||
}
|
||||
n.index = h
|
||||
}
|
||||
return hash
|
||||
}
|
||||
376
vendor/golang.org/x/text/internal/ucd/ucd.go
generated
vendored
Normal file
@@ -0,0 +1,376 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ucd provides a parser for Unicode Character Database files, the
|
||||
// format of which is defined in http://www.unicode.org/reports/tr44/. See
|
||||
// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
|
||||
//
|
||||
// It currently does not support substitutions of missing fields.
|
||||
package ucd // import "golang.org/x/text/internal/ucd"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// UnicodeData.txt fields.
|
||||
const (
|
||||
CodePoint = iota
|
||||
Name
|
||||
GeneralCategory
|
||||
CanonicalCombiningClass
|
||||
BidiClass
|
||||
DecompMapping
|
||||
DecimalValue
|
||||
DigitValue
|
||||
NumericValue
|
||||
BidiMirrored
|
||||
Unicode1Name
|
||||
ISOComment
|
||||
SimpleUppercaseMapping
|
||||
SimpleLowercaseMapping
|
||||
SimpleTitlecaseMapping
|
||||
)
|
||||
|
||||
// Parse calls f for each entry in the given reader of a UCD file. It will close
|
||||
// the reader upon return. It will call log.Fatal if any error occurred.
|
||||
//
|
||||
// This implements the most common usage pattern of using Parser.
|
||||
func Parse(r io.ReadCloser, f func(p *Parser)) {
|
||||
defer r.Close()
|
||||
|
||||
p := New(r)
|
||||
for p.Next() {
|
||||
f(p)
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
r.Close() // os.Exit will cause defers not to be called.
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// An Option is used to configure a Parser.
|
||||
type Option func(p *Parser)
|
||||
|
||||
func keepRanges(p *Parser) {
|
||||
p.keepRanges = true
|
||||
}
|
||||
|
||||
var (
|
||||
// KeepRanges prevents the expansion of ranges. The raw ranges can be
|
||||
// obtained by calling Range(0) on the parser.
|
||||
KeepRanges Option = keepRanges
|
||||
)
|
||||
|
||||
// The Part option registers a handler for lines starting with a '@'. The text
|
||||
// after a '@' is available as the first field. Comments are handled as usual.
|
||||
func Part(f func(p *Parser)) Option {
|
||||
return func(p *Parser) {
|
||||
p.partHandler = f
|
||||
}
|
||||
}
|
||||
|
||||
// The CommentHandler option passes comments that appear on a line by themselves to
|
||||
// a given handler.
|
||||
func CommentHandler(f func(s string)) Option {
|
||||
return func(p *Parser) {
|
||||
p.commentHandler = f
|
||||
}
|
||||
}
|
||||
|
||||
// A Parser parses Unicode Character Database (UCD) files.
|
||||
type Parser struct {
|
||||
scanner *bufio.Scanner
|
||||
|
||||
keepRanges bool // Don't expand rune ranges in field 0.
|
||||
|
||||
err error
|
||||
comment []byte
|
||||
field [][]byte
|
||||
// parsedRange is needed in case Range(0) is called more than once for one
|
||||
// field. In some cases this requires scanning ahead.
|
||||
parsedRange bool
|
||||
rangeStart, rangeEnd rune
|
||||
|
||||
partHandler func(p *Parser)
|
||||
commentHandler func(s string)
|
||||
}
|
||||
|
||||
func (p *Parser) setError(err error) {
|
||||
if p.err == nil {
|
||||
p.err = err
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) getField(i int) []byte {
|
||||
if i >= len(p.field) {
|
||||
return nil
|
||||
}
|
||||
return p.field[i]
|
||||
}
|
||||
|
||||
// Err returns a non-nil error if any error occurred during parsing.
|
||||
func (p *Parser) Err() error {
|
||||
return p.err
|
||||
}
|
||||
|
||||
// New returns a Parser for the given Reader.
|
||||
func New(r io.Reader, o ...Option) *Parser {
|
||||
p := &Parser{
|
||||
scanner: bufio.NewScanner(r),
|
||||
}
|
||||
for _, f := range o {
|
||||
f(p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Next parses the next line in the file. It returns true if a line was parsed
|
||||
// and false if it reached the end of the file.
|
||||
func (p *Parser) Next() bool {
|
||||
if !p.keepRanges && p.rangeStart < p.rangeEnd {
|
||||
p.rangeStart++
|
||||
return true
|
||||
}
|
||||
p.comment = nil
|
||||
p.field = p.field[:0]
|
||||
p.parsedRange = false
|
||||
|
||||
for p.scanner.Scan() {
|
||||
b := p.scanner.Bytes()
|
||||
if len(b) == 0 {
|
||||
continue
|
||||
}
|
||||
if b[0] == '#' {
|
||||
if p.commentHandler != nil {
|
||||
p.commentHandler(strings.TrimSpace(string(b[1:])))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse line
|
||||
if i := bytes.IndexByte(b, '#'); i != -1 {
|
||||
p.comment = bytes.TrimSpace(b[i+1:])
|
||||
b = b[:i]
|
||||
}
|
||||
if b[0] == '@' {
|
||||
if p.partHandler != nil {
|
||||
p.field = append(p.field, bytes.TrimSpace(b[1:]))
|
||||
p.partHandler(p)
|
||||
p.field = p.field[:0]
|
||||
}
|
||||
p.comment = nil
|
||||
continue
|
||||
}
|
||||
for {
|
||||
i := bytes.IndexByte(b, ';')
|
||||
if i == -1 {
|
||||
p.field = append(p.field, bytes.TrimSpace(b))
|
||||
break
|
||||
}
|
||||
p.field = append(p.field, bytes.TrimSpace(b[:i]))
|
||||
b = b[i+1:]
|
||||
}
|
||||
if !p.keepRanges {
|
||||
p.rangeStart, p.rangeEnd = p.getRange(0)
|
||||
}
|
||||
return true
|
||||
}
|
||||
p.setError(p.scanner.Err())
|
||||
return false
|
||||
}
|
||||
|
||||
func parseRune(b []byte) (rune, error) {
|
||||
if len(b) > 2 && b[0] == 'U' && b[1] == '+' {
|
||||
b = b[2:]
|
||||
}
|
||||
x, err := strconv.ParseUint(string(b), 16, 32)
|
||||
return rune(x), err
|
||||
}
|
||||
|
||||
func (p *Parser) parseRune(b []byte) rune {
|
||||
x, err := parseRune(b)
|
||||
p.setError(err)
|
||||
return x
|
||||
}
|
||||
|
||||
// Rune parses and returns field i as a rune.
|
||||
func (p *Parser) Rune(i int) rune {
|
||||
if i > 0 || p.keepRanges {
|
||||
return p.parseRune(p.getField(i))
|
||||
}
|
||||
return p.rangeStart
|
||||
}
|
||||
|
||||
// Runes interprets and returns field i as a sequence of runes.
|
||||
func (p *Parser) Runes(i int) (runes []rune) {
|
||||
add := func(b []byte) {
|
||||
if b = bytes.TrimSpace(b); len(b) > 0 {
|
||||
runes = append(runes, p.parseRune(b))
|
||||
}
|
||||
}
|
||||
for b := p.getField(i); ; {
|
||||
i := bytes.IndexByte(b, ' ')
|
||||
if i == -1 {
|
||||
add(b)
|
||||
break
|
||||
}
|
||||
add(b[:i])
|
||||
b = b[i+1:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>")
|
||||
|
||||
// reRange matches one line of a legacy rune range.
|
||||
reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$")
|
||||
)
|
||||
|
||||
// Range parses and returns field i as a rune range. A range is inclusive at
|
||||
// both ends. If the field only has one rune, first and last will be identical.
|
||||
// It supports the legacy format for ranges used in UnicodeData.txt.
|
||||
func (p *Parser) Range(i int) (first, last rune) {
|
||||
if !p.keepRanges {
|
||||
return p.rangeStart, p.rangeStart
|
||||
}
|
||||
return p.getRange(i)
|
||||
}
|
||||
|
||||
func (p *Parser) getRange(i int) (first, last rune) {
|
||||
b := p.getField(i)
|
||||
if k := bytes.Index(b, []byte("..")); k != -1 {
|
||||
return p.parseRune(b[:k]), p.parseRune(b[k+2:])
|
||||
}
|
||||
// The first field may not be a rune, in which case we may ignore any error
|
||||
// and set the range as 0..0.
|
||||
x, err := parseRune(b)
|
||||
if err != nil {
|
||||
// Disable range parsing henceforth. This ensures that an error will be
|
||||
// returned if the user subsequently tries to parse this field as
|
||||
// a Rune.
|
||||
p.keepRanges = true
|
||||
}
|
||||
// Special case for UnicodeData that was retained for backwards compatibility.
|
||||
if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
|
||||
if p.parsedRange {
|
||||
return p.rangeStart, p.rangeEnd
|
||||
}
|
||||
mf := reRange.FindStringSubmatch(p.scanner.Text())
|
||||
if mf == nil || !p.scanner.Scan() {
|
||||
p.setError(errIncorrectLegacyRange)
|
||||
return x, x
|
||||
}
|
||||
// Using Bytes would be more efficient here, but Text is a lot easier
|
||||
// and this is not a frequent case.
|
||||
ml := reRange.FindStringSubmatch(p.scanner.Text())
|
||||
if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
|
||||
p.setError(errIncorrectLegacyRange)
|
||||
return x, x
|
||||
}
|
||||
p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
|
||||
p.parsedRange = true
|
||||
return p.rangeStart, p.rangeEnd
|
||||
}
|
||||
return x, x
|
||||
}
|
||||
|
||||
// bools recognizes all valid UCD boolean values.
|
||||
var bools = map[string]bool{
|
||||
"": false,
|
||||
"N": false,
|
||||
"No": false,
|
||||
"F": false,
|
||||
"False": false,
|
||||
"Y": true,
|
||||
"Yes": true,
|
||||
"T": true,
|
||||
"True": true,
|
||||
}
|
||||
|
||||
// Bool parses and returns field i as a boolean value.
|
||||
func (p *Parser) Bool(i int) bool {
|
||||
b := p.getField(i)
|
||||
for s, v := range bools {
|
||||
if bstrEq(b, s) {
|
||||
return v
|
||||
}
|
||||
}
|
||||
p.setError(strconv.ErrSyntax)
|
||||
return false
|
||||
}
|
||||
|
||||
// Int parses and returns field i as an integer value.
|
||||
func (p *Parser) Int(i int) int {
|
||||
x, err := strconv.ParseInt(string(p.getField(i)), 10, 64)
|
||||
p.setError(err)
|
||||
return int(x)
|
||||
}
|
||||
|
||||
// Uint parses and returns field i as an unsigned integer value.
|
||||
func (p *Parser) Uint(i int) uint {
|
||||
x, err := strconv.ParseUint(string(p.getField(i)), 10, 64)
|
||||
p.setError(err)
|
||||
return uint(x)
|
||||
}
|
||||
|
||||
// Float parses and returns field i as a decimal value.
|
||||
func (p *Parser) Float(i int) float64 {
|
||||
x, err := strconv.ParseFloat(string(p.getField(i)), 64)
|
||||
p.setError(err)
|
||||
return x
|
||||
}
|
||||
|
||||
// String parses and returns field i as a string value.
|
||||
func (p *Parser) String(i int) string {
|
||||
return string(p.getField(i))
|
||||
}
|
||||
|
||||
// Strings parses and returns field i as a space-separated list of strings.
|
||||
func (p *Parser) Strings(i int) []string {
|
||||
ss := strings.Split(string(p.getField(i)), " ")
|
||||
for i, s := range ss {
|
||||
ss[i] = strings.TrimSpace(s)
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// Comment returns the comments for the current line.
|
||||
func (p *Parser) Comment() string {
|
||||
return string(p.comment)
|
||||
}
|
||||
|
||||
var errUndefinedEnum = errors.New("ucd: undefined enum value")
|
||||
|
||||
// Enum interprets and returns field i as a value that must be one of the values
|
||||
// in enum.
|
||||
func (p *Parser) Enum(i int, enum ...string) string {
|
||||
b := p.getField(i)
|
||||
for _, s := range enum {
|
||||
if bstrEq(b, s) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
p.setError(errUndefinedEnum)
|
||||
return ""
|
||||
}
|
||||
|
||||
func bstrEq(b []byte, s string) bool {
|
||||
if len(b) != len(s) {
|
||||
return false
|
||||
}
|
||||
for i, c := range b {
|
||||
if c != s[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
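For orientation only, and not part of the vendored file above: a minimal sketch of how this parser is typically driven, feeding it UCD-formatted lines and reading fields via the constants defined in ucd.go. The inline sample data is an assumption, and the package is internal to golang.org/x/text, so only code inside that module can actually import it this way.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"golang.org/x/text/internal/ucd" // internal: importable only within x/text
)

func main() {
	// Two UnicodeData.txt-style sample lines; a real caller would open the UCD file.
	data := "0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;\n" +
		"0061;LATIN SMALL LETTER A;Ll;0;L;;;;;N;;;0041;;0041\n"

	// Parse closes the reader and invokes the callback once per entry.
	ucd.Parse(ioutil.NopCloser(strings.NewReader(data)), func(p *ucd.Parser) {
		fmt.Printf("%#U  %s  %s\n",
			p.Rune(ucd.CodePoint), p.String(ucd.Name), p.String(ucd.GeneralCategory))
	})
}
```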
100
vendor/golang.org/x/text/unicode/cldr/base.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Elem is implemented by every XML element.
|
||||
type Elem interface {
|
||||
setEnclosing(Elem)
|
||||
setName(string)
|
||||
enclosing() Elem
|
||||
|
||||
GetCommon() *Common
|
||||
}
|
||||
|
||||
type hidden struct {
|
||||
CharData string `xml:",chardata"`
|
||||
Alias *struct {
|
||||
Common
|
||||
Source string `xml:"source,attr"`
|
||||
Path string `xml:"path,attr"`
|
||||
} `xml:"alias"`
|
||||
Def *struct {
|
||||
Common
|
||||
Choice string `xml:"choice,attr,omitempty"`
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
} `xml:"default"`
|
||||
}
|
||||
|
||||
// Common holds several of the most common attributes and sub elements
|
||||
// of an XML element.
|
||||
type Common struct {
|
||||
XMLName xml.Name
|
||||
name string
|
||||
enclElem Elem
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
Reference string `xml:"reference,attr,omitempty"`
|
||||
Alt string `xml:"alt,attr,omitempty"`
|
||||
ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
|
||||
Draft string `xml:"draft,attr,omitempty"`
|
||||
hidden
|
||||
}
|
||||
|
||||
// Default returns the default type to select from the enclosed list
|
||||
// or "" if no default value is specified.
|
||||
func (e *Common) Default() string {
|
||||
if e.Def == nil {
|
||||
return ""
|
||||
}
|
||||
if e.Def.Choice != "" {
|
||||
return e.Def.Choice
|
||||
} else if e.Def.Type != "" {
|
||||
// Type is still used by the default element in collation.
|
||||
return e.Def.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetCommon returns e. It is provided such that Common implements Elem.
|
||||
func (e *Common) GetCommon() *Common {
|
||||
return e
|
||||
}
|
||||
|
||||
// Data returns the character data accumulated for this element.
|
||||
func (e *Common) Data() string {
|
||||
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
|
||||
return e.CharData
|
||||
}
|
||||
|
||||
func (e *Common) setName(s string) {
|
||||
e.name = s
|
||||
}
|
||||
|
||||
func (e *Common) enclosing() Elem {
|
||||
return e.enclElem
|
||||
}
|
||||
|
||||
func (e *Common) setEnclosing(en Elem) {
|
||||
e.enclElem = en
|
||||
}
|
||||
|
||||
// Escape characters that can be escaped without further escaping the string.
|
||||
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
|
||||
|
||||
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
|
||||
// It assumes the input string is correctly formatted.
|
||||
func replaceUnicode(s string) string {
|
||||
if s[1] == '#' {
|
||||
r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
|
||||
return string(r)
|
||||
}
|
||||
r, _, _, _ := strconv.UnquoteChar(s, 0)
|
||||
return string(r)
|
||||
}
|
||||
130
vendor/golang.org/x/text/unicode/cldr/cldr.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run makexml.go -output xml.go
|
||||
|
||||
// Package cldr provides a parser for LDML and related XML formats.
|
||||
// This package is intended to be used by the table generation tools
|
||||
// for the various internationalization-related packages.
|
||||
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
|
||||
// is periodically amended, this package may change considerably over time.
|
||||
// This mostly means that data may appear and disappear between versions.
|
||||
// That is, old code should keep compiling for newer versions, but data
|
||||
// may have moved or changed.
|
||||
// CLDR version 22 is the first version supported by this package.
|
||||
// Older versions may not work.
|
||||
package cldr // import "golang.org/x/text/unicode/cldr"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
|
||||
type CLDR struct {
|
||||
parent map[string][]string
|
||||
locale map[string]*LDML
|
||||
resolved map[string]*LDML
|
||||
bcp47 *LDMLBCP47
|
||||
supp *SupplementalData
|
||||
}
|
||||
|
||||
func makeCLDR() *CLDR {
|
||||
return &CLDR{
|
||||
parent: make(map[string][]string),
|
||||
locale: make(map[string]*LDML),
|
||||
resolved: make(map[string]*LDML),
|
||||
bcp47: &LDMLBCP47{},
|
||||
supp: &SupplementalData{},
|
||||
}
|
||||
}
|
||||
|
||||
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
|
||||
func (cldr *CLDR) BCP47() *LDMLBCP47 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Draft indicates the draft level of an element.
|
||||
type Draft int
|
||||
|
||||
const (
|
||||
Approved Draft = iota
|
||||
Contributed
|
||||
Provisional
|
||||
Unconfirmed
|
||||
)
|
||||
|
||||
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
|
||||
|
||||
// ParseDraft returns the Draft value corresponding to the given string. The
|
||||
// empty string corresponds to Approved.
|
||||
func ParseDraft(level string) (Draft, error) {
|
||||
if level == "" {
|
||||
return Approved, nil
|
||||
}
|
||||
for i, s := range drafts {
|
||||
if level == s {
|
||||
return Unconfirmed - Draft(i), nil
|
||||
}
|
||||
}
|
||||
return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
|
||||
}
|
||||
|
||||
func (d Draft) String() string {
|
||||
return drafts[len(drafts)-1-int(d)]
|
||||
}
|
||||
|
||||
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
|
||||
// Any draft element for which the draft level is higher than lev will be excluded.
|
||||
// If multiple draft levels are available for a single element, the one with the
|
||||
// lowest draft level will be selected, unless preferDraft is true, in which case
|
||||
// the highest draft will be chosen.
|
||||
// It is assumed that the underlying LDML is canonicalized.
|
||||
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
|
||||
// TODO: implement
|
||||
cldr.resolved = make(map[string]*LDML)
|
||||
}
|
||||
|
||||
// RawLDML returns the LDML XML for loc in unresolved form.
|
||||
// loc must be one of the strings returned by Locales.
|
||||
func (cldr *CLDR) RawLDML(loc string) *LDML {
|
||||
return cldr.locale[loc]
|
||||
}
|
||||
|
||||
// LDML returns the fully resolved LDML XML for loc, which must be one of
|
||||
// the strings returned by Locales.
|
||||
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
|
||||
return cldr.resolve(loc)
|
||||
}
|
||||
|
||||
// Supplemental returns the parsed supplemental data. If no such data was parsed,
|
||||
// nil is returned.
|
||||
func (cldr *CLDR) Supplemental() *SupplementalData {
|
||||
return cldr.supp
|
||||
}
|
||||
|
||||
// Locales returns the locales for which there exist files.
|
||||
// Valid sublocales for which there is no file are not included.
|
||||
// The root locale is always sorted first.
|
||||
func (cldr *CLDR) Locales() []string {
|
||||
loc := []string{"root"}
|
||||
hasRoot := false
|
||||
for l := range cldr.locale {
|
||||
if l == "root" {
|
||||
hasRoot = true
|
||||
continue
|
||||
}
|
||||
loc = append(loc, l)
|
||||
}
|
||||
sort.Strings(loc[1:])
|
||||
if !hasRoot {
|
||||
return loc[1:]
|
||||
}
|
||||
return loc
|
||||
}
|
||||
|
||||
// Get fills in the fields of x based on the XPath path.
|
||||
func Get(e Elem, path string) (res Elem, err error) {
|
||||
return walkXPath(e, path)
|
||||
}
|
||||
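A small hedged example of the draft-level helpers above; the input strings are made up, but ParseDraft and Draft.String are the functions shown in cldr.go.

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	// "" maps to Approved; unknown levels return an error.
	for _, s := range []string{"", "contributed", "unconfirmed", "bogus"} {
		d, err := cldr.ParseDraft(s)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%-14q -> %d (%s)\n", s, d, d)
	}
}
```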
359
vendor/golang.org/x/text/unicode/cldr/collate.go
generated
vendored
Normal file
@@ -0,0 +1,359 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// RuleProcessor can be passed to Collator's Process method, which
|
||||
// parses the rules and calls the respective method for each rule found.
|
||||
type RuleProcessor interface {
|
||||
Reset(anchor string, before int) error
|
||||
Insert(level int, str, context, extend string) error
|
||||
Index(id string)
|
||||
}
|
||||
|
||||
const (
|
||||
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
|
||||
// of a grouping within an index.
|
||||
// We ignore any rule that starts with this rune.
|
||||
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
|
||||
cldrIndex = "\uFDD0"
|
||||
|
||||
// specialAnchor is the format in which to represent logical reset positions,
|
||||
// such as "first tertiary ignorable".
|
||||
specialAnchor = "<%s/>"
|
||||
)
|
||||
|
||||
// Process parses the rules for the tailorings of this collation
|
||||
// and calls the respective methods of p for each rule found.
|
||||
func (c Collation) Process(p RuleProcessor) (err error) {
|
||||
if len(c.Cr) > 0 {
|
||||
if len(c.Cr) > 1 {
|
||||
return fmt.Errorf("multiple cr elements, want 0 or 1")
|
||||
}
|
||||
return processRules(p, c.Cr[0].Data())
|
||||
}
|
||||
if c.Rules.Any != nil {
|
||||
return c.processXML(p)
|
||||
}
|
||||
return errors.New("no tailoring data")
|
||||
}
|
||||
|
||||
// processRules parses rules in the Collation Rule Syntax defined in
|
||||
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
|
||||
func processRules(p RuleProcessor, s string) (err error) {
|
||||
chk := func(s string, e error) string {
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
return s
|
||||
}
|
||||
i := 0 // Save the line number for use after the loop.
|
||||
scanner := bufio.NewScanner(strings.NewReader(s))
|
||||
for ; scanner.Scan() && err == nil; i++ {
|
||||
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
|
||||
level := 5
|
||||
var ch byte
|
||||
switch ch, s = s[0], s[1:]; ch {
|
||||
case '&': // followed by <anchor> or '[' <key> ']'
|
||||
if s = skipSpace(s); consume(&s, '[') {
|
||||
s = chk(parseSpecialAnchor(p, s))
|
||||
} else {
|
||||
s = chk(parseAnchor(p, 0, s))
|
||||
}
|
||||
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
|
||||
for level = 1; consume(&s, '<'); level++ {
|
||||
}
|
||||
if level > 4 {
|
||||
err = fmt.Errorf("level %d > 4", level)
|
||||
}
|
||||
fallthrough
|
||||
case '=': // identity relation, optionally followed by *.
|
||||
if consume(&s, '*') {
|
||||
s = chk(parseSequence(p, level, s))
|
||||
} else {
|
||||
s = chk(parseOrder(p, level, s))
|
||||
}
|
||||
default:
|
||||
chk("", fmt.Errorf("illegal operator %q", ch))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if chk("", scanner.Err()); err != nil {
|
||||
return fmt.Errorf("%d: %v", i, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseSpecialAnchor parses the anchor syntax which is either of the form
|
||||
// ['before' <level>] <anchor>
|
||||
// or
|
||||
// [<label>]
|
||||
// The starting '[' should already be consumed.
|
||||
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
|
||||
i := strings.IndexByte(s, ']')
|
||||
if i == -1 {
|
||||
return "", errors.New("unmatched bracket")
|
||||
}
|
||||
a := strings.TrimSpace(s[:i])
|
||||
s = s[i+1:]
|
||||
if strings.HasPrefix(a, "before ") {
|
||||
l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return parseAnchor(p, int(l), s)
|
||||
}
|
||||
return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
|
||||
}
|
||||
|
||||
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
anchor, s, err := scanString(s)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return s, p.Reset(anchor, level)
|
||||
}
|
||||
|
||||
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
var value, context, extend string
|
||||
if value, s, err = scanString(s); err != nil {
|
||||
return s, err
|
||||
}
|
||||
if strings.HasPrefix(value, cldrIndex) {
|
||||
p.Index(value[len(cldrIndex):])
|
||||
return
|
||||
}
|
||||
if consume(&s, '|') {
|
||||
if context, s, err = scanString(s); err != nil {
|
||||
return s, errors.New("missing string after context")
|
||||
}
|
||||
}
|
||||
if consume(&s, '/') {
|
||||
if extend, s, err = scanString(s); err != nil {
|
||||
return s, errors.New("missing string after extension")
|
||||
}
|
||||
}
|
||||
return s, p.Insert(level, value, context, extend)
|
||||
}
|
||||
|
||||
// scanString scans a single input string.
|
||||
func scanString(s string) (str, tail string, err error) {
|
||||
if s = skipSpace(s); s == "" {
|
||||
return s, s, errors.New("missing string")
|
||||
}
|
||||
buf := [16]byte{} // small but enough to hold most cases.
|
||||
value := buf[:0]
|
||||
for s != "" {
|
||||
if consume(&s, '\'') {
|
||||
i := strings.IndexByte(s, '\'')
|
||||
if i == -1 {
|
||||
return "", "", errors.New(`unmatched single quote`)
|
||||
}
|
||||
if i == 0 {
|
||||
value = append(value, '\'')
|
||||
} else {
|
||||
value = append(value, s[:i]...)
|
||||
}
|
||||
s = s[i+1:]
|
||||
continue
|
||||
}
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
|
||||
break
|
||||
}
|
||||
value = append(value, s[:sz]...)
|
||||
s = s[sz:]
|
||||
}
|
||||
return string(value), skipSpace(s), nil
|
||||
}
|
||||
|
||||
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
if s = skipSpace(s); s == "" {
|
||||
return s, errors.New("empty sequence")
|
||||
}
|
||||
last := rune(0)
|
||||
for s != "" {
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
s = s[sz:]
|
||||
|
||||
if r == '-' {
|
||||
// We have a range. The first element was already written.
|
||||
if last == 0 {
|
||||
return s, errors.New("range without starter value")
|
||||
}
|
||||
r, sz = utf8.DecodeRuneInString(s)
|
||||
s = s[sz:]
|
||||
if r == utf8.RuneError || r < last {
|
||||
return s, fmt.Errorf("invalid range %q-%q", last, r)
|
||||
}
|
||||
for i := last + 1; i <= r; i++ {
|
||||
if err := p.Insert(level, string(i), "", ""); err != nil {
|
||||
return s, err
|
||||
}
|
||||
}
|
||||
last = 0
|
||||
continue
|
||||
}
|
||||
|
||||
if unicode.IsSpace(r) || unicode.IsPunct(r) {
|
||||
break
|
||||
}
|
||||
|
||||
// normal case
|
||||
if err := p.Insert(level, string(r), "", ""); err != nil {
|
||||
return s, err
|
||||
}
|
||||
last = r
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func skipSpace(s string) string {
|
||||
return strings.TrimLeftFunc(s, unicode.IsSpace)
|
||||
}
|
||||
|
||||
// consume returns whether the next byte is ch. If so, it gobbles it by
|
||||
// updating s.
|
||||
func consume(s *string, ch byte) (ok bool) {
|
||||
if *s == "" || (*s)[0] != ch {
|
||||
return false
|
||||
}
|
||||
*s = (*s)[1:]
|
||||
return true
|
||||
}
|
||||
|
||||
// The following code parses Collation rules of CLDR version 24 and before.
|
||||
|
||||
var lmap = map[byte]int{
|
||||
'p': 1,
|
||||
's': 2,
|
||||
't': 3,
|
||||
'i': 5,
|
||||
}
|
||||
|
||||
type rulesElem struct {
|
||||
Rules struct {
|
||||
Common
|
||||
Any []*struct {
|
||||
XMLName xml.Name
|
||||
rule
|
||||
} `xml:",any"`
|
||||
} `xml:"rules"`
|
||||
}
|
||||
|
||||
type rule struct {
|
||||
Value string `xml:",chardata"`
|
||||
Before string `xml:"before,attr"`
|
||||
Any []*struct {
|
||||
XMLName xml.Name
|
||||
rule
|
||||
} `xml:",any"`
|
||||
}
|
||||
|
||||
var emptyValueError = errors.New("cldr: empty rule value")
|
||||
|
||||
func (r *rule) value() (string, error) {
|
||||
// Convert hexadecimal Unicode codepoint notation to a string.
|
||||
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
|
||||
r.Value = s
|
||||
if s == "" {
|
||||
if len(r.Any) != 1 {
|
||||
return "", emptyValueError
|
||||
}
|
||||
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
|
||||
r.Any = nil
|
||||
} else if len(r.Any) != 0 {
|
||||
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
|
||||
}
|
||||
return r.Value, nil
|
||||
}
|
||||
|
||||
func (r rule) process(p RuleProcessor, name, context, extend string) error {
|
||||
v, err := r.value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch name {
|
||||
case "p", "s", "t", "i":
|
||||
if strings.HasPrefix(v, cldrIndex) {
|
||||
p.Index(v[len(cldrIndex):])
|
||||
return nil
|
||||
}
|
||||
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
|
||||
return err
|
||||
}
|
||||
case "pc", "sc", "tc", "ic":
|
||||
level := lmap[name[0]]
|
||||
for _, s := range v {
|
||||
if err := p.Insert(level, string(s), context, extend); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("cldr: unsupported tag: %q", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processXML parses the format of CLDR versions 24 and older.
|
||||
func (c Collation) processXML(p RuleProcessor) (err error) {
|
||||
// Collation is generated and defined in xml.go.
|
||||
var v string
|
||||
for _, r := range c.Rules.Any {
|
||||
switch r.XMLName.Local {
|
||||
case "reset":
|
||||
level := 0
|
||||
switch r.Before {
|
||||
case "primary", "1":
|
||||
level = 1
|
||||
case "secondary", "2":
|
||||
level = 2
|
||||
case "tertiary", "3":
|
||||
level = 3
|
||||
case "":
|
||||
default:
|
||||
return fmt.Errorf("cldr: unknown level %q", r.Before)
|
||||
}
|
||||
v, err = r.value()
|
||||
if err == nil {
|
||||
err = p.Reset(v, level)
|
||||
}
|
||||
case "x":
|
||||
var context, extend string
|
||||
for _, r1 := range r.Any {
|
||||
v, err = r1.value()
|
||||
switch r1.XMLName.Local {
|
||||
case "context":
|
||||
context = v
|
||||
case "extend":
|
||||
extend = v
|
||||
}
|
||||
}
|
||||
for _, r1 := range r.Any {
|
||||
if t := r1.XMLName.Local; t == "context" || t == "extend" {
|
||||
continue
|
||||
}
|
||||
r1.rule.process(p, r1.XMLName.Local, context, extend)
|
||||
}
|
||||
default:
|
||||
err = r.rule.process(p, r.XMLName.Local, "", "")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
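A rough sketch of the consumer side of Collation.Process described above, not part of the vendored code: a RuleProcessor that merely prints what Process reports. The printRules type and dumpTailoring helper are hypothetical; only the RuleProcessor interface and the generated cldr.Collation type come from the package itself.

```go
package collatedump

import (
	"fmt"

	"golang.org/x/text/unicode/cldr"
)

// printRules is a hypothetical RuleProcessor that echoes each rule it receives.
type printRules struct{}

func (printRules) Reset(anchor string, before int) error {
	fmt.Printf("reset at %q (before level %d)\n", anchor, before)
	return nil
}

func (printRules) Insert(level int, str, context, extend string) error {
	fmt.Printf("insert %q at level %d (context %q, extend %q)\n", str, level, context, extend)
	return nil
}

func (printRules) Index(id string) {
	fmt.Printf("index %q\n", id)
}

// dumpTailoring would be called with a Collation taken from a decoded LDML
// document; the exact field path to reach it comes from the generated xml.go
// and is assumed here.
func dumpTailoring(c cldr.Collation) error {
	return c.Process(printRules{})
}
```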
171
vendor/golang.org/x/text/unicode/cldr/decode.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// A Decoder loads an archive of CLDR data.
|
||||
type Decoder struct {
|
||||
dirFilter []string
|
||||
sectionFilter []string
|
||||
loader Loader
|
||||
cldr *CLDR
|
||||
curLocale string
|
||||
}
|
||||
|
||||
// SetSectionFilter takes a list of top-level LDML element names to which
|
||||
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
|
||||
func (d *Decoder) SetSectionFilter(filter ...string) {
|
||||
d.sectionFilter = filter
|
||||
// TODO: automatically set dir filter
|
||||
}
|
||||
|
||||
// SetDirFilter limits the loading of LDML XML files of the specified directories.
|
||||
// Note that sections may be split across directories differently for different CLDR versions.
|
||||
// For more robust code, use SetSectionFilter.
|
||||
func (d *Decoder) SetDirFilter(dir ...string) {
|
||||
d.dirFilter = dir
|
||||
}
|
||||
|
||||
// A Loader provides access to the files of a CLDR archive.
|
||||
type Loader interface {
|
||||
Len() int
|
||||
Path(i int) string
|
||||
Reader(i int) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
var fileRe = regexp.MustCompile(".*/(.*)/(.*)\\.xml")
|
||||
|
||||
// Decode loads and decodes the files represented by l.
|
||||
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
|
||||
d.cldr = makeCLDR()
|
||||
for i := 0; i < l.Len(); i++ {
|
||||
fname := l.Path(i)
|
||||
if m := fileRe.FindStringSubmatch(fname); m != nil {
|
||||
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
|
||||
continue
|
||||
}
|
||||
var r io.Reader
|
||||
if r, err = l.Reader(i); err == nil {
|
||||
err = d.decode(m[1], m[2], r)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
d.cldr.finalize(d.sectionFilter)
|
||||
return d.cldr, nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decode(dir, id string, r io.Reader) error {
|
||||
var v interface{}
|
||||
var l *LDML
|
||||
cldr := d.cldr
|
||||
switch {
|
||||
case dir == "supplemental":
|
||||
v = cldr.supp
|
||||
case dir == "transforms":
|
||||
return nil
|
||||
case dir == "bcp47":
|
||||
v = cldr.bcp47
|
||||
case dir == "validity":
|
||||
return nil
|
||||
default:
|
||||
ok := false
|
||||
if v, ok = cldr.locale[id]; !ok {
|
||||
l = &LDML{}
|
||||
v, cldr.locale[id] = l, l
|
||||
}
|
||||
}
|
||||
x := xml.NewDecoder(r)
|
||||
if err := x.Decode(v); err != nil {
|
||||
log.Printf("%s/%s: %v", dir, id, err)
|
||||
return err
|
||||
}
|
||||
if l != nil {
|
||||
if l.Identity == nil {
|
||||
return fmt.Errorf("%s/%s: missing identity element", dir, id)
|
||||
}
|
||||
// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
|
||||
// is resolved.
|
||||
// path := strings.Split(id, "_")
|
||||
// if lang := l.Identity.Language.Type; lang != path[0] {
|
||||
// return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
|
||||
// }
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pathLoader []string
|
||||
|
||||
func makePathLoader(path string) (pl pathLoader, err error) {
|
||||
err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
|
||||
pl = append(pl, path)
|
||||
return err
|
||||
})
|
||||
return pl, err
|
||||
}
|
||||
|
||||
func (pl pathLoader) Len() int {
|
||||
return len(pl)
|
||||
}
|
||||
|
||||
func (pl pathLoader) Path(i int) string {
|
||||
return pl[i]
|
||||
}
|
||||
|
||||
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
|
||||
return os.Open(pl[i])
|
||||
}
|
||||
|
||||
// DecodePath loads CLDR data from the given path.
|
||||
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
|
||||
loader, err := makePathLoader(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Decode(loader)
|
||||
}
|
||||
|
||||
type zipLoader struct {
|
||||
r *zip.Reader
|
||||
}
|
||||
|
||||
func (zl zipLoader) Len() int {
|
||||
return len(zl.r.File)
|
||||
}
|
||||
|
||||
func (zl zipLoader) Path(i int) string {
|
||||
return zl.r.File[i].Name
|
||||
}
|
||||
|
||||
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
|
||||
return zl.r.File[i].Open()
|
||||
}
|
||||
|
||||
// DecodeZip loads CLDR data from the zip archive for which r is the source.
|
||||
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
|
||||
buffer, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Decode(zipLoader{archive})
|
||||
}
|
||||
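A hedged sketch of driving the Decoder defined above. The "core" path and the chosen filters are assumptions; SetDirFilter, SetSectionFilter, DecodePath, Locales, and RawLDML are the methods shown in this file and in cldr.go.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	var d cldr.Decoder
	// Only load files from the "main" directory and only evaluate the
	// "numbers" section; both filters are optional.
	d.SetDirFilter("main")
	d.SetSectionFilter("numbers")

	// "core" is an assumed path to an unpacked CLDR core archive;
	// DecodeZip can be used instead when reading core.zip directly.
	data, err := d.DecodePath("core")
	if err != nil {
		log.Fatal(err)
	}
	for _, loc := range data.Locales() {
		ldml := data.RawLDML(loc)
		fmt.Println(loc, ldml != nil)
	}
}
```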
400
vendor/golang.org/x/text/unicode/cldr/makexml.go
generated
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// This tool generates types for the various XML formats of CLDR.
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
)
|
||||
|
||||
var outputFile = flag.String("output", "xml.go", "output file name")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
r := gen.OpenCLDRCoreZip()
|
||||
buffer, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Fatal("Could not read zip file")
|
||||
}
|
||||
r.Close()
|
||||
z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
|
||||
if err != nil {
|
||||
log.Fatalf("Could not read zip archive: %v", err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
version := gen.CLDRVersion()
|
||||
|
||||
for _, dtd := range files {
|
||||
for _, f := range z.File {
|
||||
if strings.HasSuffix(f.Name, dtd.file+".dtd") {
|
||||
r, err := f.Open()
|
||||
failOnError(err)
|
||||
|
||||
b := makeBuilder(&buf, dtd)
|
||||
b.parseDTD(r)
|
||||
b.resolve(b.index[dtd.top[0]])
|
||||
b.write()
|
||||
if b.version != "" && version != b.version {
|
||||
println(f.Name)
|
||||
log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
|
||||
fmt.Fprintf(&buf, "const Version = %q\n", version)
|
||||
|
||||
gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
|
||||
}
|
||||
|
||||
func failOnError(err error) {
|
||||
if err != nil {
|
||||
log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// configuration data per DTD type
|
||||
type dtd struct {
|
||||
file string // base file name
|
||||
root string // Go name of the root XML element
|
||||
top []string // create a different type for this section
|
||||
|
||||
skipElem []string // hard-coded or deprecated elements
|
||||
skipAttr []string // attributes to exclude
|
||||
predefined []string // hard-coded elements exist of the form <name>Elem
|
||||
forceRepeat []string // elements to make slices despite DTD
|
||||
}
|
||||
|
||||
var files = []dtd{
|
||||
{
|
||||
file: "ldmlBCP47",
|
||||
root: "LDMLBCP47",
|
||||
top: []string{"ldmlBCP47"},
|
||||
skipElem: []string{
|
||||
"cldrVersion", // deprecated, not used
|
||||
},
|
||||
},
|
||||
{
|
||||
file: "ldmlSupplemental",
|
||||
root: "SupplementalData",
|
||||
top: []string{"supplementalData"},
|
||||
skipElem: []string{
|
||||
"cldrVersion", // deprecated, not used
|
||||
},
|
||||
forceRepeat: []string{
|
||||
"plurals", // data defined in plurals.xml and ordinals.xml
|
||||
},
|
||||
},
|
||||
{
|
||||
file: "ldml",
|
||||
root: "LDML",
|
||||
top: []string{
|
||||
"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
|
||||
},
|
||||
skipElem: []string{
|
||||
"cp", // not used anywhere
|
||||
"special", // not used anywhere
|
||||
"fallback", // deprecated, not used
|
||||
"alias", // in Common
|
||||
"default", // in Common
|
||||
},
|
||||
skipAttr: []string{
|
||||
"hiraganaQuarternary", // typo in DTD, correct version included as well
|
||||
},
|
||||
predefined: []string{"rules"},
|
||||
},
|
||||
}
|
||||
|
||||
var comments = map[string]string{
|
||||
"ldmlBCP47": `
|
||||
// LDMLBCP47 holds information on allowable values for various variables in LDML.
|
||||
`,
|
||||
"supplementalData": `
|
||||
// SupplementalData holds information relevant for internationalization
|
||||
// and proper use of CLDR, but that is not contained in the locale hierarchy.
|
||||
`,
|
||||
"ldml": `
|
||||
// LDML is the top-level type for locale-specific data.
|
||||
`,
|
||||
"collation": `
|
||||
// Collation contains rules that specify a certain sort-order,
|
||||
// as a tailoring of the root order.
|
||||
// The parsed rules are obtained by passing a RuleProcessor to Collation's
|
||||
// Process method.
|
||||
`,
|
||||
"calendar": `
|
||||
// Calendar specifies the fields used for formatting and parsing dates and times.
|
||||
// The month and quarter names are identified numerically, starting at 1.
|
||||
// The day (of the week) names are identified with short strings, since there is
|
||||
// no universally-accepted numeric designation.
|
||||
`,
|
||||
"dates": `
|
||||
// Dates contains information regarding the format and parsing of dates and times.
|
||||
`,
|
||||
"localeDisplayNames": `
|
||||
// LocaleDisplayNames specifies localized display names for scripts, languages,
|
||||
// countries, currencies, and variants.
|
||||
`,
|
||||
"numbers": `
|
||||
// Numbers supplies information for formatting and parsing numbers and currencies.
|
||||
`,
|
||||
}
|
||||
|
||||
type element struct {
|
||||
name string // XML element name
|
||||
category string // elements contained by this element
|
||||
signature string // category + attrKey*
|
||||
|
||||
attr []*attribute // attributes supported by this element.
|
||||
sub []struct { // parsed and evaluated sub elements of this element.
|
||||
e *element
|
||||
repeat bool // true if the element needs to be a slice
|
||||
}
|
||||
|
||||
resolved bool // prevent multiple resolutions of this element.
|
||||
}
|
||||
|
||||
type attribute struct {
|
||||
name string
|
||||
key string
|
||||
list []string
|
||||
|
||||
tag string // Go tag
|
||||
}
|
||||
|
||||
var (
|
||||
reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
|
||||
reAttr = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
|
||||
reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
|
||||
reToken = regexp.MustCompile(`\w\-`)
|
||||
)
|
||||
|
||||
// builder is used to read in the DTD files from CLDR and generate Go code
|
||||
// to be used with the encoding/xml package.
|
||||
type builder struct {
|
||||
w io.Writer
|
||||
index map[string]*element
|
||||
elem []*element
|
||||
info dtd
|
||||
version string
|
||||
}
|
||||
|
||||
func makeBuilder(w io.Writer, d dtd) builder {
|
||||
return builder{
|
||||
w: w,
|
||||
index: make(map[string]*element),
|
||||
elem: []*element{},
|
||||
info: d,
|
||||
}
|
||||
}
|
||||
|
||||
// parseDTD parses a DTD file.
|
||||
func (b *builder) parseDTD(r io.Reader) {
|
||||
for d := xml.NewDecoder(r); ; {
|
||||
t, err := d.Token()
|
||||
if t == nil {
|
||||
break
|
||||
}
|
||||
failOnError(err)
|
||||
dir, ok := t.(xml.Directive)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
m := reHead.FindSubmatch(dir)
|
||||
dir = dir[len(m[0]):]
|
||||
ename := string(m[2])
|
||||
el, elementFound := b.index[ename]
|
||||
switch string(m[1]) {
|
||||
case "ELEMENT":
|
||||
if elementFound {
|
||||
log.Fatal("parseDTD: duplicate entry for element %q", ename)
|
||||
}
|
||||
m := reElem.FindSubmatch(dir)
|
||||
if m == nil {
|
||||
log.Fatalf("parseDTD: invalid element %q", string(dir))
|
||||
}
|
||||
if len(m[0]) != len(dir) {
|
||||
log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0]))
|
||||
}
|
||||
s := string(m[1])
|
||||
el = &element{
|
||||
name: ename,
|
||||
category: s,
|
||||
}
|
||||
b.index[ename] = el
|
||||
case "ATTLIST":
|
||||
if !elementFound {
|
||||
log.Fatalf("parseDTD: unknown element %q", ename)
|
||||
}
|
||||
s := string(dir)
|
||||
m := reAttr.FindStringSubmatch(s)
|
||||
if m == nil {
|
||||
log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
|
||||
}
|
||||
if m[4] == "FIXED" {
|
||||
b.version = m[5]
|
||||
} else {
|
||||
switch m[1] {
|
||||
case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
|
||||
case "type", "choice":
|
||||
default:
|
||||
el.attr = append(el.attr, &attribute{
|
||||
name: m[1],
|
||||
key: s,
|
||||
list: reToken.FindAllString(m[3], -1),
|
||||
})
|
||||
el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)
|
||||
|
||||
// resolve takes a parsed element and converts it into structured data
|
||||
// that can be used to generate the XML code.
|
||||
func (b *builder) resolve(e *element) {
|
||||
if e.resolved {
|
||||
return
|
||||
}
|
||||
b.elem = append(b.elem, e)
|
||||
e.resolved = true
|
||||
s := e.category
|
||||
found := make(map[string]bool)
|
||||
sequenceStart := []int{}
|
||||
for len(s) > 0 {
|
||||
m := reCat.FindStringSubmatch(s)
|
||||
if m == nil {
|
||||
log.Fatalf("%s: invalid category string %q", e.name, s)
|
||||
}
|
||||
repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
|
||||
switch m[1] {
|
||||
case "":
|
||||
case "(":
|
||||
sequenceStart = append(sequenceStart, len(e.sub))
|
||||
case ")":
|
||||
if len(sequenceStart) == 0 {
|
||||
log.Fatalf("%s: unmatched closing parenthesis", e.name)
|
||||
}
|
||||
for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
|
||||
e.sub[i].repeat = e.sub[i].repeat || repeat
|
||||
}
|
||||
sequenceStart = sequenceStart[:len(sequenceStart)-1]
|
||||
default:
|
||||
if in(b.info.skipElem, m[1]) {
|
||||
} else if sub, ok := b.index[m[1]]; ok {
|
||||
if !found[sub.name] {
|
||||
e.sub = append(e.sub, struct {
|
||||
e *element
|
||||
repeat bool
|
||||
}{sub, repeat})
|
||||
found[sub.name] = true
|
||||
b.resolve(sub)
|
||||
}
|
||||
} else if m[1] == "#PCDATA" || m[1] == "ANY" {
|
||||
} else if m[1] != "EMPTY" {
|
||||
log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
|
||||
}
|
||||
}
|
||||
s = s[len(m[0]):]
|
||||
}
|
||||
}
|
||||
|
||||
// in reports whether s is contained in set.
|
||||
func in(set []string, s string) bool {
|
||||
for _, v := range set {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var repl = strings.NewReplacer("-", " ", "_", " ")
|
||||
|
||||
// title puts the first character or each character following '_' in title case and
|
||||
// removes all occurrences of '_'.
|
||||
func title(s string) string {
|
||||
return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
|
||||
}
|
||||
|
||||
// writeElem generates Go code for a single element, recursively.
|
||||
func (b *builder) writeElem(tab int, e *element) {
|
||||
p := func(f string, x ...interface{}) {
|
||||
f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
|
||||
fmt.Fprintf(b.w, f, x...)
|
||||
}
|
||||
if len(e.sub) == 0 && len(e.attr) == 0 {
|
||||
p("Common")
|
||||
return
|
||||
}
|
||||
p("struct {")
|
||||
tab++
|
||||
p("\nCommon")
|
||||
for _, attr := range e.attr {
|
||||
if !in(b.info.skipAttr, attr.name) {
|
||||
p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
|
||||
}
|
||||
}
|
||||
for _, sub := range e.sub {
|
||||
if in(b.info.predefined, sub.e.name) {
|
||||
p("\n%sElem", sub.e.name)
|
||||
continue
|
||||
}
|
||||
if in(b.info.skipElem, sub.e.name) {
|
||||
continue
|
||||
}
|
||||
p("\n%s ", title(sub.e.name))
|
||||
if sub.repeat {
|
||||
p("[]")
|
||||
}
|
||||
p("*")
|
||||
if in(b.info.top, sub.e.name) {
|
||||
p(title(sub.e.name))
|
||||
} else {
|
||||
b.writeElem(tab, sub.e)
|
||||
}
|
||||
p(" `xml:\"%s\"`", sub.e.name)
|
||||
}
|
||||
tab--
|
||||
p("\n}")
|
||||
}
|
||||
|
||||
// write generates the Go XML code.
|
||||
func (b *builder) write() {
|
||||
for i, name := range b.info.top {
|
||||
e := b.index[name]
|
||||
if e != nil {
|
||||
fmt.Fprintf(b.w, comments[name])
|
||||
name := title(e.name)
|
||||
if i == 0 {
|
||||
name = b.info.root
|
||||
}
|
||||
fmt.Fprintf(b.w, "type %s ", name)
|
||||
b.writeElem(0, e)
|
||||
fmt.Fprint(b.w, "\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
602
vendor/golang.org/x/text/unicode/cldr/resolve.go
generated
vendored
Normal file
@@ -0,0 +1,602 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
// This file implements the various inheritance constructs defined by LDML.
|
||||
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
|
||||
// for more details.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// fieldIter iterates over fields in a struct. It includes
|
||||
// fields of embedded structs.
|
||||
type fieldIter struct {
|
||||
v reflect.Value
|
||||
index, n []int
|
||||
}
|
||||
|
||||
func iter(v reflect.Value) fieldIter {
|
||||
if v.Kind() != reflect.Struct {
|
||||
log.Panicf("value %v must be a struct", v)
|
||||
}
|
||||
i := fieldIter{
|
||||
v: v,
|
||||
index: []int{0},
|
||||
n: []int{v.NumField()},
|
||||
}
|
||||
i.descent()
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *fieldIter) descent() {
|
||||
for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
|
||||
i.index = append(i.index, 0)
|
||||
i.n = append(i.n, f.Type.NumField())
|
||||
}
|
||||
}
|
||||
|
||||
func (i *fieldIter) done() bool {
|
||||
return len(i.index) == 1 && i.index[0] >= i.n[0]
|
||||
}
|
||||
|
||||
func skip(f reflect.StructField) bool {
|
||||
return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
|
||||
}
|
||||
|
||||
func (i *fieldIter) next() {
|
||||
for {
|
||||
k := len(i.index) - 1
|
||||
i.index[k]++
|
||||
if i.index[k] < i.n[k] {
|
||||
if !skip(i.field()) {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if k == 0 {
|
||||
return
|
||||
}
|
||||
i.index = i.index[:k]
|
||||
i.n = i.n[:k]
|
||||
}
|
||||
}
|
||||
i.descent()
|
||||
}
|
||||
|
||||
func (i *fieldIter) value() reflect.Value {
|
||||
return i.v.FieldByIndex(i.index)
|
||||
}
|
||||
|
||||
func (i *fieldIter) field() reflect.StructField {
|
||||
return i.v.Type().FieldByIndex(i.index)
|
||||
}
|
||||
|
||||
type visitor func(v reflect.Value) error
|
||||
|
||||
var stopDescent = fmt.Errorf("do not recurse")
|
||||
|
||||
func (f visitor) visit(x interface{}) error {
|
||||
return f.visitRec(reflect.ValueOf(x))
|
||||
}
|
||||
|
||||
// visit recursively calls f on all nodes in v.
|
||||
func (f visitor) visitRec(v reflect.Value) error {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
return f.visitRec(v.Elem())
|
||||
}
|
||||
if err := f(v); err != nil {
|
||||
if err == stopDescent {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
for i := iter(v); !i.done(); i.next() {
|
||||
if err := f.visitRec(i.value()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := f.visitRec(v.Index(i)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getPath is used for error reporting purposes only.
|
||||
func getPath(e Elem) string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
if e.enclosing() == nil {
|
||||
return e.GetCommon().name
|
||||
}
|
||||
if e.GetCommon().Type == "" {
|
||||
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
|
||||
}
|
||||
|
||||
// xmlName returns the xml name of the element or attribute
|
||||
func xmlName(f reflect.StructField) (name string, attr bool) {
|
||||
tags := strings.Split(f.Tag.Get("xml"), ",")
|
||||
for _, s := range tags {
|
||||
attr = attr || s == "attr"
|
||||
}
|
||||
return tags[0], attr
|
||||
}
|
||||
|
||||
func findField(v reflect.Value, key string) (reflect.Value, error) {
|
||||
v = reflect.Indirect(v)
|
||||
for i := iter(v); !i.done(); i.next() {
|
||||
if n, _ := xmlName(i.field()); n == key {
|
||||
return i.value(), nil
|
||||
}
|
||||
}
|
||||
return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
|
||||
}
|
||||
|
||||
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
|
||||
|
||||
func walkXPath(e Elem, path string) (res Elem, err error) {
|
||||
for _, c := range strings.Split(path, "/") {
|
||||
if c == ".." {
|
||||
if e = e.enclosing(); e == nil {
|
||||
panic("path ..")
|
||||
return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
|
||||
}
|
||||
continue
|
||||
} else if c == "" {
|
||||
continue
|
||||
}
|
||||
m := xpathPart.FindStringSubmatch(c)
|
||||
if len(m) == 0 || len(m[0]) != len(c) {
|
||||
return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
|
||||
}
|
||||
v, err := findField(reflect.ValueOf(e), m[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
i := 0
|
||||
if m[2] != "" || v.Len() > 1 {
|
||||
if m[2] == "" {
|
||||
m[2] = "type"
|
||||
if m[3] = e.GetCommon().Default(); m[3] == "" {
|
||||
return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
|
||||
}
|
||||
}
|
||||
for ; i < v.Len(); i++ {
|
||||
vi := v.Index(i)
|
||||
key, err := findField(vi.Elem(), m[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = reflect.Indirect(key)
|
||||
if key.Kind() == reflect.String && key.String() == m[3] {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if i == v.Len() || v.Index(i).IsNil() {
|
||||
return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
|
||||
}
|
||||
e = v.Index(i).Interface().(Elem)
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
|
||||
}
|
||||
var ok bool
|
||||
if e, ok = v.Interface().(Elem); !ok {
|
||||
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
|
||||
} else if m[2] != "" || m[3] != "" {
|
||||
return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
|
||||
}
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
|
||||
const absPrefix = "//ldml/"
|
||||
|
||||
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
|
||||
if src != "locale" {
|
||||
if !strings.HasPrefix(path, absPrefix) {
|
||||
return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
|
||||
}
|
||||
path = path[len(absPrefix):]
|
||||
if e, err = cldr.resolve(src); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return walkXPath(e, path)
|
||||
}
|
||||
|
||||
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
|
||||
alias := e.GetCommon().Alias
|
||||
if alias == nil {
|
||||
return nil
|
||||
}
|
||||
a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
|
||||
}
|
||||
// Ensure alias node was already evaluated. TODO: avoid double evaluation.
|
||||
err = cldr.resolveAndMergeAlias(a)
|
||||
v := reflect.ValueOf(e).Elem()
|
||||
for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
|
||||
if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
|
||||
if _, attr := xmlName(i.field()); !attr {
|
||||
v.FieldByIndex(i.index).Set(vv)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (cldr *CLDR) aliasResolver() visitor {
|
||||
return func(v reflect.Value) (err error) {
|
||||
if e, ok := v.Addr().Interface().(Elem); ok {
|
||||
err = cldr.resolveAndMergeAlias(e)
|
||||
if err == nil && blocking[e.GetCommon().name] {
|
||||
return stopDescent
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// elements within blocking elements do not inherit.
|
||||
// Taken from CLDR's supplementalMetaData.xml.
|
||||
var blocking = map[string]bool{
|
||||
"identity": true,
|
||||
"supplementalData": true,
|
||||
"cldrTest": true,
|
||||
"collation": true,
|
||||
"transform": true,
|
||||
}
|
||||
|
||||
// Distinguishing attributes affect inheritance; two elements with different
|
||||
// distinguishing attributes are treated as different for purposes of inheritance,
|
||||
// except when such attributes occur in the indicated elements.
|
||||
// Taken from CLDR's supplementalMetaData.xml.
|
||||
var distinguishing = map[string][]string{
|
||||
"key": nil,
|
||||
"request_id": nil,
|
||||
"id": nil,
|
||||
"registry": nil,
|
||||
"alt": nil,
|
||||
"iso4217": nil,
|
||||
"iso3166": nil,
|
||||
"mzone": nil,
|
||||
"from": nil,
|
||||
"to": nil,
|
||||
"type": []string{
|
||||
"abbreviationFallback",
|
||||
"default",
|
||||
"mapping",
|
||||
"measurementSystem",
|
||||
"preferenceOrdering",
|
||||
},
|
||||
"numberSystem": nil,
|
||||
}
|
||||
|
||||
func in(set []string, s string) bool {
|
||||
for _, v := range set {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// attrKey computes a key based on the distinguishable attributes of
|
||||
// an element and its values.
|
||||
func attrKey(v reflect.Value, exclude ...string) string {
	parts := []string{}
	ename := v.Interface().(Elem).GetCommon().name
	v = v.Elem()
	for i := iter(v); !i.done(); i.next() {
		if name, attr := xmlName(i.field()); attr {
			if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
				v := i.value()
				if v.Kind() == reflect.Ptr {
					v = v.Elem()
				}
				if v.IsValid() {
					parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
				}
			}
		}
	}
	sort.Strings(parts)
	return strings.Join(parts, ";")
}

// Key returns a key for e derived from all distinguishing attributes
// except those specified by exclude.
func Key(e Elem, exclude ...string) string {
	return attrKey(reflect.ValueOf(e), exclude...)
}

// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
	child.setEnclosing(parent)
	v := reflect.ValueOf(child).Elem()
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		if vf.Kind() == reflect.Slice {
			for j := 0; j < vf.Len(); j++ {
				linkEnclosing(child, vf.Index(j).Interface().(Elem))
			}
		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
			linkEnclosing(child, vf.Interface().(Elem))
		}
	}
}

func setNames(e Elem, name string) {
	e.setName(name)
	v := reflect.ValueOf(e).Elem()
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		name, _ = xmlName(i.field())
		if vf.Kind() == reflect.Slice {
			for j := 0; j < vf.Len(); j++ {
				setNames(vf.Index(j).Interface().(Elem), name)
			}
		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
			setNames(vf.Interface().(Elem), name)
		}
	}
}

// deepCopy copies elements of v recursively. All elements of v that may
// be modified by inheritance are explicitly copied.
func deepCopy(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr:
		if v.IsNil() || v.Elem().Kind() != reflect.Struct {
			return v
		}
		nv := reflect.New(v.Elem().Type())
		nv.Elem().Set(v.Elem())
		deepCopyRec(nv.Elem(), v.Elem())
		return nv
	case reflect.Slice:
		nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
		for i := 0; i < v.Len(); i++ {
			deepCopyRec(nv.Index(i), v.Index(i))
		}
		return nv
	}
	panic("deepCopy: must be called with pointer or slice")
}

// deepCopyRec is only called by deepCopy.
func deepCopyRec(nv, v reflect.Value) {
	if v.Kind() == reflect.Struct {
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			if name, attr := xmlName(t.Field(i)); name != "" && !attr {
				deepCopyRec(nv.Field(i), v.Field(i))
			}
		}
	} else {
		nv.Set(deepCopy(v))
	}
}

// newNode is used to insert a missing node during inheritance.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
	n := reflect.New(v.Type())
	for i := iter(v); !i.done(); i.next() {
		if name, attr := xmlName(i.field()); name == "" || attr {
			n.Elem().FieldByIndex(i.index).Set(i.value())
		}
	}
	n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
	return n
}

// v, parent must be pointers to struct
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	nv := reflect.New(t)
	nv.Elem().Set(v)
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		f := i.field()
		name, attr := xmlName(f)
		if name == "" || attr {
			continue
		}
		pf := parent.FieldByIndex(i.index)
		if blocking[name] {
			if vf.IsNil() {
				vf = pf
			}
			nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
			continue
		}
		switch f.Type.Kind() {
		case reflect.Ptr:
			if f.Type.Elem().Kind() == reflect.Struct {
				if !vf.IsNil() {
					if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				} else if !pf.IsNil() {
					n := cldr.newNode(pf.Elem(), v)
					if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				}
			}
		case reflect.Slice:
			vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
			if err != nil {
				return reflect.Zero(t), err
			}
			nv.Elem().FieldByIndex(i.index).Set(vf)
		}
	}
	return nv, nil
}

func root(e Elem) *LDML {
	for ; e.enclosing() != nil; e = e.enclosing() {
	}
	return e.(*LDML)
}

// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
	if !v.IsNil() {
		e := v.Interface().(Elem).GetCommon()
		alias := e.Alias
		if alias == nil && !parent.IsNil() {
			alias = parent.Interface().(Elem).GetCommon().Alias
		}
		if alias != nil {
			a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
			if a != nil {
				if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
					return reflect.Value{}, err
				}
			}
		}
		if !parent.IsNil() {
			return cldr.inheritFields(v.Elem(), parent.Elem())
		}
	} else if parent.IsNil() {
		panic("should not reach here")
	}
	return v, nil
}

// Must be slice of struct pointers.
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	index := make(map[string]reflect.Value)
	if !v.IsNil() {
		for i := 0; i < v.Len(); i++ {
			vi := v.Index(i)
			key := attrKey(vi)
			index[key] = vi
		}
	}
	if !parent.IsNil() {
		for i := 0; i < parent.Len(); i++ {
			vi := parent.Index(i)
			key := attrKey(vi)
			if w, ok := index[key]; ok {
				index[key], err = cldr.inheritStructPtr(w, vi)
			} else {
				n := cldr.newNode(vi.Elem(), enc)
				index[key], err = cldr.inheritStructPtr(n, vi)
			}
			index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
			if err != nil {
				return v, err
			}
		}
	}
	keys := make([]string, 0, len(index))
	for k := range index {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	sl := reflect.MakeSlice(t, len(index), len(index))
	for i, k := range keys {
		sl.Index(i).Set(index[k])
	}
	return sl, nil
}

func parentLocale(loc string) string {
	parts := strings.Split(loc, "_")
	if len(parts) == 1 {
		return "root"
	}
	parts = parts[:len(parts)-1]
	key := strings.Join(parts, "_")
	return key
}

func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
	if r := cldr.resolved[loc]; r != nil {
		return r, nil
	}
	x := cldr.RawLDML(loc)
	if x == nil {
		return nil, fmt.Errorf("cldr: unknown locale %q", loc)
	}
	var v reflect.Value
	if loc == "root" {
		x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
		linkEnclosing(nil, x)
		err = cldr.aliasResolver().visit(x)
	} else {
		key := parentLocale(loc)
		var parent *LDML
		for ; cldr.locale[key] == nil; key = parentLocale(key) {
		}
		if parent, err = cldr.resolve(key); err != nil {
			return nil, err
		}
		v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
		x = v.Interface().(*LDML)
		linkEnclosing(nil, x)
	}
	if err != nil {
		return nil, err
	}
	cldr.resolved[loc] = x
	return x, err
}

// finalize finalizes the initialization of the raw LDML structs. It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
	for _, x := range cldr.locale {
		if filter != nil {
			v := reflect.ValueOf(x).Elem()
			t := v.Type()
			for i := 0; i < v.NumField(); i++ {
				f := t.Field(i)
				name, _ := xmlName(f)
				if name != "" && name != "identity" && !in(filter, name) {
					v.Field(i).Set(reflect.Zero(f.Type))
				}
			}
		}
		linkEnclosing(nil, x) // for resolving aliases and paths
		setNames(x, "ldml")
	}
}
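The functions above are unexported; callers normally reach the resolution machinery through the decoder API. A minimal sketch, assuming an unpacked copy of the CLDR core data — the path, locale, and section filter below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	var d cldr.Decoder
	// Restrict decoding to the sections we actually need.
	d.SetSectionFilter("localeDisplayNames", "numbers")

	data, err := d.DecodePath("data/core") // placeholder path to unpacked CLDR data
	if err != nil {
		log.Fatal(err)
	}

	// LDML resolves de_CH against its parents (de, then root) using the
	// inheritance code shown above.
	if _, err := data.LDML("de_CH"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded locales:", len(data.Locales()))
}
```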
144 vendor/golang.org/x/text/unicode/cldr/slice.go generated vendored Normal file
@@ -0,0 +1,144 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cldr

import (
	"fmt"
	"reflect"
	"sort"
)

// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
	ptr reflect.Value
	typ reflect.Type
}

// Value returns the reflect.Value of the underlying slice.
func (s *Slice) Value() reflect.Value {
	return s.ptr.Elem()
}

// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
	ptr := reflect.ValueOf(slicePtr)
	if ptr.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
	}
	sl := ptr.Elem()
	if sl.Kind() != reflect.Slice {
		panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
	}
	intf := reflect.TypeOf((*Elem)(nil)).Elem()
	if !sl.Type().Elem().Implements(intf) {
		panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
	}
	nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
	reflect.Copy(nsl, sl)
	sl.Set(nsl)
	return Slice{
		ptr: ptr,
		typ: sl.Type().Elem().Elem(),
	}
}

func (s Slice) indexForAttr(a string) []int {
	for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == a {
			return i.index
		}
	}
	panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}

// Filter filters s to only include elements for which fn returns true.
func (s Slice) Filter(fn func(e Elem) bool) {
	k := 0
	sl := s.Value()
	for i := 0; i < sl.Len(); i++ {
		vi := sl.Index(i)
		if fn(vi.Interface().(Elem)) {
			sl.Index(k).Set(vi)
			k++
		}
	}
	sl.Set(sl.Slice(0, k))
}

// Group finds elements in s for which fn returns the same value and groups
// them in a new Slice.
func (s Slice) Group(fn func(e Elem) string) []Slice {
	m := make(map[string][]reflect.Value)
	sl := s.Value()
	for i := 0; i < sl.Len(); i++ {
		vi := sl.Index(i)
		key := fn(vi.Interface().(Elem))
		m[key] = append(m[key], vi)
	}
	keys := []string{}
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	res := []Slice{}
	for _, k := range keys {
		nsl := reflect.New(sl.Type())
		nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
		res = append(res, MakeSlice(nsl.Interface()))
	}
	return res
}

// SelectAnyOf filters s to contain only elements for which attr matches
// any of the values.
func (s Slice) SelectAnyOf(attr string, values ...string) {
	index := s.indexForAttr(attr)
	s.Filter(func(e Elem) bool {
		vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
		return in(values, vf.String())
	})
}

// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any of
// the values in v.
// If more than one element in a group matches a value in v, preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
	index := s.indexForAttr(a)
	grouped := s.Group(func(e Elem) string { return Key(e, a) })
	sl := s.Value()
	sl.Set(sl.Slice(0, 0))
	for _, g := range grouped {
		e := reflect.Value{}
		found := len(v)
		gsl := g.Value()
		for i := 0; i < gsl.Len(); i++ {
			vi := gsl.Index(i).Elem().FieldByIndex(index)
			j := 0
			for ; j < len(v) && v[j] != vi.String(); j++ {
			}
			if j < found {
				found = j
				e = gsl.Index(i)
			}
		}
		if found < len(v) {
			sl.Set(reflect.Append(sl, e))
		}
	}
}

// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
	s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}
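A rough usage sketch for Slice: it is wrapped around a slice field of a decoded LDML tree, and the filters mutate that field in place. The `Characters.ExemplarCharacters` field and the attribute values below are illustrative assumptions about the generated xml.go types, not taken from this diff:

```go
// approvedExemplars trims ldml.Characters.ExemplarCharacters (assumed field
// name) down to the approved "auxiliary" and "index" sets.
func approvedExemplars(ldml *cldr.LDML) {
	// MakeSlice copies the backing array, so filtering here does not alter
	// other references into the decoded CLDR data.
	ex := cldr.MakeSlice(&ldml.Characters.ExemplarCharacters)
	ex.SelectAnyOf("type", "auxiliary", "index")
	ex.SelectDraft(cldr.Approved)
}
```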
1487 vendor/golang.org/x/text/unicode/cldr/xml.go generated vendored Normal file
File diff suppressed because it is too large.

113 vendor/golang.org/x/text/unicode/rangetable/gen.go generated vendored Normal file
@@ -0,0 +1,113 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"log"
	"reflect"
	"sort"
	"strings"
	"unicode"

	"golang.org/x/text/internal/gen"
	"golang.org/x/text/internal/ucd"
	"golang.org/x/text/unicode/rangetable"
)

var versionList = flag.String("versions", "",
	"list of versions for which to generate RangeTables")

const bootstrapMessage = `No versions specified.
To bootstrap the code generation, run:
	go run gen.go --versions=4.1.0,5.0.0,6.0.0,6.1.0,6.2.0,6.3.0,7.0.0

and ensure that the latest versions are included by checking:
	http://www.unicode.org/Public/`

func getVersions() []string {
	if *versionList == "" {
		log.Fatal(bootstrapMessage)
	}

	versions := strings.Split(*versionList, ",")
	sort.Strings(versions)

	// Ensure that at least the current version is included.
	for _, v := range versions {
		if v == gen.UnicodeVersion() {
			return versions
		}
	}

	versions = append(versions, gen.UnicodeVersion())
	sort.Strings(versions)
	return versions
}

func main() {
	gen.Init()

	versions := getVersions()

	w := &bytes.Buffer{}

	fmt.Fprintf(w, "//go:generate go run gen.go --versions=%s\n\n", strings.Join(versions, ","))
	fmt.Fprintf(w, "import \"unicode\"\n\n")

	vstr := func(s string) string { return strings.Replace(s, ".", "_", -1) }

	fmt.Fprintf(w, "var assigned = map[string]*unicode.RangeTable{\n")
	for _, v := range versions {
		fmt.Fprintf(w, "\t%q: assigned%s,\n", v, vstr(v))
	}
	fmt.Fprintf(w, "}\n\n")

	var size int
	for _, v := range versions {
		assigned := []rune{}

		r := gen.Open("http://www.unicode.org/Public/", "", v+"/ucd/UnicodeData.txt")
		ucd.Parse(r, func(p *ucd.Parser) {
			assigned = append(assigned, p.Rune(0))
		})

		rt := rangetable.New(assigned...)
		sz := int(reflect.TypeOf(unicode.RangeTable{}).Size())
		sz += int(reflect.TypeOf(unicode.Range16{}).Size()) * len(rt.R16)
		sz += int(reflect.TypeOf(unicode.Range32{}).Size()) * len(rt.R32)

		fmt.Fprintf(w, "// size %d bytes (%d KiB)\n", sz, sz/1024)
		fmt.Fprintf(w, "var assigned%s = ", vstr(v))
		print(w, rt)

		size += sz
	}

	fmt.Fprintf(w, "// Total size %d bytes (%d KiB)\n", size, size/1024)

	gen.WriteGoFile("tables.go", "rangetable", w.Bytes())
}

func print(w io.Writer, rt *unicode.RangeTable) {
	fmt.Fprintln(w, "&unicode.RangeTable{")
	fmt.Fprintln(w, "\tR16: []unicode.Range16{")
	for _, r := range rt.R16 {
		fmt.Fprintf(w, "\t\t{%#04x, %#04x, %d},\n", r.Lo, r.Hi, r.Stride)
	}
	fmt.Fprintln(w, "\t},")
	fmt.Fprintln(w, "\tR32: []unicode.Range32{")
	for _, r := range rt.R32 {
		fmt.Fprintf(w, "\t\t{%#08x, %#08x, %d},\n", r.Lo, r.Hi, r.Stride)
	}
	fmt.Fprintln(w, "\t},")
	fmt.Fprintf(w, "\tLatinOffset: %d,\n", rt.LatinOffset)
	fmt.Fprintf(w, "}\n\n")
}
260 vendor/golang.org/x/text/unicode/rangetable/merge.go generated vendored Normal file
@@ -0,0 +1,260 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package rangetable

import (
	"unicode"
)

// atEnd is used to mark a completed iteration.
const atEnd = unicode.MaxRune + 1

// Merge returns a new RangeTable that is the union of the given tables.
// It can also be used to compact user-created RangeTables. The entries in
// R16 and R32 for any given RangeTable should be sorted and non-overlapping.
//
// A lookup in the resulting table can be several times faster than using In
// directly on the ranges. Merge is an expensive operation, however, and only
// makes sense if one intends to use the result for more than a couple of
// hundred lookups.
func Merge(ranges ...*unicode.RangeTable) *unicode.RangeTable {
	rt := &unicode.RangeTable{}
	if len(ranges) == 0 {
		return rt
	}

	iter := tablesIter(make([]tableIndex, len(ranges)))

	for i, t := range ranges {
		iter[i] = tableIndex{t, 0, atEnd}
		if len(t.R16) > 0 {
			iter[i].next = rune(t.R16[0].Lo)
		}
	}

	if r0 := iter.next16(); r0.Stride != 0 {
		for {
			r1 := iter.next16()
			if r1.Stride == 0 {
				rt.R16 = append(rt.R16, r0)
				break
			}
			stride := r1.Lo - r0.Hi
			if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) {
				// Fully merge the next range into the previous one.
				r0.Hi, r0.Stride = r1.Hi, stride
				continue
			} else if stride == r0.Stride {
				// Move the first element of r1 to r0. This may eliminate an
				// entry.
				r0.Hi = r1.Lo
				r0.Stride = stride
				r1.Lo = r1.Lo + r1.Stride
				if r1.Lo > r1.Hi {
					continue
				}
			}
			rt.R16 = append(rt.R16, r0)
			r0 = r1
		}
	}

	for i, t := range ranges {
		iter[i] = tableIndex{t, 0, atEnd}
		if len(t.R32) > 0 {
			iter[i].next = rune(t.R32[0].Lo)
		}
	}

	if r0 := iter.next32(); r0.Stride != 0 {
		for {
			r1 := iter.next32()
			if r1.Stride == 0 {
				rt.R32 = append(rt.R32, r0)
				break
			}
			stride := r1.Lo - r0.Hi
			if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) {
				// Fully merge the next range into the previous one.
				r0.Hi, r0.Stride = r1.Hi, stride
				continue
			} else if stride == r0.Stride {
				// Move the first element of r1 to r0. This may eliminate an
				// entry.
				r0.Hi = r1.Lo
				r1.Lo = r1.Lo + r1.Stride
				if r1.Lo > r1.Hi {
					continue
				}
			}
			rt.R32 = append(rt.R32, r0)
			r0 = r1
		}
	}

	for i := 0; i < len(rt.R16) && rt.R16[i].Hi <= unicode.MaxLatin1; i++ {
		rt.LatinOffset = i + 1
	}

	return rt
}

type tableIndex struct {
	t    *unicode.RangeTable
	p    uint32
	next rune
}

type tablesIter []tableIndex

// sortIter does an insertion sort using the next field of tableIndex. Insertion
// sort is a good sorting algorithm for this case.
func sortIter(t []tableIndex) {
	for i := range t {
		for j := i; j > 0 && t[j-1].next > t[j].next; j-- {
			t[j], t[j-1] = t[j-1], t[j]
		}
	}
}

// next16 finds the next range to be added to the table. If ranges overlap
// between multiple tables it clips the result to a non-overlapping range if the
// elements are not fully subsumed. It returns a zero range if there are no more
// ranges.
func (ti tablesIter) next16() unicode.Range16 {
	sortIter(ti)

	t0 := ti[0]
	if t0.next == atEnd {
		return unicode.Range16{}
	}
	r0 := t0.t.R16[t0.p]
	r0.Lo = uint16(t0.next)

	// We restrict the Hi of the current range if it overlaps with another range.
	for i := range ti {
		tn := ti[i]
		// Since our tableIndices are sorted by next, we can break if there
		// is no overlap. The first value of a next range can always be merged
		// into the current one, so we can break in case of equality as well.
		if rune(r0.Hi) <= tn.next {
			break
		}
		rn := tn.t.R16[tn.p]
		rn.Lo = uint16(tn.next)

		// Limit r0.Hi based on next ranges in list, but allow it to overlap
		// with ranges as long as it subsumes it.
		m := (rn.Lo - r0.Lo) % r0.Stride
		if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) {
			// Overlap, take the min of the two Hi values: for simplicity's sake
			// we only process one range at a time.
			if r0.Hi > rn.Hi {
				r0.Hi = rn.Hi
			}
		} else {
			// Not a compatible stride. Set to the last possible value before
			// rn.Lo, but ensure there is at least one value.
			if x := rn.Lo - m; r0.Lo <= x {
				r0.Hi = x
			}
			break
		}
	}

	// Update the next values for each table.
	for i := range ti {
		tn := &ti[i]
		if rune(r0.Hi) < tn.next {
			break
		}
		rn := tn.t.R16[tn.p]
		stride := rune(rn.Stride)
		tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride))
		if rune(rn.Hi) < tn.next {
			if tn.p++; int(tn.p) == len(tn.t.R16) {
				tn.next = atEnd
			} else {
				tn.next = rune(tn.t.R16[tn.p].Lo)
			}
		}
	}

	if r0.Lo == r0.Hi {
		r0.Stride = 1
	}

	return r0
}

// next32 finds the next range to be added to the table. If ranges overlap
// between multiple tables it clips the result to a non-overlapping range if the
// elements are not fully subsumed. It returns a zero range if there are no more
// ranges.
func (ti tablesIter) next32() unicode.Range32 {
	sortIter(ti)

	t0 := ti[0]
	if t0.next == atEnd {
		return unicode.Range32{}
	}
	r0 := t0.t.R32[t0.p]
	r0.Lo = uint32(t0.next)

	// We restrict the Hi of the current range if it overlaps with another range.
	for i := range ti {
		tn := ti[i]
		// Since our tableIndices are sorted by next, we can break if there
		// is no overlap. The first value of a next range can always be merged
		// into the current one, so we can break in case of equality as well.
		if rune(r0.Hi) <= tn.next {
			break
		}
		rn := tn.t.R32[tn.p]
		rn.Lo = uint32(tn.next)

		// Limit r0.Hi based on next ranges in list, but allow it to overlap
		// with ranges as long as it subsumes it.
		m := (rn.Lo - r0.Lo) % r0.Stride
		if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) {
			// Overlap, take the min of the two Hi values: for simplicity's sake
			// we only process one range at a time.
			if r0.Hi > rn.Hi {
				r0.Hi = rn.Hi
			}
		} else {
			// Not a compatible stride. Set to the last possible value before
			// rn.Lo, but ensure there is at least one value.
			if x := rn.Lo - m; r0.Lo <= x {
				r0.Hi = x
			}
			break
		}
	}

	// Update the next values for each table.
	for i := range ti {
		tn := &ti[i]
		if rune(r0.Hi) < tn.next {
			break
		}
		rn := tn.t.R32[tn.p]
		stride := rune(rn.Stride)
		tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride))
		if rune(rn.Hi) < tn.next {
			if tn.p++; int(tn.p) == len(tn.t.R32) {
				tn.next = atEnd
			} else {
				tn.next = rune(tn.t.R32[tn.p].Lo)
			}
		}
	}

	if r0.Lo == r0.Hi {
		r0.Stride = 1
	}

	return r0
}
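As an illustration of the merge above (not taken from the diff), several standard library script tables can be collapsed into one so that repeated lookups do a single binary search:

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/unicode/rangetable"
)

func main() {
	// One merged table means a single lookup per rune instead of one per
	// script table.
	scripts := rangetable.Merge(unicode.Latin, unicode.Greek, unicode.Cyrillic)

	for _, r := range []rune{'a', 'Ω', 'д', '中'} {
		fmt.Printf("%q in merged table: %v\n", r, unicode.Is(scripts, r))
	}
}
```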
70 vendor/golang.org/x/text/unicode/rangetable/rangetable.go generated vendored Normal file
@@ -0,0 +1,70 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package rangetable provides utilities for creating and inspecting
// unicode.RangeTables.
package rangetable

import (
	"sort"
	"unicode"
)

// New creates a RangeTable from the given runes, which may contain duplicates.
func New(r ...rune) *unicode.RangeTable {
	if len(r) == 0 {
		return &unicode.RangeTable{}
	}

	sort.Sort(byRune(r))

	// Remove duplicates.
	k := 1
	for i := 1; i < len(r); i++ {
		if r[k-1] != r[i] {
			r[k] = r[i]
			k++
		}
	}

	var rt unicode.RangeTable
	for _, r := range r[:k] {
		if r <= 0xFFFF {
			rt.R16 = append(rt.R16, unicode.Range16{Lo: uint16(r), Hi: uint16(r), Stride: 1})
		} else {
			rt.R32 = append(rt.R32, unicode.Range32{Lo: uint32(r), Hi: uint32(r), Stride: 1})
		}
	}

	// Optimize RangeTable.
	return Merge(&rt)
}

type byRune []rune

func (r byRune) Len() int           { return len(r) }
func (r byRune) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r byRune) Less(i, j int) bool { return r[i] < r[j] }

// Visit visits all runes in the given RangeTable in order, calling fn for each.
func Visit(rt *unicode.RangeTable, fn func(rune)) {
	for _, r16 := range rt.R16 {
		for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
			fn(r)
		}
	}
	for _, r32 := range rt.R32 {
		for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
			fn(r)
		}
	}
}

// Assigned returns a RangeTable with all assigned code points for a given
// Unicode version. This includes graphic, format, control, and private-use
// characters. It returns nil if the data for the given version is not
// available.
func Assigned(version string) *unicode.RangeTable {
	return assigned[version]
}
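And a short sketch of the exported surface; `Assigned` may return nil here if the requested version is missing from the vendored tables.go:

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/unicode/rangetable"
)

func main() {
	// New sorts, deduplicates, and compacts the runes via Merge.
	vowels := rangetable.New('a', 'e', 'i', 'o', 'u', 'e')
	fmt.Println(unicode.Is(vowels, 'i')) // true

	rangetable.Visit(vowels, func(r rune) { fmt.Printf("%c ", r) })
	fmt.Println()

	// Count assigned code points for a given Unicode version, if available.
	if rt := rangetable.Assigned("9.0.0"); rt != nil {
		n := 0
		rangetable.Visit(rt, func(rune) { n++ })
		fmt.Println("assigned code points in 9.0.0:", n)
	}
}
```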
5735 vendor/golang.org/x/text/unicode/rangetable/tables.go generated vendored Normal file
File diff suppressed because it is too large.

97 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/clientset.go generated vendored
@@ -1,97 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientset

import (
	glog "github.com/golang/glog"
	discovery "k8s.io/client-go/discovery"
	rest "k8s.io/client-go/rest"
	flowcontrol "k8s.io/client-go/util/flowcontrol"
	clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
)

type Interface interface {
	Discovery() discovery.DiscoveryInterface
	ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface
	// Deprecated: please explicitly pick a version if possible.
	Cluster() clusterv1alpha1.ClusterV1alpha1Interface
}

// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
	*discovery.DiscoveryClient
	clusterV1alpha1 *clusterv1alpha1.ClusterV1alpha1Client
}

// ClusterV1alpha1 retrieves the ClusterV1alpha1Client
func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface {
	return c.clusterV1alpha1
}

// Deprecated: Cluster retrieves the default version of ClusterClient.
// Please explicitly pick a version.
func (c *Clientset) Cluster() clusterv1alpha1.ClusterV1alpha1Interface {
	return c.clusterV1alpha1
}

// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}

// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.clusterV1alpha1, err = clusterv1alpha1.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &cs, nil
}

// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.clusterV1alpha1 = clusterv1alpha1.NewForConfigOrDie(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
	return &cs
}

// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.clusterV1alpha1 = clusterv1alpha1.New(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}
17 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/doc.go generated vendored
@@ -1,17 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This package has the automatically generated clientset.
package clientset