[ADD] add statsd exporter module to node

This commit is contained in:
goodrain 2017-12-14 18:30:52 +08:00
parent aa2861ad04
commit 1aac0de5cd
85 changed files with 13013 additions and 18736 deletions

View File

@ -112,6 +112,7 @@ run-node:build-node
--run-mode=master --kube-conf=`pwd`/test/admin.kubeconfig \
--nodeid-file=`pwd`/test/host_id.conf \
--static-task-path=`pwd`/test/tasks \
--statsd.mapping-config=`pwd`/test/mapper.yml \
--log-level=debug
doc: ## build the docs

View File

@ -0,0 +1,19 @@
# Component Description
## API
## BUILDER
## ENTRANCE
## EVENTLOG
## GRCTL
## MQ
## NODE
## WEBCLI
## WORKER

View File

@ -117,7 +117,17 @@ type Conf struct {
// default 300
LockTTL int64
Etcd client.Config
StatsdConfig StatsdConfig
}
//StatsdConfig config options for the embedded statsd exporter
type StatsdConfig struct {
StatsdListenAddress string
StatsdListenUDP string
StatsdListenTCP string
MappingConfig string
ReadBuffer int
}
//AddFlags adds the node command line flags
@ -156,6 +166,11 @@ func (a *Conf) AddFlags(fs *pflag.FlagSet) {
//fs.StringSliceVar(&a.EventServerAddress, "event-servers", []string{"http://127.0.0.1:6363"}, "event message server address.")
fs.StringVar(&a.DBType, "db-type", "mysql", "db type mysql or etcd")
fs.StringVar(&a.DBConnectionInfo, "mysql", "admin:admin@tcp(127.0.0.1:3306)/region", "mysql db connection info")
fs.StringVar(&a.StatsdConfig.StatsdListenAddress, "statsd.listen-address", "", "The UDP address on which to receive statsd metric lines. DEPRECATED, use statsd.listen-udp instead.")
fs.StringVar(&a.StatsdConfig.StatsdListenUDP, "statsd.listen-udp", ":9125", "The UDP address on which to receive statsd metric lines. \"\" disables it.")
fs.StringVar(&a.StatsdConfig.StatsdListenTCP, "statsd.listen-tcp", ":9125", "The TCP address on which to receive statsd metric lines. \"\" disables it.")
fs.StringVar(&a.StatsdConfig.MappingConfig, "statsd.mapping-config", "", "Metric mapping configuration file name.")
fs.IntVar(&a.StatsdConfig.ReadBuffer, "statsd.read-buffer", 0, "Size (in bytes) of the operating system's receive buffer associated with the UDP connection. Please make sure the kernel parameter net.core.rmem_max is set to a value greater than the value specified.")
}
//SetLog sets up logging
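
For reference, a minimal standalone sketch of how the new statsd.* flags wire into a pflag flag set, mirroring Conf.AddFlags above (the program and flag-set name are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// StatsdConfig mirrors the fields added to option.Conf above.
type StatsdConfig struct {
	StatsdListenUDP string
	StatsdListenTCP string
	MappingConfig   string
	ReadBuffer      int
}

func main() {
	var c StatsdConfig
	fs := pflag.NewFlagSet("node", pflag.ExitOnError)
	fs.StringVar(&c.StatsdListenUDP, "statsd.listen-udp", ":9125", "UDP address for statsd metric lines")
	fs.StringVar(&c.StatsdListenTCP, "statsd.listen-tcp", ":9125", "TCP address for statsd metric lines")
	fs.StringVar(&c.MappingConfig, "statsd.mapping-config", "", "metric mapping configuration file")
	fs.IntVar(&c.ReadBuffer, "statsd.read-buffer", 0, "UDP read buffer size in bytes")
	fs.Parse([]string{"--statsd.mapping-config=test/mapper.yml"})
	fmt.Printf("udp=%s tcp=%s mapping=%s\n", c.StatsdListenUDP, c.StatsdListenTCP, c.MappingConfig)
}
```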

View File

@ -28,6 +28,8 @@ import (
"github.com/goodrain/rainbond/pkg/node/core/store"
"github.com/goodrain/rainbond/pkg/node/masterserver"
"github.com/goodrain/rainbond/pkg/node/nodeserver"
"github.com/goodrain/rainbond/pkg/node/statsd"
"github.com/prometheus/client_golang/prometheus"
"github.com/Sirupsen/logrus"
@ -86,8 +88,16 @@ func Run(c *option.Conf) error {
}
event.On(event.EXIT, ms.Stop)
}
//statsd exporter
registry := prometheus.NewRegistry()
exporter := statsd.CreateExporter(c.StatsdConfig, registry)
if err := exporter.Start(); err != nil {
logrus.Errorf("start statsd exporter server error,%s", err.Error())
return err
}
//start the API service
apiManager := api.NewManager(*s.Conf, s.HostNode, ms)
apiManager := api.NewManager(*s.Conf, s.HostNode, ms, exporter)
apiManager.Start(errChan)
defer apiManager.Stop()
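
Once the node is up, the new pipeline can be exercised by pushing a raw statsd line at the UDP listener; a minimal sketch, assuming the default :9125 address from the flags above (the metric name is hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	// "name:value|type" is the statsd wire format; "c" marks a counter.
	conn, err := net.Dial("udp", "127.0.0.1:9125")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Fprint(conn, "web.8080.http.request.get:1|c\n")
}
```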

View File

@ -330,7 +330,7 @@ class RepoBuilder():
h = self.user_cs_client
try:
h.update_service(self.service_id, json.dumps(update_items))
self.region_client.update_service_region(self.service_id,json.dumps(update_items))
self.region_client.update_service_region(self.service_id, json.dumps(update_items))
except h.CallApiError, e:
self.log.error(
"网络异常,更新应用镜像名称失败. {}".format(e.message),

View File

@ -24,6 +24,9 @@ import (
"time"
"github.com/goodrain/rainbond/pkg/node/masterserver"
"github.com/goodrain/rainbond/pkg/node/statsd"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/goodrain/rainbond/pkg/node/api/controller"
"github.com/goodrain/rainbond/pkg/node/api/model"
@ -44,28 +47,59 @@ import (
//Manager api manager
type Manager struct {
ctx context.Context
cancel context.CancelFunc
conf option.Conf
router *chi.Mux
node *model.HostNode
lID client.LeaseID // lease id
ms *masterserver.MasterServer
ctx context.Context
cancel context.CancelFunc
conf option.Conf
router *chi.Mux
node *model.HostNode
lID client.LeaseID // lease id
ms *masterserver.MasterServer
exporter *statsd.Exporter
}
//NewManager creates the api manager
func NewManager(c option.Conf, node *model.HostNode, ms *masterserver.MasterServer) *Manager {
func NewManager(c option.Conf, node *model.HostNode, ms *masterserver.MasterServer, exporter *statsd.Exporter) *Manager {
r := router.Routers(c.RunMode)
ctx, cancel := context.WithCancel(context.Background())
controller.Init(&c, ms)
return &Manager{
ctx: ctx,
cancel: cancel,
conf: c,
router: r,
node: node,
ms: ms,
m := &Manager{
ctx: ctx,
cancel: cancel,
conf: c,
router: r,
node: node,
ms: ms,
exporter: exporter,
}
m.router.Get("/app/metrics", m.HandleStatsd)
m.router.Get("/-/statsdreload", m.ReloadStatsdMappConfig)
return m
}
//ReloadStatsdMappConfig reloads the statsd mapping config via the API
func (m *Manager) ReloadStatsdMappConfig(w http.ResponseWriter, r *http.Request) {
if err := m.exporter.ReloadConfig(); err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
} else {
w.WriteHeader(200)
w.Write([]byte("Success reload"))
}
}
//HandleStatsd serves the statsd metrics endpoint
func (m *Manager) HandleStatsd(w http.ResponseWriter, r *http.Request) {
gatherers := prometheus.Gatherers{
prometheus.DefaultGatherer,
m.exporter.GetRegister(),
}
// Delegate http serving to Prometheus client library, which will call collector.Collect.
h := promhttp.HandlerFor(gatherers,
promhttp.HandlerOpts{
ErrorLog: logrus.StandardLogger(),
ErrorHandling: promhttp.ContinueOnError,
})
h.ServeHTTP(w, r)
}
//Start starts the api manager
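
The two routes registered in NewManager can then be exercised over plain HTTP; a minimal sketch, where the API address is an assumption to substitute with the node's real listen address:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	base := "http://127.0.0.1:6100" // hypothetical node API address

	// Ask the exporter to re-read its mapping config.
	reload, err := http.Get(base + "/-/statsdreload")
	if err != nil {
		log.Fatal(err)
	}
	reload.Body.Close()

	// Scrape the merged default + statsd registries.
	metrics, err := http.Get(base + "/app/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer metrics.Body.Close()
	io.Copy(os.Stdout, metrics.Body)
}
```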

View File

@ -55,7 +55,7 @@ func Routers(mode string) *chi.Mux {
})
r.Route("/nodes", func(r chi.Router) {
r.Get("/fullres",controller.RegionRes)
r.Get("/fullres", controller.RegionRes)
r.Get("/resources", controller.Resources)
r.Get("/capres", controller.CapRes)
r.Get("/", controller.GetNodes)
@ -67,9 +67,9 @@ func Routers(mode string) *chi.Mux {
r.Put("/{node_id}/unschedulable", controller.Cordon)
r.Put("/{node_id}/reschedulable", controller.UnCordon)
r.Put("/{node_id}/labels", controller.PutLabel)
r.Post("/{node_id}/down", controller.DownNode) //节点下线
r.Post("/{node_id}/up", controller.UpNode) //节点上线
r.Get("/{node_id}/instance", controller.Instances) //节点上线
r.Post("/{node_id}/down", controller.DownNode) //节点下线
r.Post("/{node_id}/up", controller.UpNode) //节点上线
r.Get("/{node_id}/instance", controller.Instances) //节点上线
//legacy API
r.Get("/{node}/details", controller.GetNodeDetails)
@ -114,6 +114,6 @@ func Routers(mode string) *chi.Mux {
r.Put("/-/taskreload", controller.ReloadStaticTasks)
}
//node monitoring
r.Get("/metrics", controller.NodeExporter)
r.Get("/node/metrics", controller.NodeExporter)
return r
}

View File

@ -855,7 +855,7 @@ func (t *TaskEngine) HandleJobRecord() {
case <-t.ctx.Done():
return
case event := <-ch:
if err:=event.Err();err!=nil{
if err := event.Err(); err != nil {
}
for _, ev := range event.Events {

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,60 @@
# Copyright 2013 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GO := GO15VENDOREXPERIMENT=1 go
PROMU := $(GOPATH)/bin/promu
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_NAME ?= statsd-exporter
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
all: format build test
style:
@echo ">> checking code style"
@! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
test:
@echo ">> running tests"
@$(GO) test -short $(pkgs)
format:
@echo ">> formatting code"
@$(GO) fmt $(pkgs)
vet:
@echo ">> vetting code"
@$(GO) vet $(pkgs)
build: promu
@echo ">> building binaries"
@$(PROMU) build --prefix $(PREFIX)
tarball: promu
@echo ">> building release tarball"
@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
docker:
@echo ">> building docker image"
@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
promu:
@GOOS=$(shell uname -s | tr A-Z a-z) \
GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
$(GO) get -u github.com/prometheus/promu
.PHONY: all style format build test vet tarball docker promu

View File

@ -0,0 +1,595 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"net"
"regexp"
"strconv"
"strings"
"unicode/utf8"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
)
const (
defaultHelp = "Metric autogenerated by statsd_exporter."
regErrF = "A change of configuration created inconsistent metrics for " +
"%q. You have to restart the statsd_exporter, and you should " +
"consider the effects on your monitoring setup. Error: %s"
)
var (
illegalCharsRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
hash = fnv.New64a()
strBuf bytes.Buffer // Used for hashing.
intBuf = make([]byte, 8)
)
// hashNameAndLabels returns a hash value of the provided name string and all
// the label names and values in the provided labels map.
//
// Not safe for concurrent use! (Uses a shared buffer and hasher to save on
// allocations.)
func hashNameAndLabels(name string, labels prometheus.Labels) uint64 {
hash.Reset()
strBuf.Reset()
strBuf.WriteString(name)
hash.Write(strBuf.Bytes())
binary.BigEndian.PutUint64(intBuf, model.LabelsToSignature(labels))
hash.Write(intBuf)
return hash.Sum64()
}
type CounterContainer struct {
Elements map[uint64]prometheus.Counter
Register prometheus.Registerer
}
func NewCounterContainer(Register prometheus.Registerer) *CounterContainer {
return &CounterContainer{
Elements: make(map[uint64]prometheus.Counter),
Register: Register,
}
}
func (c *CounterContainer) Get(metricName string, labels prometheus.Labels, help string) (prometheus.Counter, error) {
hash := hashNameAndLabels(metricName, labels)
counter, ok := c.Elements[hash]
if !ok {
counter = prometheus.NewCounter(prometheus.CounterOpts{
Name: metricName,
Help: help,
ConstLabels: labels,
})
if err := c.Register.Register(counter); err != nil {
return nil, err
}
c.Elements[hash] = counter
}
return counter, nil
}
type GaugeContainer struct {
Elements map[uint64]prometheus.Gauge
Register prometheus.Registerer
}
func NewGaugeContainer(Register prometheus.Registerer) *GaugeContainer {
return &GaugeContainer{
Elements: make(map[uint64]prometheus.Gauge),
Register: Register,
}
}
func (c *GaugeContainer) Get(metricName string, labels prometheus.Labels, help string) (prometheus.Gauge, error) {
hash := hashNameAndLabels(metricName, labels)
gauge, ok := c.Elements[hash]
if !ok {
gauge = prometheus.NewGauge(prometheus.GaugeOpts{
Name: metricName,
Help: help,
ConstLabels: labels,
})
if err := c.Register.Register(gauge); err != nil {
return nil, err
}
c.Elements[hash] = gauge
}
return gauge, nil
}
type SummaryContainer struct {
Elements map[uint64]prometheus.Summary
Register prometheus.Registerer
}
func NewSummaryContainer(Register prometheus.Registerer) *SummaryContainer {
return &SummaryContainer{
Elements: make(map[uint64]prometheus.Summary),
Register: Register,
}
}
func (c *SummaryContainer) Get(metricName string, labels prometheus.Labels, help string) (prometheus.Summary, error) {
hash := hashNameAndLabels(metricName, labels)
summary, ok := c.Elements[hash]
if !ok {
summary = prometheus.NewSummary(
prometheus.SummaryOpts{
Name: metricName,
Help: help,
ConstLabels: labels,
})
if err := c.Register.Register(summary); err != nil {
return nil, err
}
c.Elements[hash] = summary
}
return summary, nil
}
type HistogramContainer struct {
Elements map[uint64]prometheus.Histogram
mapper *MetricMapper
Register prometheus.Registerer
}
func NewHistogramContainer(mapper *MetricMapper, Register prometheus.Registerer) *HistogramContainer {
return &HistogramContainer{
Elements: make(map[uint64]prometheus.Histogram),
mapper: mapper,
Register: Register,
}
}
func (c *HistogramContainer) Get(metricName string, labels prometheus.Labels, help string, mapping *metricMapping) (prometheus.Histogram, error) {
hash := hashNameAndLabels(metricName, labels)
histogram, ok := c.Elements[hash]
if !ok {
buckets := c.mapper.Defaults.Buckets
if mapping != nil && mapping.Buckets != nil && len(mapping.Buckets) > 0 {
buckets = mapping.Buckets
}
histogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Name: metricName,
Help: help,
ConstLabels: labels,
Buckets: buckets,
})
c.Elements[hash] = histogram
if err := c.Register.Register(histogram); err != nil {
return nil, err
}
}
return histogram, nil
}
type Event interface {
MetricName() string
Value() float64
Labels() map[string]string
}
type CounterEvent struct {
metricName string
value float64
labels map[string]string
}
func (c *CounterEvent) MetricName() string { return c.metricName }
func (c *CounterEvent) Value() float64 { return c.value }
func (c *CounterEvent) Labels() map[string]string { return c.labels }
type GaugeEvent struct {
metricName string
value float64
relative bool
labels map[string]string
}
func (g *GaugeEvent) MetricName() string { return g.metricName }
func (g *GaugeEvent) Value() float64 { return g.value }
func (c *GaugeEvent) Labels() map[string]string { return c.labels }
type TimerEvent struct {
metricName string
value float64
labels map[string]string
}
func (t *TimerEvent) MetricName() string { return t.metricName }
func (t *TimerEvent) Value() float64 { return t.value }
func (c *TimerEvent) Labels() map[string]string { return c.labels }
type Events []Event
type Exporter struct {
Counters *CounterContainer
Gauges *GaugeContainer
Summaries *SummaryContainer
Histograms *HistogramContainer
mapper *MetricMapper
}
func escapeMetricName(metricName string) string {
// If a metric starts with a digit, prepend an underscore.
if metricName[0] >= '0' && metricName[0] <= '9' {
metricName = "_" + metricName
}
// Replace all illegal metric chars with underscores.
metricName = illegalCharsRE.ReplaceAllString(metricName, "_")
return metricName
}
func (b *Exporter) Listen(e <-chan Events) {
for {
events, ok := <-e
if !ok {
log.Debug("Channel is closed. Break out of Exporter.Listener.")
return
}
for _, event := range events {
var help string
metricName := ""
prometheusLabels := event.Labels()
mapping, labels, present := b.mapper.getMapping(event.MetricName())
if mapping == nil {
mapping = &metricMapping{}
}
if mapping.HelpText == "" {
help = defaultHelp
} else {
help = mapping.HelpText
}
if present {
metricName = mapping.Name
for label, value := range labels {
prometheusLabels[label] = value
}
} else {
eventsUnmapped.Inc()
metricName = escapeMetricName(event.MetricName())
}
switch ev := event.(type) {
case *CounterEvent:
// We don't accept negative values for counters. Incrementing the counter with a negative number
// will cause the exporter to panic. Instead we will warn and continue to the next event.
if event.Value() < 0.0 {
log.Debugf("Counter %q is: '%f' (counter must be non-negative value)", metricName, event.Value())
eventStats.WithLabelValues("illegal_negative_counter").Inc()
continue
}
counter, err := b.Counters.Get(
metricName,
prometheusLabels,
help,
)
if err == nil {
counter.Add(event.Value())
eventStats.WithLabelValues("counter").Inc()
} else {
log.Debugf(regErrF, metricName, err)
conflictingEventStats.WithLabelValues("counter").Inc()
}
case *GaugeEvent:
gauge, err := b.Gauges.Get(
metricName,
prometheusLabels,
help,
)
if err == nil {
if ev.relative {
gauge.Add(event.Value())
} else {
gauge.Set(event.Value())
}
eventStats.WithLabelValues("gauge").Inc()
} else {
log.Debugf(regErrF, metricName, err)
conflictingEventStats.WithLabelValues("gauge").Inc()
}
case *TimerEvent:
t := timerTypeDefault
if mapping != nil {
t = mapping.TimerType
}
if t == timerTypeDefault {
t = b.mapper.Defaults.TimerType
}
switch t {
case timerTypeHistogram:
histogram, err := b.Histograms.Get(
metricName,
prometheusLabels,
help,
mapping,
)
if err == nil {
histogram.Observe(event.Value() / 1000) // prometheus presumes seconds, statsd uses milliseconds
eventStats.WithLabelValues("timer").Inc()
} else {
log.Debugf(regErrF, metricName, err)
conflictingEventStats.WithLabelValues("timer").Inc()
}
case timerTypeDefault, timerTypeSummary:
summary, err := b.Summaries.Get(
metricName,
prometheusLabels,
help,
)
if err == nil {
summary.Observe(event.Value())
eventStats.WithLabelValues("timer").Inc()
} else {
log.Debugf(regErrF, metricName, err)
conflictingEventStats.WithLabelValues("timer").Inc()
}
default:
panic(fmt.Sprintf("unknown timer type '%s'", t))
}
default:
log.Debugln("Unsupported event type")
eventStats.WithLabelValues("illegal").Inc()
}
}
}
}
//NewExporter creates an exporter backed by the given registry
func NewExporter(mapper *MetricMapper, Register prometheus.Registerer) *Exporter {
return &Exporter{
Counters: NewCounterContainer(Register),
Gauges: NewGaugeContainer(Register),
Summaries: NewSummaryContainer(Register),
Histograms: NewHistogramContainer(mapper, Register),
mapper: mapper,
}
}
func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (Event, error) {
switch statType {
case "c":
return &CounterEvent{
metricName: metric,
value: float64(value),
labels: labels,
}, nil
case "g":
return &GaugeEvent{
metricName: metric,
value: float64(value),
relative: relative,
labels: labels,
}, nil
case "ms", "h":
return &TimerEvent{
metricName: metric,
value: float64(value),
labels: labels,
}, nil
case "s":
return nil, fmt.Errorf("No support for StatsD sets")
default:
return nil, fmt.Errorf("Bad stat type %s", statType)
}
}
func parseDogStatsDTagsToLabels(component string) map[string]string {
labels := map[string]string{}
tagsReceived.Inc()
tags := strings.Split(component, ",")
for _, t := range tags {
t = strings.TrimPrefix(t, "#")
kv := strings.SplitN(t, ":", 2)
if len(kv) < 2 || len(kv[1]) == 0 {
tagErrors.Inc()
log.Debugf("Malformed or empty DogStatsD tag %s in component %s", t, component)
continue
}
labels[escapeMetricName(kv[0])] = kv[1]
}
return labels
}
func lineToEvents(line string) Events {
events := Events{}
if line == "" {
return events
}
elements := strings.SplitN(line, ":", 2)
if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
sampleErrors.WithLabelValues("malformed_line").Inc()
log.Debugln("Bad line from StatsD:", line)
return events
}
metric := elements[0]
var samples []string
if strings.Contains(elements[1], "|#") {
// using datadog extensions, disable multi-metrics
samples = elements[1:]
} else {
samples = strings.Split(elements[1], ":")
}
samples:
for _, sample := range samples {
samplesReceived.Inc()
components := strings.Split(sample, "|")
samplingFactor := 1.0
if len(components) < 2 || len(components) > 4 {
sampleErrors.WithLabelValues("malformed_component").Inc()
log.Debugln("Bad component on line:", line)
continue
}
valueStr, statType := components[0], components[1]
var relative = false
if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 {
relative = true
}
value, err := strconv.ParseFloat(valueStr, 64)
if err != nil {
log.Debugf("Bad value %s on line: %s", valueStr, line)
sampleErrors.WithLabelValues("malformed_value").Inc()
continue
}
multiplyEvents := 1
labels := map[string]string{}
if len(components) >= 3 {
for _, component := range components[2:] {
if len(component) == 0 {
log.Debugln("Empty component on line: ", line)
sampleErrors.WithLabelValues("malformed_component").Inc()
continue samples
}
}
for _, component := range components[2:] {
switch component[0] {
case '@':
if statType != "c" && statType != "ms" {
log.Debugln("Illegal sampling factor for non-counter metric on line", line)
sampleErrors.WithLabelValues("illegal_sample_factor").Inc()
continue
}
samplingFactor, err = strconv.ParseFloat(component[1:], 64)
if err != nil {
log.Debugf("Invalid sampling factor %s on line %s", component[1:], line)
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
}
if samplingFactor == 0 {
samplingFactor = 1
}
if statType == "c" {
value /= samplingFactor
} else if statType == "ms" {
multiplyEvents = int(1 / samplingFactor)
}
case '#':
labels = parseDogStatsDTagsToLabels(component)
default:
log.Debugf("Invalid sampling factor or tag section %s on line %s", components[2], line)
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
continue
}
}
}
for i := 0; i < multiplyEvents; i++ {
event, err := buildEvent(statType, metric, value, relative, labels)
if err != nil {
log.Debugf("Error building event on line %s: %s", line, err)
sampleErrors.WithLabelValues("illegal_event").Inc()
continue
}
events = append(events, event)
}
}
return events
}
type StatsDUDPListener struct {
Conn *net.UDPConn
}
func (l *StatsDUDPListener) Listen(e chan<- Events) {
buf := make([]byte, 65535)
for {
n, _, err := l.Conn.ReadFromUDP(buf)
if err != nil {
log.Fatal(err)
}
l.handlePacket(buf[0:n], e)
}
}
func (l *StatsDUDPListener) handlePacket(packet []byte, e chan<- Events) {
udpPackets.Inc()
lines := strings.Split(string(packet), "\n")
events := Events{}
for _, line := range lines {
linesReceived.Inc()
events = append(events, lineToEvents(line)...)
}
e <- events
}
type StatsDTCPListener struct {
Conn *net.TCPListener
}
func (l *StatsDTCPListener) Listen(e chan<- Events) {
defer l.Conn.Close()
for {
c, err := l.Conn.AcceptTCP()
if err != nil {
log.Fatalf("AcceptTCP failed: %v", err)
}
go l.handleConn(c, e)
}
}
func (l *StatsDTCPListener) handleConn(c *net.TCPConn, e chan<- Events) {
defer c.Close()
tcpConnections.Inc()
r := bufio.NewReader(c)
for {
line, isPrefix, err := r.ReadLine()
if err != nil {
if err != io.EOF {
tcpErrors.Inc()
log.Debugf("Read %s failed: %v", c.RemoteAddr(), err)
}
break
}
if isPrefix {
tcpLineTooLong.Inc()
log.Debugf("Read %s failed: line too long", c.RemoteAddr())
break
}
linesReceived.Inc()
e <- lineToEvents(string(line))
}
}
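
A hypothetical in-package test sketching the sampling behavior of lineToEvents above: a counter sample tagged with @0.1 is scaled by the inverse of the sampling factor.

```go
package exporter

import "testing"

func TestLineToEventsSampledCounter(t *testing.T) {
	// "foo:1|c|@0.1" is one counter increment sampled at 10%,
	// which the parser scales up to a value of 10.
	events := lineToEvents("foo:1|c|@0.1")
	if len(events) != 1 {
		t.Fatalf("want 1 event, got %d", len(events))
	}
	if got := events[0].Value(); got != 10 {
		t.Fatalf("want value 10, got %f", got)
	}
}
```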

View File

@ -0,0 +1,158 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"fmt"
"io/ioutil"
"regexp"
"strings"
"sync"
"github.com/prometheus/client_golang/prometheus"
yaml "gopkg.in/yaml.v2"
)
var (
identifierRE = `[a-zA-Z_][a-zA-Z0-9_]+`
statsdMetricRE = `[a-zA-Z_](-?[a-zA-Z0-9_])+`
metricLineRE = regexp.MustCompile(`^(\*\.|` + statsdMetricRE + `\.)+(\*|` + statsdMetricRE + `)$`)
labelLineRE = regexp.MustCompile(`^(` + identifierRE + `)\s*=\s*"(.*)"$`)
metricNameRE = regexp.MustCompile(`^` + identifierRE + `$`)
)
type mapperConfigDefaults struct {
TimerType timerType `yaml:"timer_type"`
Buckets []float64 `yaml:"buckets"`
MatchType matchType `yaml:"match_type"`
}
//MetricMapper maps statsd metric names to prometheus metrics
type MetricMapper struct {
Defaults mapperConfigDefaults `yaml:"defaults"`
Mappings []metricMapping `yaml:"mappings"`
mutex sync.Mutex
}
type metricMapping struct {
Match string `yaml:"match"`
Name string `yaml:"name"`
regex *regexp.Regexp
Labels prometheus.Labels `yaml:"labels"`
TimerType timerType `yaml:"timer_type"`
Buckets []float64 `yaml:"buckets"`
MatchType matchType `yaml:"match_type"`
HelpText string `yaml:"help"`
}
func (m *MetricMapper) InitFromYAMLString(fileContents string) error {
var n MetricMapper
if err := yaml.Unmarshal([]byte(fileContents), &n); err != nil {
return err
}
if n.Defaults.Buckets == nil || len(n.Defaults.Buckets) == 0 {
n.Defaults.Buckets = prometheus.DefBuckets
}
if n.Defaults.MatchType == matchTypeDefault {
n.Defaults.MatchType = matchTypeGlob
}
for i := range n.Mappings {
currentMapping := &n.Mappings[i]
// check that label is correct
for k := range currentMapping.Labels {
if !metricNameRE.MatchString(k) {
return fmt.Errorf("invalid label key: %s", k)
}
}
if currentMapping.Name == "" {
return fmt.Errorf("line %d: metric mapping didn't set a metric name", i)
}
if !metricNameRE.MatchString(currentMapping.Name) {
return fmt.Errorf("metric name '%s' doesn't match regex '%s'", currentMapping.Name, metricNameRE)
}
if currentMapping.MatchType == "" {
currentMapping.MatchType = n.Defaults.MatchType
}
if currentMapping.MatchType == matchTypeGlob {
if !metricLineRE.MatchString(currentMapping.Match) {
return fmt.Errorf("invalid match: %s", currentMapping.Match)
}
// Translate the glob-style metric match line into a proper regex that we
// can use to match metrics later on.
metricRe := strings.Replace(currentMapping.Match, ".", "\\.", -1)
metricRe = strings.Replace(metricRe, "*", "([^.]*)", -1)
currentMapping.regex = regexp.MustCompile("^" + metricRe + "$")
} else {
currentMapping.regex = regexp.MustCompile(currentMapping.Match)
}
if currentMapping.TimerType == "" {
currentMapping.TimerType = n.Defaults.TimerType
}
if currentMapping.Buckets == nil || len(currentMapping.Buckets) == 0 {
currentMapping.Buckets = n.Defaults.Buckets
}
}
m.mutex.Lock()
defer m.mutex.Unlock()
m.Defaults = n.Defaults
m.Mappings = n.Mappings
mappingsCount.Set(float64(len(n.Mappings)))
return nil
}
func (m *MetricMapper) InitFromFile(fileName string) error {
mappingStr, err := ioutil.ReadFile(fileName)
if err != nil {
return err
}
return m.InitFromYAMLString(string(mappingStr))
}
func (m *MetricMapper) getMapping(statsdMetric string) (*metricMapping, prometheus.Labels, bool) {
m.mutex.Lock()
defer m.mutex.Unlock()
for _, mapping := range m.Mappings {
matches := mapping.regex.FindStringSubmatchIndex(statsdMetric)
if len(matches) == 0 {
continue
}
labels := prometheus.Labels{}
for label, valueExpr := range mapping.Labels {
value := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches)
labels[label] = string(value)
}
return &mapping, labels, true
}
return nil, nil, false
}
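
A hypothetical in-package test sketching how a glob mapping expands its $n references into label values:

```go
package exporter

import "testing"

func TestGlobMappingLabels(t *testing.T) {
	m := &MetricMapper{}
	cfg := `
mappings:
- match: "*.request.*"
  name: "app_request"
  labels:
    service: "$1"
    method: "$2"
`
	if err := m.InitFromYAMLString(cfg); err != nil {
		t.Fatal(err)
	}
	mapping, labels, ok := m.getMapping("web.request.get")
	if !ok || mapping.Name != "app_request" {
		t.Fatalf("unexpected mapping: %+v", mapping)
	}
	if labels["service"] != "web" || labels["method"] != "get" {
		t.Fatalf("unexpected labels: %v", labels)
	}
}
```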

View File

@ -0,0 +1,41 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import "fmt"
type matchType string
const (
matchTypeGlob matchType = "glob"
matchTypeRegex matchType = "regex"
matchTypeDefault matchType = ""
)
func (t *matchType) UnmarshalYAML(unmarshal func(interface{}) error) error {
var v string
if err := unmarshal(&v); err != nil {
return err
}
switch matchType(v) {
case matchTypeRegex:
*t = matchTypeRegex
case matchTypeGlob, matchTypeDefault:
*t = matchTypeGlob
default:
return fmt.Errorf("invalid match type %q", v)
}
return nil
}
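
In a mapping file, match_type selects per mapping between the glob translation above (the default) and a raw regular expression; a sketch with hypothetical metric names:

```yaml
mappings:
- match: "*.request.*"            # glob: each * captures one dot-free segment
  name: "app_request"
  labels:
    service: "$1"
    method: "$2"
- match: 'client-(\w+)\.latency'  # regex: used verbatim, opt-in per mapping
  match_type: regex
  name: "client_latency"
  labels:
    client: "$1"
```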

View File

@ -0,0 +1,122 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
eventStats = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "statsd_exporter_events_total",
Help: "The total number of StatsD events seen.",
},
[]string{"type"},
)
eventsUnmapped = prometheus.NewCounter(prometheus.CounterOpts{
Name: "statsd_exporter_events_unmapped_total",
Help: "The total number of StatsD events no mapping was found for.",
})
udpPackets = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_udp_packets_total",
Help: "The total number of StatsD packets received over UDP.",
},
)
tcpConnections = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_tcp_connections_total",
Help: "The total number of TCP connections handled.",
},
)
tcpErrors = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_tcp_connection_errors_total",
Help: "The number of errors encountered reading from TCP.",
},
)
tcpLineTooLong = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_tcp_too_long_lines_total",
Help: "The number of lines discarded due to being too long.",
},
)
linesReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_lines_total",
Help: "The total number of StatsD lines received.",
},
)
samplesReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_samples_total",
Help: "The total number of StatsD samples received.",
},
)
sampleErrors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "statsd_exporter_sample_errors_total",
Help: "The total number of errors parsing StatsD samples.",
},
[]string{"reason"},
)
tagsReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_tags_total",
Help: "The total number of DogStatsD tags processed.",
},
)
tagErrors = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "statsd_exporter_tag_errors_total",
Help: "The number of errors parsign DogStatsD tags.",
},
)
ConfigLoads = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "statsd_exporter_config_reloads_total",
Help: "The number of configuration reloads.",
},
[]string{"outcome"},
)
mappingsCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "statsd_exporter_loaded_mappings",
Help: "The current number of configured metric mappings.",
})
conflictingEventStats = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "statsd_exporter_events_conflict_total",
Help: "The total number of StatsD events with conflicting names.",
},
[]string{"type"},
)
)
//MetryInit registers the exporter's internal metrics with the registry
func MetryInit(registry *prometheus.Registry) {
registry.MustRegister(eventStats)
registry.MustRegister(udpPackets)
registry.MustRegister(tcpConnections)
registry.MustRegister(tcpErrors)
registry.MustRegister(tcpLineTooLong)
registry.MustRegister(linesReceived)
registry.MustRegister(samplesReceived)
registry.MustRegister(sampleErrors)
registry.MustRegister(tagsReceived)
registry.MustRegister(tagErrors)
registry.MustRegister(ConfigLoads)
registry.MustRegister(mappingsCount)
registry.MustRegister(conflictingEventStats)
}

View File

@ -0,0 +1,41 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import "fmt"
type timerType string
const (
timerTypeHistogram timerType = "histogram"
timerTypeSummary timerType = "summary"
timerTypeDefault timerType = ""
)
func (t *timerType) UnmarshalYAML(unmarshal func(interface{}) error) error {
var v string
if err := unmarshal(&v); err != nil {
return err
}
switch timerType(v) {
case timerTypeHistogram:
*t = timerTypeHistogram
case timerTypeSummary, timerTypeDefault:
*t = timerTypeSummary
default:
return fmt.Errorf("invalid timer type '%s'", v)
}
return nil
}
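
timer_type resolves the same way, per mapping with a fall-back to the defaults section; a sketch with hypothetical names showing a histogram with explicit buckets:

```yaml
defaults:
  timer_type: summary
mappings:
- match: "*.request.time"
  name: "app_request_duration"
  timer_type: histogram           # observe into buckets instead of a summary
  buckets: [0.005, 0.05, 0.5, 1, 5]
  labels:
    service: "$1"
```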

View File

@ -0,0 +1,223 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package statsd
import (
"fmt"
"net"
"strconv"
"yiyun/common/log"
"github.com/Sirupsen/logrus"
"github.com/howeyc/fsnotify"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/version"
"github.com/goodrain/rainbond/cmd/node/option"
"github.com/goodrain/rainbond/pkg/node/statsd/exporter"
)
//Exporter receives statsd metrics and exports them as prometheus metrics
type Exporter struct {
statsdListenAddress string
statsdListenUDP string
statsdListenTCP string
mappingConfig string
readBuffer int
exporter *exporter.Exporter
register *prometheus.Registry
mapper *exporter.MetricMapper
}
//CreateExporter creates an exporter from the statsd configuration
func CreateExporter(sc option.StatsdConfig, register *prometheus.Registry) *Exporter {
exp := &Exporter{
statsdListenAddress: sc.StatsdListenAddress,
statsdListenTCP: sc.StatsdListenTCP,
statsdListenUDP: sc.StatsdListenUDP,
readBuffer: sc.ReadBuffer,
mappingConfig: sc.MappingConfig,
register: register,
}
exporter.MetryInit(register)
return exp
}
//Start starts the statsd listeners and the exporter
func (e *Exporter) Start() error {
e.register.Register(version.NewCollector("statsd_exporter"))
if e.statsdListenAddress != "" {
logrus.Warnln("Warning: statsd.listen-address is DEPRECATED, please use statsd.listen-udp instead.")
e.statsdListenUDP = e.statsdListenAddress
}
if e.statsdListenUDP == "" && e.statsdListenTCP == "" {
logrus.Fatalln("At least one of UDP/TCP listeners must be specified.")
return fmt.Errorf("At least one of UDP/TCP listeners must be specified")
}
logrus.Infoln("Starting StatsD -> Prometheus Exporter", version.Info())
logrus.Infoln("Build context", version.BuildContext())
logrus.Infof("Accepting StatsD Traffic: UDP %v, TCP %v", e.statsdListenUDP, e.statsdListenTCP)
events := make(chan exporter.Events, 1024)
if e.statsdListenUDP != "" {
udpListenAddr := udpAddrFromString(e.statsdListenUDP)
uconn, err := net.ListenUDP("udp", udpListenAddr)
if err != nil {
return err
}
if e.readBuffer != 0 {
err = uconn.SetReadBuffer(e.readBuffer)
if err != nil {
return err
}
}
ul := &exporter.StatsDUDPListener{Conn: uconn}
go ul.Listen(events)
}
if e.statsdListenTCP != "" {
tcpListenAddr := tcpAddrFromString(e.statsdListenTCP)
tconn, err := net.ListenTCP("tcp", tcpListenAddr)
if err != nil {
return err
}
tl := &exporter.StatsDTCPListener{Conn: tconn}
go tl.Listen(events)
}
mapper := &exporter.MetricMapper{}
if e.mappingConfig != "" {
err := mapper.InitFromFile(e.mappingConfig)
if err != nil {
log.Fatal("Error loading config:", err)
return err
}
//Watching the config file for changes and auto-reloading is risky; reload through the API instead
//go watchConfig(e.mappingConfig, mapper)
}
exporter := exporter.NewExporter(mapper, e.register)
e.exporter = exporter
e.mapper = mapper
go exporter.Listen(events)
return nil
}
// Describe implements the prometheus.Collector interface.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
}
// Collect implements the prometheus.Collector interface.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
}
//GetRegister returns the prometheus registry used by the exporter
func (e *Exporter) GetRegister() *prometheus.Registry {
return e.register
}
func ipPortFromString(addr string) (*net.IPAddr, int) {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
logrus.Fatal("Bad StatsD listening address", addr)
}
if host == "" {
host = "0.0.0.0"
}
ip, err := net.ResolveIPAddr("ip", host)
if err != nil {
logrus.Fatalf("Unable to resolve %s: %s", host, err)
}
port, err := strconv.Atoi(portStr)
if err != nil || port < 0 || port > 65535 {
logrus.Fatalf("Bad port %s: %s", portStr, err)
}
return ip, port
}
func udpAddrFromString(addr string) *net.UDPAddr {
ip, port := ipPortFromString(addr)
return &net.UDPAddr{
IP: ip.IP,
Port: port,
Zone: ip.Zone,
}
}
func tcpAddrFromString(addr string) *net.TCPAddr {
ip, port := ipPortFromString(addr)
return &net.TCPAddr{
IP: ip.IP,
Port: port,
Zone: ip.Zone,
}
}
func watchConfig(fileName string, mapper *exporter.MetricMapper) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
logrus.Fatal(err)
}
err = watcher.WatchFlags(fileName, fsnotify.FSN_MODIFY)
if err != nil {
logrus.Fatal(err)
}
for {
select {
case ev := <-watcher.Event:
logrus.Infof("Config file changed (%s), attempting reload", ev)
err = mapper.InitFromFile(fileName)
if err != nil {
logrus.Errorln("Error reloading config:", err)
exporter.ConfigLoads.WithLabelValues("failure").Inc()
} else {
logrus.Infoln("Config reloaded successfully")
exporter.ConfigLoads.WithLabelValues("success").Inc()
}
// Re-add the file watcher since it can get lost on some changes. E.g.
// saving a file with vim results in a RENAME-MODIFY-DELETE event
// sequence, after which the newly written file is no longer watched.
err = watcher.WatchFlags(fileName, fsnotify.FSN_MODIFY)
case err := <-watcher.Error:
logrus.Errorln("Error watching config:", err)
}
}
}
//ReloadConfig reloads the mapper config file
func (e *Exporter) ReloadConfig() (err error) {
logrus.Infof("Config file changed, attempting reload")
err = e.mapper.InitFromFile(e.mappingConfig)
if err != nil {
logrus.Errorln("Error reloading config:", err)
exporter.ConfigLoads.WithLabelValues("failure").Inc()
} else {
logrus.Infoln("Config reloaded successfully")
exporter.ConfigLoads.WithLabelValues("success").Inc()
}
return
}
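
Putting the pieces together outside the node binary, a minimal sketch of embedding this exporter (import paths as in this commit; the listen addresses and mapping file are assumptions):

```go
package main

import (
	"log"
	"net/http"

	"github.com/goodrain/rainbond/cmd/node/option"
	"github.com/goodrain/rainbond/pkg/node/statsd"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	registry := prometheus.NewRegistry()
	exp := statsd.CreateExporter(option.StatsdConfig{
		StatsdListenUDP: ":9125",
		StatsdListenTCP: ":9125",
		MappingConfig:   "test/mapper.yml",
	}, registry)
	if err := exp.Start(); err != nil {
		log.Fatal(err)
	}
	// Expose the exporter's registry the same way the node API does.
	http.Handle("/metrics", promhttp.HandlerFor(exp.GetRegister(), promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9102", nil))
}
```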

16
test/mapper.yml Normal file
View File

@ -0,0 +1,16 @@
#
mappings:
- match: "*.*.*.request.*"
name: "app_http_request"
labels:
service_id: "$1"
port: "$2"
protocol: $3
method: "$4"
- match: "*.*.*.request.unusual.*"
name: "app_http_request_unusual"
labels:
service_id: "$1"
port: "$2"
protocol: $3
code: "$4"
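
With this mapping, a request line such as the following (identifiers hypothetical) becomes a labeled counter; because each * matches a single dot-free segment, six-segment "unusual" lines can only hit the second rule:

```
web.8080.http.request.get:1|c
=> app_http_request{service_id="web", port="8080", protocol="http", method="get"}
```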

28
vendor/github.com/howeyc/fsnotify/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,28 @@
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# You can update this list using the following command:
#
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
# Please keep the list sorted.
Adrien Bustany <adrien@bustany.org>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Dave Cheney <dave@cheney.net>
Francisco Souza <f@souza.cc>
John C Barstow
Kelvin Fo <vmirage@gmail.com>
Nathan Youngman <git@nathany.com>
Paul Hammond <paul@paulhammond.org>
Pursuit92 <JoshChase@techpursuit.net>
Rob Figueiredo <robfig@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
henrikedwards <henrik.edwards@gmail.com>

160
vendor/github.com/howeyc/fsnotify/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,160 @@
# Changelog
## v0.9.0 / 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## v0.8.12 / 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## v0.8.11 / 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
## v0.8.10 / 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## v0.8.9 / 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## v0.8.8 / 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## v0.8.7 / 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
* [Fix] race in symlink test [#45][] (reported by @srid)
* [Fix] tests on Windows
* lower case error messages
## v0.8.6 / 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## v0.8.5 / 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## v0.8.4 / 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## v0.8.3 / 2013-03-13
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## v0.8.2 / 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## v0.8.1 / 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## v0.8.0 / 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## v0.7.4 / 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## v0.7.3 / 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## v0.7.2 / 2012-09-01
* kqueue: events for created directories
## v0.7.1 / 2012-07-14
* [Fix] for renaming files
## v0.7.0 / 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## v0.6.0 / 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## v0.5.1 / 2012-05-22
* [Fix] inotify: remove all watches before Close()
## v0.5.0 / 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## v0.4.0 / 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## v0.3.0 / 2012-02-19
* kqueue: add files when watch directory
## v0.2.0 / 2011-12-30
* update to latest Go weekly code
## v0.1.0 / 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit
[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21
[#1]: https://github.com/howeyc/fsnotify/issues/1

7
vendor/github.com/howeyc/fsnotify/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,7 @@
# Contributing
## Moving Notice
There is a fork being actively developed with a new API in preparation for the Go Standard Library:
[github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify)

View File

@ -1,4 +1,5 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

93
vendor/github.com/howeyc/fsnotify/README.md generated vendored Normal file
View File

@ -0,0 +1,93 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/howeyc/fsnotify?status.png)](http://godoc.org/github.com/howeyc/fsnotify)
Cross platform: Windows, Linux, BSD and OS X.
## Moving Notice
There is a fork being actively developed with a new API in preparation for the Go Standard Library:
[github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify)
## Example:
```go
package main
import (
"log"
"github.com/howeyc/fsnotify"
)
func main() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
done := make(chan bool)
// Process events
go func() {
for {
select {
case ev := <-watcher.Event:
log.Println("event:", ev)
case err := <-watcher.Error:
log.Println("error:", err)
}
}
}()
err = watcher.Watch("testDir")
if err != nil {
log.Fatal(err)
}
// Hang so program doesn't exit
<-done
/* ... do stuff ... */
watcher.Close()
}
```
For each event:
* Name
* IsCreate()
* IsDelete()
* IsModify()
* IsRename()
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is in the works [#56][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [#7][]).
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [#62][]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#54][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit;
reaching this limit results in a "no space left on device" error.
* BSD / OSX: the sysctl variables "kern.maxfiles" and "kern.maxfilesperproc" contain the limits; reaching these limits results in a "too many open files" error.
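On Linux the current limit can be read at runtime from the proc file named above; a minimal sketch (Linux only):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"
)

func main() {
	// Read the inotify watch limit mentioned in the FAQ answer above.
	b, err := ioutil.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("max_user_watches:", strings.TrimSpace(string(b)))
}
```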
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#56]: https://github.com/howeyc/fsnotify/issues/56
[#54]: https://github.com/howeyc/fsnotify/issues/54
[#7]: https://github.com/howeyc/fsnotify/issues/7

vendor/github.com/howeyc/fsnotify/fsnotify.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fsnotify implements file system notification.
package fsnotify
import "fmt"
const (
FSN_CREATE = 1
FSN_MODIFY = 2
FSN_DELETE = 4
FSN_RENAME = 8
FSN_ALL = FSN_MODIFY | FSN_DELETE | FSN_RENAME | FSN_CREATE
)
// Purge events from the internal chan to the external chan if they pass the filter
func (w *Watcher) purgeEvents() {
for ev := range w.internalEvent {
sendEvent := false
w.fsnmut.Lock()
fsnFlags := w.fsnFlags[ev.Name]
w.fsnmut.Unlock()
if (fsnFlags&FSN_CREATE == FSN_CREATE) && ev.IsCreate() {
sendEvent = true
}
if (fsnFlags&FSN_MODIFY == FSN_MODIFY) && ev.IsModify() {
sendEvent = true
}
if (fsnFlags&FSN_DELETE == FSN_DELETE) && ev.IsDelete() {
sendEvent = true
}
if (fsnFlags&FSN_RENAME == FSN_RENAME) && ev.IsRename() {
sendEvent = true
}
if sendEvent {
w.Event <- ev
}
// If there's no file, then there are no more events for the user.
// BSD must keep the watch for internal use (it watches DELETEs to keep
// track of what files exist, for create events).
if ev.IsDelete() {
w.fsnmut.Lock()
delete(w.fsnFlags, ev.Name)
w.fsnmut.Unlock()
}
}
close(w.Event)
}
// Watch a given file path
func (w *Watcher) Watch(path string) error {
return w.WatchFlags(path, FSN_ALL)
}
// Watch a given file path for a particular set of notifications (FSN_MODIFY etc.)
func (w *Watcher) WatchFlags(path string, flags uint32) error {
w.fsnmut.Lock()
w.fsnFlags[path] = flags
w.fsnmut.Unlock()
return w.watch(path)
}
// Remove a watch on a file
func (w *Watcher) RemoveWatch(path string) error {
w.fsnmut.Lock()
delete(w.fsnFlags, path)
w.fsnmut.Unlock()
return w.removeWatch(path)
}
// String formats the event e in the form
// "filename: DELETE|MODIFY|..."
func (e *FileEvent) String() string {
var events string = ""
if e.IsCreate() {
events += "|" + "CREATE"
}
if e.IsDelete() {
events += "|" + "DELETE"
}
if e.IsModify() {
events += "|" + "MODIFY"
}
if e.IsRename() {
events += "|" + "RENAME"
}
if e.IsAttrib() {
events += "|" + "ATTRIB"
}
if len(events) > 0 {
events = events[1:]
}
return fmt.Sprintf("%q: %s", e.Name, events)
}
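WatchFlags is the only public hook into the filtering that purgeEvents applies above; a minimal usage sketch, reusing the import path and event loop from the README example:

```go
package main

import (
	"log"

	"github.com/howeyc/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Deliver only create and delete notifications for this path;
	// purgeEvents drops modify and rename events before they reach
	// the public Event channel.
	err = watcher.WatchFlags("testDir", fsnotify.FSN_CREATE|fsnotify.FSN_DELETE)
	if err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-watcher.Event:
			log.Println("event:", ev)
		case err := <-watcher.Error:
			log.Println("error:", err)
		}
	}
}
```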

vendor/github.com/howeyc/fsnotify/fsnotify_bsd.go generated vendored Normal file
View File

@ -0,0 +1,496 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"syscall"
)
const (
// Flags (from <sys/event.h>)
sys_NOTE_DELETE = 0x0001 /* vnode was removed */
sys_NOTE_WRITE = 0x0002 /* data contents changed */
sys_NOTE_EXTEND = 0x0004 /* size increased */
sys_NOTE_ATTRIB = 0x0008 /* attributes changed */
sys_NOTE_LINK = 0x0010 /* link count changed */
sys_NOTE_RENAME = 0x0020 /* vnode was renamed */
sys_NOTE_REVOKE = 0x0040 /* vnode access was revoked */
// Watch all events
sys_NOTE_ALLEVENTS = sys_NOTE_DELETE | sys_NOTE_WRITE | sys_NOTE_ATTRIB | sys_NOTE_RENAME
// Block for 100 ms on each call to kevent
keventWaitTime = 100e6
)
type FileEvent struct {
mask uint32 // Mask of events
Name string // File name (optional)
create bool // set by fsnotify package if found new file
}
// IsCreate reports whether the FileEvent was triggered by a creation
func (e *FileEvent) IsCreate() bool { return e.create }
// IsDelete reports whether the FileEvent was triggered by a delete
func (e *FileEvent) IsDelete() bool { return (e.mask & sys_NOTE_DELETE) == sys_NOTE_DELETE }
// IsModify reports whether the FileEvent was triggered by a file modification
func (e *FileEvent) IsModify() bool {
return ((e.mask&sys_NOTE_WRITE) == sys_NOTE_WRITE || (e.mask&sys_NOTE_ATTRIB) == sys_NOTE_ATTRIB)
}
// IsRename reports whether the FileEvent was triggered by a change name
func (e *FileEvent) IsRename() bool { return (e.mask & sys_NOTE_RENAME) == sys_NOTE_RENAME }
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
func (e *FileEvent) IsAttrib() bool {
return (e.mask & sys_NOTE_ATTRIB) == sys_NOTE_ATTRIB
}
type Watcher struct {
mu sync.Mutex // Mutex for the Watcher itself.
kq int // File descriptor (as returned by the kqueue() syscall)
watches map[string]int // Map of watched file descriptors (key: path)
wmut sync.Mutex // Protects access to watches.
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
fsnmut sync.Mutex // Protects access to fsnFlags.
enFlags map[string]uint32 // Map of watched files to evfilt note flags used in kqueue
enmut sync.Mutex // Protects access to enFlags.
paths map[int]string // Map of watched paths (key: watch descriptor)
finfo map[int]os.FileInfo // Map of file information (isDir, isReg; key: watch descriptor)
pmut sync.Mutex // Protects access to paths and finfo.
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events)
femut sync.Mutex // Protects access to fileExists.
externalWatches map[string]bool // Map of watches added by user of the library.
ewmut sync.Mutex // Protects access to externalWatches.
Error chan error // Errors are sent on this channel
internalEvent chan *FileEvent // Events are queued on this channel
Event chan *FileEvent // Events are returned on this channel
done chan bool // Channel for sending a "quit message" to the reader goroutine
isClosed bool // Set to true when Close() is first called
}
// NewWatcher creates and returns a new kevent instance using kqueue(2)
func NewWatcher() (*Watcher, error) {
fd, errno := syscall.Kqueue()
if fd == -1 {
return nil, os.NewSyscallError("kqueue", errno)
}
w := &Watcher{
kq: fd,
watches: make(map[string]int),
fsnFlags: make(map[string]uint32),
enFlags: make(map[string]uint32),
paths: make(map[int]string),
finfo: make(map[int]os.FileInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
internalEvent: make(chan *FileEvent),
Event: make(chan *FileEvent),
Error: make(chan error),
done: make(chan bool, 1),
}
go w.readEvents()
go w.purgeEvents()
return w, nil
}
// Close closes a kevent watcher instance
// It sends a message to the reader goroutine to quit and removes all watches
// associated with the kevent instance
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
w.done <- true
w.wmut.Lock()
ws := w.watches
w.wmut.Unlock()
for path := range ws {
w.removeWatch(path)
}
return nil
}
// AddWatch adds path to the watched file set.
// The flags are interpreted as described in kevent(2).
func (w *Watcher) addWatch(path string, flags uint32) error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return errors.New("kevent instance already closed")
}
w.mu.Unlock()
watchDir := false
w.wmut.Lock()
watchfd, found := w.watches[path]
w.wmut.Unlock()
if !found {
fi, errstat := os.Lstat(path)
if errstat != nil {
return errstat
}
// don't watch socket
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
path, err := filepath.EvalSymlinks(path)
if err != nil {
return nil
}
fi, errstat = os.Lstat(path)
if errstat != nil {
return nil
}
}
fd, errno := syscall.Open(path, open_FLAGS, 0700)
if fd == -1 {
return errno
}
watchfd = fd
w.wmut.Lock()
w.watches[path] = watchfd
w.wmut.Unlock()
w.pmut.Lock()
w.paths[watchfd] = path
w.finfo[watchfd] = fi
w.pmut.Unlock()
}
// Watch the directory if it has not been watched before.
w.pmut.Lock()
w.enmut.Lock()
if w.finfo[watchfd].IsDir() &&
(flags&sys_NOTE_WRITE) == sys_NOTE_WRITE &&
(!found || (w.enFlags[path]&sys_NOTE_WRITE) != sys_NOTE_WRITE) {
watchDir = true
}
w.enmut.Unlock()
w.pmut.Unlock()
w.enmut.Lock()
w.enFlags[path] = flags
w.enmut.Unlock()
var kbuf [1]syscall.Kevent_t
watchEntry := &kbuf[0]
watchEntry.Fflags = flags
syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_ADD|syscall.EV_CLEAR)
entryFlags := watchEntry.Flags
success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
if success == -1 {
return errno
} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
return errors.New("kevent add error")
}
if watchDir {
errdir := w.watchDirectoryFiles(path)
if errdir != nil {
return errdir
}
}
return nil
}
// Watch adds path to the watched file set, watching all events.
func (w *Watcher) watch(path string) error {
w.ewmut.Lock()
w.externalWatches[path] = true
w.ewmut.Unlock()
return w.addWatch(path, sys_NOTE_ALLEVENTS)
}
// RemoveWatch removes path from the watched file set.
func (w *Watcher) removeWatch(path string) error {
w.wmut.Lock()
watchfd, ok := w.watches[path]
w.wmut.Unlock()
if !ok {
return errors.New(fmt.Sprintf("can't remove non-existent kevent watch for: %s", path))
}
var kbuf [1]syscall.Kevent_t
watchEntry := &kbuf[0]
syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
entryFlags := watchEntry.Flags
success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
if success == -1 {
return os.NewSyscallError("kevent_rm_watch", errno)
} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
return errors.New("kevent rm error")
}
syscall.Close(watchfd)
w.wmut.Lock()
delete(w.watches, path)
w.wmut.Unlock()
w.enmut.Lock()
delete(w.enFlags, path)
w.enmut.Unlock()
w.pmut.Lock()
delete(w.paths, watchfd)
fInfo := w.finfo[watchfd]
delete(w.finfo, watchfd)
w.pmut.Unlock()
// Find all watched paths that are in this directory that are not external.
if fInfo.IsDir() {
var pathsToRemove []string
w.pmut.Lock()
for _, wpath := range w.paths {
wdir, _ := filepath.Split(wpath)
if filepath.Clean(wdir) == filepath.Clean(path) {
w.ewmut.Lock()
if !w.externalWatches[wpath] {
pathsToRemove = append(pathsToRemove, wpath)
}
w.ewmut.Unlock()
}
}
w.pmut.Unlock()
for _, p := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.removeWatch(p)
}
}
return nil
}
// readEvents reads from the kqueue file descriptor, converts the
// received events into Event objects and sends them via the Event channel
func (w *Watcher) readEvents() {
var (
eventbuf [10]syscall.Kevent_t // Event buffer
events []syscall.Kevent_t // Received events
twait *syscall.Timespec // Time to block waiting for events
n int // Number of events returned from kevent
errno error // Syscall errno
)
events = eventbuf[0:0]
twait = new(syscall.Timespec)
*twait = syscall.NsecToTimespec(keventWaitTime)
for {
// See if there is a message on the "done" channel
var done bool
select {
case done = <-w.done:
default:
}
// If "done" message is received
if done {
errno := syscall.Close(w.kq)
if errno != nil {
w.Error <- os.NewSyscallError("close", errno)
}
close(w.internalEvent)
close(w.Error)
return
}
// Get new events
if len(events) == 0 {
n, errno = syscall.Kevent(w.kq, nil, eventbuf[:], twait)
// EINTR is okay, basically the syscall was interrupted before
// timeout expired.
if errno != nil && errno != syscall.EINTR {
w.Error <- os.NewSyscallError("kevent", errno)
continue
}
// Received some events
if n > 0 {
events = eventbuf[0:n]
}
}
// Flush the events we received to the events channel
for len(events) > 0 {
fileEvent := new(FileEvent)
watchEvent := &events[0]
fileEvent.mask = uint32(watchEvent.Fflags)
w.pmut.Lock()
fileEvent.Name = w.paths[int(watchEvent.Ident)]
fileInfo := w.finfo[int(watchEvent.Ident)]
w.pmut.Unlock()
if fileInfo != nil && fileInfo.IsDir() && !fileEvent.IsDelete() {
// Double check to make sure the directory exists. This can happen when
// we do an rm -fr on recursively watched folders and receive a
// modification event first, but the folder has been deleted and we
// receive the delete event later.
if _, err := os.Lstat(fileEvent.Name); os.IsNotExist(err) {
// mark it as a delete event
fileEvent.mask |= sys_NOTE_DELETE
}
}
if fileInfo != nil && fileInfo.IsDir() && fileEvent.IsModify() && !fileEvent.IsDelete() {
w.sendDirectoryChangeEvents(fileEvent.Name)
} else {
// Send the event on the events channel
w.internalEvent <- fileEvent
}
// Move to next event
events = events[1:]
if fileEvent.IsRename() {
w.removeWatch(fileEvent.Name)
w.femut.Lock()
delete(w.fileExists, fileEvent.Name)
w.femut.Unlock()
}
if fileEvent.IsDelete() {
w.removeWatch(fileEvent.Name)
w.femut.Lock()
delete(w.fileExists, fileEvent.Name)
w.femut.Unlock()
// Look for a file that may have overwritten this
// (i.e. mv f1 f2 will delete f2, then create f2)
fileDir, _ := filepath.Split(fileEvent.Name)
fileDir = filepath.Clean(fileDir)
w.wmut.Lock()
_, found := w.watches[fileDir]
w.wmut.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing; ignore the missing directory and let the
// upcoming delete event remove the watch from the parent folder.
if _, err := os.Lstat(fileDir); !os.IsNotExist(err) {
w.sendDirectoryChangeEvents(fileDir)
}
}
}
}
}
}
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
// Inherit fsnFlags from parent directory
w.fsnmut.Lock()
if flags, found := w.fsnFlags[dirPath]; found {
w.fsnFlags[filePath] = flags
} else {
w.fsnFlags[filePath] = FSN_ALL
}
w.fsnmut.Unlock()
if fileInfo.IsDir() == false {
// Watch file to mimic linux fsnotify
e := w.addWatch(filePath, sys_NOTE_ALLEVENTS)
if e != nil {
return e
}
} else {
// If the user is currently watching directory
// we want to preserve the flags used
w.enmut.Lock()
currFlags, found := w.enFlags[filePath]
w.enmut.Unlock()
var newFlags uint32 = sys_NOTE_DELETE
if found {
newFlags |= currFlags
}
// Linux gives deletes if not explicitly watching
e := w.addWatch(filePath, newFlags)
if e != nil {
return e
}
}
w.femut.Lock()
w.fileExists[filePath] = true
w.femut.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match linux fsnotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
w.Error <- err
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
w.femut.Lock()
_, doesExist := w.fileExists[filePath]
w.femut.Unlock()
if !doesExist {
// Inherit fsnFlags from parent directory
w.fsnmut.Lock()
if flags, found := w.fsnFlags[dirPath]; found {
w.fsnFlags[filePath] = flags
} else {
w.fsnFlags[filePath] = FSN_ALL
}
w.fsnmut.Unlock()
// Send create event
fileEvent := new(FileEvent)
fileEvent.Name = filePath
fileEvent.create = true
w.internalEvent <- fileEvent
}
w.femut.Lock()
w.fileExists[filePath] = true
w.femut.Unlock()
}
w.watchDirectoryFiles(dirPath)
}

vendor/github.com/howeyc/fsnotify/fsnotify_linux.go generated vendored Normal file
View File

@ -0,0 +1,304 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"os"
"strings"
"sync"
"syscall"
"unsafe"
)
const (
// Options for inotify_init() are not exported
// sys_IN_CLOEXEC uint32 = syscall.IN_CLOEXEC
// sys_IN_NONBLOCK uint32 = syscall.IN_NONBLOCK
// Options for AddWatch
sys_IN_DONT_FOLLOW uint32 = syscall.IN_DONT_FOLLOW
sys_IN_ONESHOT uint32 = syscall.IN_ONESHOT
sys_IN_ONLYDIR uint32 = syscall.IN_ONLYDIR
// The "sys_IN_MASK_ADD" option is not exported, as AddWatch
// adds it automatically, if there is already a watch for the given path
// sys_IN_MASK_ADD uint32 = syscall.IN_MASK_ADD
// Events
sys_IN_ACCESS uint32 = syscall.IN_ACCESS
sys_IN_ALL_EVENTS uint32 = syscall.IN_ALL_EVENTS
sys_IN_ATTRIB uint32 = syscall.IN_ATTRIB
sys_IN_CLOSE uint32 = syscall.IN_CLOSE
sys_IN_CLOSE_NOWRITE uint32 = syscall.IN_CLOSE_NOWRITE
sys_IN_CLOSE_WRITE uint32 = syscall.IN_CLOSE_WRITE
sys_IN_CREATE uint32 = syscall.IN_CREATE
sys_IN_DELETE uint32 = syscall.IN_DELETE
sys_IN_DELETE_SELF uint32 = syscall.IN_DELETE_SELF
sys_IN_MODIFY uint32 = syscall.IN_MODIFY
sys_IN_MOVE uint32 = syscall.IN_MOVE
sys_IN_MOVED_FROM uint32 = syscall.IN_MOVED_FROM
sys_IN_MOVED_TO uint32 = syscall.IN_MOVED_TO
sys_IN_MOVE_SELF uint32 = syscall.IN_MOVE_SELF
sys_IN_OPEN uint32 = syscall.IN_OPEN
sys_AGNOSTIC_EVENTS = sys_IN_MOVED_TO | sys_IN_MOVED_FROM | sys_IN_CREATE | sys_IN_ATTRIB | sys_IN_MODIFY | sys_IN_MOVE_SELF | sys_IN_DELETE | sys_IN_DELETE_SELF
// Special events
sys_IN_ISDIR uint32 = syscall.IN_ISDIR
sys_IN_IGNORED uint32 = syscall.IN_IGNORED
sys_IN_Q_OVERFLOW uint32 = syscall.IN_Q_OVERFLOW
sys_IN_UNMOUNT uint32 = syscall.IN_UNMOUNT
)
type FileEvent struct {
mask uint32 // Mask of events
cookie uint32 // Unique cookie associating related events (for rename(2))
Name string // File name (optional)
}
// IsCreate reports whether the FileEvent was triggered by a creation
func (e *FileEvent) IsCreate() bool {
return (e.mask&sys_IN_CREATE) == sys_IN_CREATE || (e.mask&sys_IN_MOVED_TO) == sys_IN_MOVED_TO
}
// IsDelete reports whether the FileEvent was triggered by a delete
func (e *FileEvent) IsDelete() bool {
return (e.mask&sys_IN_DELETE_SELF) == sys_IN_DELETE_SELF || (e.mask&sys_IN_DELETE) == sys_IN_DELETE
}
// IsModify reports whether the FileEvent was triggered by a file modification or attribute change
func (e *FileEvent) IsModify() bool {
return ((e.mask&sys_IN_MODIFY) == sys_IN_MODIFY || (e.mask&sys_IN_ATTRIB) == sys_IN_ATTRIB)
}
// IsRename reports whether the FileEvent was triggered by a change name
func (e *FileEvent) IsRename() bool {
return ((e.mask&sys_IN_MOVE_SELF) == sys_IN_MOVE_SELF || (e.mask&sys_IN_MOVED_FROM) == sys_IN_MOVED_FROM)
}
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
func (e *FileEvent) IsAttrib() bool {
return (e.mask & sys_IN_ATTRIB) == sys_IN_ATTRIB
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
type Watcher struct {
mu sync.Mutex // Map access
fd int // File descriptor (as returned by the inotify_init() syscall)
watches map[string]*watch // Map of inotify watches (key: path)
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
fsnmut sync.Mutex // Protects access to fsnFlags.
paths map[int]string // Map of watched paths (key: watch descriptor)
Error chan error // Errors are sent on this channel
internalEvent chan *FileEvent // Events are queued on this channel
Event chan *FileEvent // Events are returned on this channel
done chan bool // Channel for sending a "quit message" to the reader goroutine
isClosed bool // Set to true when Close() is first called
}
// NewWatcher creates and returns a new inotify instance using inotify_init(2)
func NewWatcher() (*Watcher, error) {
fd, errno := syscall.InotifyInit()
if fd == -1 {
return nil, os.NewSyscallError("inotify_init", errno)
}
w := &Watcher{
fd: fd,
watches: make(map[string]*watch),
fsnFlags: make(map[string]uint32),
paths: make(map[int]string),
internalEvent: make(chan *FileEvent),
Event: make(chan *FileEvent),
Error: make(chan error),
done: make(chan bool, 1),
}
go w.readEvents()
go w.purgeEvents()
return w, nil
}
// Close closes an inotify watcher instance
// It sends a message to the reader goroutine to quit and removes all watches
// associated with the inotify instance
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Remove all watches
for path := range w.watches {
w.RemoveWatch(path)
}
// Send "quit" message to the reader goroutine
w.done <- true
return nil
}
// AddWatch adds path to the watched file set.
// The flags are interpreted as described in inotify_add_watch(2).
func (w *Watcher) addWatch(path string, flags uint32) error {
if w.isClosed {
return errors.New("inotify instance already closed")
}
w.mu.Lock()
watchEntry, found := w.watches[path]
w.mu.Unlock()
if found {
watchEntry.flags |= flags
flags |= syscall.IN_MASK_ADD
}
wd, errno := syscall.InotifyAddWatch(w.fd, path, flags)
if wd == -1 {
return errno
}
w.mu.Lock()
w.watches[path] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = path
w.mu.Unlock()
return nil
}
// Watch adds path to the watched file set, watching all events.
func (w *Watcher) watch(path string) error {
return w.addWatch(path, sys_AGNOSTIC_EVENTS)
}
// RemoveWatch removes path from the watched file set.
func (w *Watcher) removeWatch(path string) error {
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[path]
if !ok {
return errors.New(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path))
}
success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
return os.NewSyscallError("inotify_rm_watch", errno)
}
delete(w.watches, path)
return nil
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Event channel
func (w *Watcher) readEvents() {
var (
buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
)
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
syscall.Close(w.fd)
close(w.internalEvent)
close(w.Error)
return
default:
}
n, errno = syscall.Read(w.fd, buf[:])
// If EOF is received
if n == 0 {
syscall.Close(w.fd)
close(w.internalEvent)
close(w.Error)
return
}
if n < 0 {
w.Error <- os.NewSyscallError("read", errno)
continue
}
if n < syscall.SizeofInotifyEvent {
w.Error <- errors.New("inotify: short read in readEvents()")
continue
}
var offset uint32 = 0
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
event := new(FileEvent)
event.mask = uint32(raw.Mask)
event.cookie = uint32(raw.Cookie)
nameLen := uint32(raw.Len)
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
event.Name = w.paths[int(raw.Wd)]
w.mu.Unlock()
watchedName := event.Name
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
// The filename is padded with NUL bytes. TrimRight() gets rid of those.
event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
// Send the events that are not ignored on the events channel
if !event.ignoreLinux() {
// Setup FSNotify flags (inherit from directory watch)
w.fsnmut.Lock()
if _, fsnFound := w.fsnFlags[event.Name]; !fsnFound {
if fsnFlags, watchFound := w.fsnFlags[watchedName]; watchFound {
w.fsnFlags[event.Name] = fsnFlags
} else {
w.fsnFlags[event.Name] = FSN_ALL
}
}
w.fsnmut.Unlock()
w.internalEvent <- event
}
// Move to the next event in the buffer
offset += syscall.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Event
// channel, such as events marked as ignored by the kernel, or MODIFY events
// against files that do not exist.
func (e *FileEvent) ignoreLinux() bool {
// Ignore anything the inotify API says to ignore
if e.mask&sys_IN_IGNORED == sys_IN_IGNORED {
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.IsDelete() || e.IsRename()) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}

vendor/github.com/howeyc/fsnotify/fsnotify_open_bsd.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly
package fsnotify
import "syscall"
const open_FLAGS = syscall.O_NONBLOCK | syscall.O_RDONLY

View File

@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package fsnotify
import "syscall"
const open_FLAGS = syscall.O_EVTONLY

vendor/github.com/howeyc/fsnotify/fsnotify_windows.go generated vendored Normal file
View File

@ -0,0 +1,598 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
const (
// Options for AddWatch
sys_FS_ONESHOT = 0x80000000
sys_FS_ONLYDIR = 0x1000000
// Events
sys_FS_ACCESS = 0x1
sys_FS_ALL_EVENTS = 0xfff
sys_FS_ATTRIB = 0x4
sys_FS_CLOSE = 0x18
sys_FS_CREATE = 0x100
sys_FS_DELETE = 0x200
sys_FS_DELETE_SELF = 0x400
sys_FS_MODIFY = 0x2
sys_FS_MOVE = 0xc0
sys_FS_MOVED_FROM = 0x40
sys_FS_MOVED_TO = 0x80
sys_FS_MOVE_SELF = 0x800
// Special events
sys_FS_IGNORED = 0x8000
sys_FS_Q_OVERFLOW = 0x4000
)
const (
// TODO(nj): Use syscall.ERROR_MORE_DATA from ztypes_windows in Go 1.3+
sys_ERROR_MORE_DATA syscall.Errno = 234
)
// Event is the type of the notification messages
// received on the watcher's Event channel.
type FileEvent struct {
mask uint32 // Mask of events
cookie uint32 // Unique cookie associating related events (for rename)
Name string // File name (optional)
}
// IsCreate reports whether the FileEvent was triggered by a creation
func (e *FileEvent) IsCreate() bool { return (e.mask & sys_FS_CREATE) == sys_FS_CREATE }
// IsDelete reports whether the FileEvent was triggered by a delete
func (e *FileEvent) IsDelete() bool {
return ((e.mask&sys_FS_DELETE) == sys_FS_DELETE || (e.mask&sys_FS_DELETE_SELF) == sys_FS_DELETE_SELF)
}
// IsModify reports whether the FileEvent was triggered by a file modification or attribute change
func (e *FileEvent) IsModify() bool {
return ((e.mask&sys_FS_MODIFY) == sys_FS_MODIFY || (e.mask&sys_FS_ATTRIB) == sys_FS_ATTRIB)
}
// IsRename reports whether the FileEvent was triggered by a change name
func (e *FileEvent) IsRename() bool {
return ((e.mask&sys_FS_MOVE) == sys_FS_MOVE || (e.mask&sys_FS_MOVE_SELF) == sys_FS_MOVE_SELF || (e.mask&sys_FS_MOVED_FROM) == sys_FS_MOVED_FROM || (e.mask&sys_FS_MOVED_TO) == sys_FS_MOVED_TO)
}
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
func (e *FileEvent) IsAttrib() bool {
return (e.mask & sys_FS_ATTRIB) == sys_FS_ATTRIB
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
// A Watcher waits for and receives event notifications
// for a specific set of files and directories.
type Watcher struct {
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
fsnmut sync.Mutex // Protects access to fsnFlags.
input chan *input // Inputs to the reader are sent on this channel
internalEvent chan *FileEvent // Events are queued on this channel
Event chan *FileEvent // Events are returned on this channel
Error chan error // Errors are sent on this channel
isClosed bool // Set to true when Close() is first called
quit chan chan<- error
cookie uint32
}
// NewWatcher creates and returns a Watcher.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
fsnFlags: make(map[string]uint32),
input: make(chan *input, 1),
Event: make(chan *FileEvent, 50),
internalEvent: make(chan *FileEvent),
Error: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
go w.purgeEvents()
return w, nil
}
// Close closes a Watcher.
// It sends a message to the reader goroutine to quit and removes all watches
// associated with the watcher.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// AddWatch adds path to the watched file set.
func (w *Watcher) AddWatch(path string, flags uint32) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(path),
flags: flags,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Watch adds path to the watched file set, watching all events.
func (w *Watcher) watch(path string) error {
return w.AddWatch(path, sys_FS_ALL_EVENTS)
}
// RemoveWatch removes path from the watched file set.
func (w *Watcher) removeWatch(path string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(path),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Error <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Error <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
if watch.mask&sys_FS_ONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Event channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.internalEvent)
close(w.Error)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case sys_ERROR_MORE_DATA:
if watch == nil {
w.Error <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Error <- os.NewSyscallError("GetQueuedCompletionStatus", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.internalEvent <- &FileEvent{mask: sys_FS_Q_OVERFLOW}
w.Error <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := watch.path + "\\" + name
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sys_FS_DELETE_SELF
case syscall.FILE_ACTION_MODIFIED:
mask = sys_FS_MODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sys_FS_MOVE_SELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sys_FS_ONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sys_FS_ONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = watch.path + "\\" + watch.rename
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Error <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Error <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := &FileEvent{mask: uint32(mask), Name: name}
if mask&sys_FS_MOVE != 0 {
if mask&sys_FS_MOVED_FROM != 0 {
w.cookie++
}
event.cookie = w.cookie
}
select {
case ch := <-w.quit:
w.quit <- ch
case w.Event <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sys_FS_ACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sys_FS_MODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sys_FS_ATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sys_FS_CREATE
case syscall.FILE_ACTION_REMOVED:
return sys_FS_DELETE
case syscall.FILE_ACTION_MODIFIED:
return sys_FS_MODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sys_FS_MOVED_FROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sys_FS_MOVED_TO
}
return 0
}

View File

@ -15,7 +15,6 @@
package collector
import (
"fmt"
"sync"
"time"
@ -83,10 +82,12 @@ func NewNodeCollector(filters ...string) (*nodeCollector, error) {
for _, filter := range filters {
enabled, exist := collectorState[filter]
if !exist {
return nil, fmt.Errorf("missing collector: %s", filter)
log.Warnf("missing collector: %s", filter)
continue
}
if !*enabled {
return nil, fmt.Errorf("disabled collector: %s", filter)
log.Warnf("disabled collector: %s", filter)
continue
}
f[filter] = true
}
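The change above makes NewNodeCollector warn and skip on unknown or disabled filter names instead of failing construction. The warn-and-skip pattern in isolation, as a self-contained sketch (names here are illustrative, not the collector package's API):

```go
package main

import "log"

// enabledFilters mirrors the filter loop above: unknown or disabled
// names are logged and skipped rather than treated as fatal errors.
func enabledFilters(known map[string]bool, filters ...string) map[string]bool {
	f := make(map[string]bool)
	for _, filter := range filters {
		enabled, exist := known[filter]
		if !exist {
			log.Printf("missing collector: %s", filter)
			continue
		}
		if !enabled {
			log.Printf("disabled collector: %s", filter)
			continue
		}
		f[filter] = true
	}
	return f
}

func main() {
	known := map[string]bool{"loadavg": true, "wifi": false}
	// Only "loadavg" survives; "wifi" and "bogus" are logged and skipped.
	log.Println(enabledFilters(known, "loadavg", "wifi", "bogus"))
}
```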

View File

@ -1,25 +0,0 @@
BSON library for Go
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/gopkg.in/mgo.v2/bson/bson.go generated vendored
View File

@ -1,738 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package bson is an implementation of the BSON specification for Go:
//
// http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// --------------------------------------------------------------------------
// The public API.
// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
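//
// For example (an illustrative type, marshalling the wrapped time
// in place of the struct):
//
// type MyTime struct{ T time.Time }
//
// func (mt MyTime) GetBSON() (interface{}, error) {
// return mt.T, nil
// }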
type Getter interface {
GetBSON() (interface{}, error)
}
// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
// type MyString string
//
// func (s *MyString) SetBSON(raw bson.Raw) error {
// return raw.Unmarshal(s)
// }
//
type Setter interface {
SetBSON(raw Raw) error
}
// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")
// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
// bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}
// D represents a BSON document containing ordered elements. For example:
//
// bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem
// DocElem is an element of the bson.D document representation.
type DocElem struct {
Name string
Value interface{}
}
// Map returns a map out of the ordered element name/value pairs in d.
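// For example, D{{"a", 1}, {"b", true}}.Map() yields M{"a": 1, "b": true}.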
func (d D) Map() (m M) {
m = make(M, len(d))
for _, item := range d {
m[item.Name] = item.Value
}
return m
}
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
// http://bsonspec.org/#/specification
//
type Raw struct {
Kind byte
Data []byte
}
// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem
// See the RawD type.
type RawDocElem struct {
Name string
Value Raw
}
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string
// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
d, err := hex.DecodeString(s)
if err != nil || len(d) != 12 {
panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
}
return ObjectId(d)
}
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
if len(s) != 24 {
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
// objectIdCounter is atomically incremented when generating a new ObjectId
// using the NewObjectId() function. It's used as the counter part of an id.
var objectIdCounter uint32 = readRandomUint32()
// readRandomUint32 returns a random objectIdCounter.
func readRandomUint32() uint32 {
var b [4]byte
_, err := io.ReadFull(rand.Reader, b[:])
if err != nil {
panic(fmt.Errorf("cannot read random object id: %v", err))
}
return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
}
// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
var processId = os.Getpid()
// readMachineId generates and returns a machine id.
// If the hostname lookup fails it falls back to random bytes, and panics only if both fail.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
hostname, err1 := os.Hostname()
if err1 != nil {
_, err2 := io.ReadFull(rand.Reader, id)
if err2 != nil {
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
}
return id
}
hw := md5.New()
hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}
// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
// Machine, first 3 bytes of md5(hostname)
b[4] = machineId[0]
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
b[7] = byte(processId >> 8)
b[8] = byte(processId)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
b[10] = byte(i >> 8)
b[11] = byte(i)
return ObjectId(b[:])
}
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method; it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
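//
// For example, to match documents whose ids were generated after t
// (an illustrative query shape, using MongoDB's $gt operator):
//
// bson.M{"_id": bson.M{"$gt": bson.NewObjectIdWithTime(t)}}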
func NewObjectIdWithTime(t time.Time) ObjectId {
var b [12]byte
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
return ObjectId(string(b[:]))
}
// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}
// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
return hex.EncodeToString([]byte(id))
}
// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}
var nullBytes = []byte("null")
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
var v struct {
Id json.RawMessage `json:"$oid"`
Func struct {
Id json.RawMessage
} `json:"$oidFunc"`
}
err := jdec(data, &v)
if err == nil {
if len(v.Id) > 0 {
data = []byte(v.Id)
} else {
data = []byte(v.Func.Id)
}
}
}
if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
*id = ""
return nil
}
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data)))
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[1:25])
if err != nil {
return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err))
}
*id = ObjectId(string(buf[:]))
return nil
}
// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
func (id ObjectId) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%x", string(id))), nil
}
// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
func (id *ObjectId) UnmarshalText(data []byte) error {
if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
*id = ""
return nil
}
if len(data) != 24 {
return fmt.Errorf("invalid ObjectId: %s", data)
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[:])
if err != nil {
return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
}
*id = ObjectId(string(buf[:]))
return nil
}
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
return len(id) == 12
}
// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
if len(id) != 12 {
panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
}
return []byte(string(id)[start:end])
}
// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
return time.Unix(secs, 0)
}
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
return id.byteSlice(4, 7)
}
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
b := id.byteSlice(9, 12)
// Counter is stored as big-endian 3-byte value
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}
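// Worked example: for ObjectIdHex("4d88e15b60f486e428412dc9") the
// accessors above yield Time() = Unix second 0x4d88e15b (1300816219,
// March 2011), Machine() = []byte{0x60, 0xf4, 0x86}, Pid() = 0xe428
// (58408), and Counter() = 0x412dc9 (4271561).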
// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}
// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64
type orderKey int64
// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)
// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)
type undefined struct{}
// Undefined represents the undefined BSON value.
var Undefined undefined
// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
// 0x01 - Function (!?)
// 0x02 - Obsolete generic.
// 0x03 - UUID
// 0x05 - MD5
// 0x80 - User defined.
//
type Binary struct {
Kind byte
Data []byte
}
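// exampleBinaryUUID is an editorial sketch, not part of the original file:
// a Binary value carrying the UUID subtype from the table above. The
// 16-byte payload here is a zero-valued placeholder.
func exampleBinaryUUID() Binary {
    // Note that kind 0x00 would instead round-trip as a plain []byte.
    return Binary{Kind: 0x03, Data: make([]byte, 16)}
}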
// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
Pattern string
Options string
}
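// exampleRegEx is an editorial sketch, not part of the original file:
// options are single characters and must be given in sorted order, so
// case-insensitive multi-line matching is spelled "im", never "mi".
func exampleRegEx() RegEx {
    return RegEx{Pattern: "^\\w+$", Options: "im"}
}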
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
Code string
Scope interface{}
}
// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
Namespace string
Id ObjectId
}
const initialBufferSize = 64
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New(s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized,
// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
//
// minsize Marshal an int64 value as an int32, if that's feasible
// while preserving the numeric value.
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the bson keys of other struct fields.
//
// Some examples:
//
// type T struct {
// A bool
// B int "myb"
// C string "myc,omitempty"
// D string `bson:",omitempty" json:"jsonkey"`
// E int64 ",minsize"
// F int64 "myf,omitempty,minsize"
// }
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := &encoder{make([]byte, 0, initialBufferSize)}
e.addDoc(reflect.ValueOf(in))
return e.out, nil
}
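// exampleMarshalUsage is an editorial sketch, not part of the original
// file: it exercises Marshal with the field tags documented above. The
// Person type is hypothetical.
func exampleMarshalUsage() ([]byte, error) {
    type Person struct {
        Name string `bson:"name"`
        Age  int    `bson:"age,omitempty"` // dropped while zero
    }
    // Age is unset, so only the "name" element is serialized.
    return Marshal(Person{Name: "Ann"})
}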
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// In the case of struct values, only exported fields will be deserialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the unmarshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
// inline Inline the field, which must be a struct or a map.
// Inlined structs are handled as if its fields were part
// of the outer struct. An inlined map causes keys that do
// not match any other struct field to be inserted in the
// map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
// value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
if raw, ok := out.(*Raw); ok {
raw.Kind = 3
raw.Data = in
return nil
}
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
fallthrough
case reflect.Map:
d := newDecoder(in)
d.readDocTo(v)
case reflect.Struct:
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Unmarshal needs a map or a pointer to a struct.")
}
return nil
}
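// exampleUnmarshalUsage is an editorial sketch, not part of the original
// file: it round-trips a document from a map into a struct, relying only
// on Marshal, Unmarshal, and the M type defined in this package.
func exampleUnmarshalUsage() error {
    data, err := Marshal(M{"n": 1, "s": "x"})
    if err != nil {
        return err
    }
    var out struct {
        N int    // matched via the lowercased field name "n"
        S string // matched via "s"
    }
    // Unmarshal requires a map or a pointer; a bare struct is rejected.
    return Unmarshal(data, &out)
}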
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
fallthrough
case reflect.Map:
d := newDecoder(raw.Data)
good := d.readElemTo(v, raw.Kind)
if !good {
return &TypeError{v.Type(), raw.Kind}
}
case reflect.Struct:
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
}
return nil
}
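// exampleRawUnmarshal is an editorial sketch, not part of the original
// file: a Raw value keeps its bytes unparsed until a concrete target is
// chosen, and Unmarshal reports a *TypeError (defined below) when the
// stored BSON kind is incompatible with that target.
func exampleRawUnmarshal(raw Raw) (string, error) {
    var s string
    if err := raw.Unmarshal(&s); err != nil {
        return "", err // *TypeError unless raw holds a string-compatible kind
    }
    return s, nil
}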
type TypeError struct {
Type reflect.Type
Kind byte
}
func (e *TypeError) Error() string {
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
InlineMap int
Zero reflect.Value
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
MinSize bool
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
structMapMutex.RLock()
sinfo, found := structMap[st]
structMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("bson")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "minsize":
info.MinSize = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
panic("Option ,inline needs a struct value or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
fieldsMap,
fieldsList,
inlineMap,
reflect.New(st).Elem(),
}
structMapMutex.Lock()
structMap[st] = sinfo
structMapMutex.Unlock()
return sinfo, nil
}

File diff suppressed because it is too large

View File

@ -1,310 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package bson
import (
"fmt"
"strconv"
"strings"
)
// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
h, l uint64
}
func (d Decimal128) String() string {
var pos int // positive sign
var e int // exponent
var h, l uint64 // significand high/low
if d.h>>63&1 == 0 {
pos = 1
}
switch d.h >> 58 & (1<<5 - 1) {
case 0x1F:
return "NaN"
case 0x1E:
return "-Inf"[pos:]
}
l = d.l
if d.h>>61&3 == 3 {
// Bits: 1*sign 2*ignored 14*exponent 111*significand.
// Implicit 0b100 prefix in significand.
e = int(d.h>>47&(1<<14-1)) - 6176
//h = 4<<47 | d.h&(1<<47-1)
// Spec says all of these values are out of range.
h, l = 0, 0
} else {
// Bits: 1*sign 14*exponent 113*significand
e = int(d.h>>49&(1<<14-1)) - 6176
h = d.h & (1<<49 - 1)
}
// Would be handled by the logic below, but that's trivial and common.
if h == 0 && l == 0 && e == 0 {
return "-0"[pos:]
}
var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
var last = len(repr)
var i = len(repr)
var dot = len(repr) + e
var rem uint32
Loop:
for d9 := 0; d9 < 5; d9++ {
h, l, rem = divmod(h, l, 1e9)
for d1 := 0; d1 < 9; d1++ {
// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
e += len(repr) - i
i--
repr[i] = '.'
last = i - 1
dot = len(repr) // Unmark.
}
c := '0' + byte(rem%10)
rem /= 10
i--
repr[i] = c
// Handle "0E+3", "1E+3", etc.
if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
last = i
break Loop
}
if c != '0' {
last = i
}
// Break early. Works without it, but why.
if dot > i && l == 0 && h == 0 && rem == 0 {
break Loop
}
}
}
repr[last-1] = '-'
last--
if e > 0 {
return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
}
if e < 0 {
return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
}
return string(repr[last+pos:])
}
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
div64 := uint64(div)
a := h >> 32
aq := a / div64
ar := a % div64
b := ar<<32 + h&(1<<32-1)
bq := b / div64
br := b % div64
c := br<<32 + l>>32
cq := c / div64
cr := c % div64
d := cr<<32 + l&(1<<32-1)
dq := d / div64
dr := d % div64
return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
}
var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}
func dErr(s string) (Decimal128, error) {
return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}
func ParseDecimal128(s string) (Decimal128, error) {
orig := s
if s == "" {
return dErr(orig)
}
neg := s[0] == '-'
if neg || s[0] == '+' {
s = s[1:]
}
if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
return dNaN, nil
}
if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
if neg {
return dNegInf, nil
}
return dPosInf, nil
}
return dErr(orig)
}
var h, l uint64
var e int
var add, ovr uint32
var mul uint32 = 1
var dot = -1
var digits = 0
var i = 0
for i < len(s) {
c := s[i]
if mul == 1e9 {
h, l, ovr = muladd(h, l, mul, add)
mul, add = 1, 0
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if c >= '0' && c <= '9' {
i++
if c > '0' || digits > 0 {
digits++
}
if digits > 34 {
if c == '0' {
// Exact rounding.
e++
continue
}
return dErr(orig)
}
mul *= 10
add *= 10
add += uint32(c - '0')
continue
}
if c == '.' {
i++
if dot >= 0 || i == 1 && len(s) == 1 {
return dErr(orig)
}
if i == len(s) {
break
}
if s[i] < '0' || s[i] > '9' || e > 0 {
return dErr(orig)
}
dot = i
continue
}
break
}
if i == 0 {
return dErr(orig)
}
if mul > 1 {
h, l, ovr = muladd(h, l, mul, add)
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if dot >= 0 {
e += dot - i
}
if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
i++
eneg := s[i] == '-'
if eneg || s[i] == '+' {
i++
if i == len(s) {
return dErr(orig)
}
}
n := 0
for i < len(s) && n < 1e4 {
c := s[i]
i++
if c < '0' || c > '9' {
return dErr(orig)
}
n *= 10
n += int(c - '0')
}
if eneg {
n = -n
}
e += n
for e < -6176 {
// Subnormal.
var div uint32 = 1
for div < 1e9 && e < -6176 {
div *= 10
e++
}
var rem uint32
h, l, rem = divmod(h, l, div)
if rem > 0 {
return dErr(orig)
}
}
for e > 6111 {
// Clamped.
var mul uint32 = 1
for mul < 1e9 && e > 6111 {
mul *= 10
e--
}
h, l, ovr = muladd(h, l, mul, 0)
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if e < -6176 || e > 6111 {
return dErr(orig)
}
}
if i < len(s) {
return dErr(orig)
}
h |= uint64(e+6176) & uint64(1<<14-1) << 49
if neg {
h |= 1 << 63
}
return Decimal128{h, l}, nil
}
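// exampleDecimal128RoundTrip is an editorial sketch, not part of the
// original file: ParseDecimal128 accepts an optional sign, fractional
// digits, and an exponent, and String renders the stored value back in
// the scientific notation produced above.
func exampleDecimal128RoundTrip() (string, error) {
    d, err := ParseDecimal128("1.050E+3")
    if err != nil {
        return "", err
    }
    return d.String(), nil
}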
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
mul64 := uint64(mul)
a := mul64 * (l & (1<<32 - 1))
b := a>>32 + mul64*(l>>32)
c := b>>32 + mul64*(h&(1<<32-1))
d := c>>32 + mul64*(h>>32)
a = a&(1<<32-1) + uint64(add)
b = b&(1<<32-1) + a>>32
c = c&(1<<32-1) + b>>32
d = d&(1<<32-1) + c>>32
return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
}

File diff suppressed because it is too large

View File

@ -1,849 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"sync"
"time"
)
type decoder struct {
in []byte
i int
docType reflect.Type
}
var typeM = reflect.TypeOf(M{})
func newDecoder(in []byte) *decoder {
return &decoder{in, 0, typeM}
}
// --------------------------------------------------------------------------
// Some helper functions.
func corrupted() {
panic("Document is corrupted")
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
// --------------------------------------------------------------------------
// Unmarshaling of documents.
const (
setterUnknown = iota
setterNone
setterType
setterAddr
)
var setterStyles map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex
func init() {
var iface Setter
setterIface = reflect.TypeOf(&iface).Elem()
setterStyles = make(map[reflect.Type]int)
}
func setterStyle(outt reflect.Type) int {
setterMutex.RLock()
style := setterStyles[outt]
setterMutex.RUnlock()
if style == setterUnknown {
setterMutex.Lock()
defer setterMutex.Unlock()
if outt.Implements(setterIface) {
setterStyles[outt] = setterType
} else if reflect.PtrTo(outt).Implements(setterIface) {
setterStyles[outt] = setterAddr
} else {
setterStyles[outt] = setterNone
}
style = setterStyles[outt]
}
return style
}
func getSetter(outt reflect.Type, out reflect.Value) Setter {
style := setterStyle(outt)
if style == setterNone {
return nil
}
if style == setterAddr {
if !out.CanAddr() {
return nil
}
out = out.Addr()
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
return out.Interface().(Setter)
}
func clearMap(m reflect.Value) {
var none reflect.Value
for _, k := range m.MapKeys() {
m.SetMapIndex(k, none)
}
}
func (d *decoder) readDocTo(out reflect.Value) {
var elemType reflect.Type
outt := out.Type()
outk := outt.Kind()
for {
if outk == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
if setter := getSetter(outt, out); setter != nil {
var raw Raw
d.readDocTo(reflect.ValueOf(&raw))
err := setter.SetBSON(raw)
if _, ok := err.(*TypeError); err != nil && !ok {
panic(err)
}
return
}
if outk == reflect.Ptr {
out = out.Elem()
outt = out.Type()
outk = out.Kind()
continue
}
break
}
var fieldsMap map[string]fieldInfo
var inlineMap reflect.Value
start := d.i
origout := out
if outk == reflect.Interface {
if d.docType.Kind() == reflect.Map {
mv := reflect.MakeMap(d.docType)
out.Set(mv)
out = mv
} else {
dv := reflect.New(d.docType).Elem()
out.Set(dv)
out = dv
}
outt = out.Type()
outk = outt.Kind()
}
docType := d.docType
keyType := typeString
convertKey := false
switch outk {
case reflect.Map:
keyType = outt.Key()
if keyType.Kind() != reflect.String {
panic("BSON map must have string keys. Got: " + outt.String())
}
if keyType != typeString {
convertKey = true
}
elemType = outt.Elem()
if elemType == typeIface {
d.docType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(out.Type()))
} else if out.Len() > 0 {
clearMap(out)
}
case reflect.Struct:
if outt != typeRaw {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
fieldsMap = sinfo.FieldsMap
out.Set(sinfo.Zero)
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
clearMap(inlineMap)
}
elemType = inlineMap.Type().Elem()
if elemType == typeIface {
d.docType = inlineMap.Type()
}
}
}
case reflect.Slice:
switch outt.Elem() {
case typeDocElem:
origout.Set(d.readDocElems(outt))
return
case typeRawDocElem:
origout.Set(d.readRawDocElems(outt))
return
}
fallthrough
default:
panic("Unsupported document type for unmarshalling: " + out.Type().String())
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
switch outk {
case reflect.Map:
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
k := reflect.ValueOf(name)
if convertKey {
k = k.Convert(keyType)
}
out.SetMapIndex(k, e)
}
case reflect.Struct:
if outt == typeRaw {
d.dropElem(kind)
} else {
if info, ok := fieldsMap[name]; ok {
if info.Inline == nil {
d.readElemTo(out.Field(info.Num), kind)
} else {
d.readElemTo(out.FieldByIndex(info.Inline), kind)
}
} else if inlineMap.IsValid() {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
}
} else {
d.dropElem(kind)
}
}
case reflect.Slice:
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
d.docType = docType
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
}
}
func (d *decoder) readArrayDocTo(out reflect.Value) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
i := 0
l := out.Len()
for d.in[d.i] != '\x00' {
if i >= l {
panic("Length mismatch on array field")
}
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
d.readElemTo(out.Index(i), kind)
if d.i >= end {
corrupted()
}
i++
}
if i != l {
panic("Length mismatch on array field")
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
if elemType == typeRawDocElem {
d.dropElem(0x04)
return reflect.Zero(t).Interface()
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
tmp = append(tmp, e)
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
n := len(tmp)
slice := reflect.MakeSlice(t, n, n)
for i := 0; i != n; i++ {
slice.Index(i).Set(tmp[i])
}
return slice.Interface()
}
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]DocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := DocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]RawDocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := RawDocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readDocWith(f func(kind byte, name string)) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
f(kind, name)
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.
var blackHole = settableValueOf(struct{}{})
func (d *decoder) dropElem(kind byte) {
d.readElemTo(blackHole, kind)
}
// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned ok value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
if kind == 0x03 {
// Delegate unmarshaling of documents.
outt := out.Type()
outk := out.Kind()
switch outk {
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
d.readDocTo(out)
return true
}
if setterStyle(outt) != setterNone {
d.readDocTo(out)
return true
}
if outk == reflect.Slice {
switch outt.Elem() {
case typeDocElem:
out.Set(d.readDocElems(outt))
case typeRawDocElem:
out.Set(d.readRawDocElems(outt))
default:
d.readDocTo(blackHole)
}
return true
}
d.readDocTo(blackHole)
return true
}
var in interface{}
switch kind {
case 0x01: // Float64
in = d.readFloat64()
case 0x02: // UTF-8 string
in = d.readStr()
case 0x03: // Document
panic("Can't happen. Handled above.")
case 0x04: // Array
outt := out.Type()
if setterStyle(outt) != setterNone {
// Skip the value so its data is handed to the setter below.
d.dropElem(kind)
break
}
for outt.Kind() == reflect.Ptr {
outt = outt.Elem()
}
switch outt.Kind() {
case reflect.Array:
d.readArrayDocTo(out)
return true
case reflect.Slice:
in = d.readSliceDoc(outt)
default:
in = d.readSliceDoc(typeSlice)
}
case 0x05: // Binary
b := d.readBinary()
if b.Kind == 0x00 || b.Kind == 0x02 {
in = b.Data
} else {
in = b
}
case 0x06: // Undefined (obsolete, but still seen in the wild)
in = Undefined
case 0x07: // ObjectId
in = ObjectId(d.readBytes(12))
case 0x08: // Bool
in = d.readBool()
case 0x09: // Timestamp
// MongoDB handles timestamps as milliseconds.
i := d.readInt64()
if i == -62135596800000 {
in = time.Time{} // In UTC for convenience.
} else {
in = time.Unix(i/1e3, i%1e3*1e6)
}
case 0x0A: // Nil
in = nil
case 0x0B: // RegEx
in = d.readRegEx()
case 0x0C:
in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
case 0x0D: // JavaScript without scope
in = JavaScript{Code: d.readStr()}
case 0x0E: // Symbol
in = Symbol(d.readStr())
case 0x0F: // JavaScript with scope
d.i += 4 // Skip length
js := JavaScript{d.readStr(), make(M)}
d.readDocTo(reflect.ValueOf(js.Scope))
in = js
case 0x10: // Int32
in = int(d.readInt32())
case 0x11: // Mongo-specific timestamp
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x13: // Decimal128
in = Decimal128{
l: uint64(d.readInt64()),
h: uint64(d.readInt64()),
}
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key
in = MinKey
default:
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
}
outt := out.Type()
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
return true
}
if setter := getSetter(outt, out); setter != nil {
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
if err == SetZero {
out.Set(reflect.Zero(outt))
return true
}
if err == nil {
return true
}
if _, ok := err.(*TypeError); !ok {
panic(err)
}
return false
}
if in == nil {
out.Set(reflect.Zero(outt))
return true
}
outk := outt.Kind()
// Dereference and initialize pointer if necessary.
first := true
for outk == reflect.Ptr {
if !out.IsNil() {
out = out.Elem()
} else {
elem := reflect.New(outt.Elem())
if first {
// Only set if value is compatible.
first = false
defer func(out, elem reflect.Value) {
if good {
out.Set(elem)
}
}(out, elem)
} else {
out.Set(elem)
}
out = elem
}
outt = out.Type()
outk = outt.Kind()
}
inv := reflect.ValueOf(in)
if outt == inv.Type() {
out.Set(inv)
return true
}
switch outk {
case reflect.Interface:
out.Set(inv)
return true
case reflect.String:
switch inv.Kind() {
case reflect.String:
out.SetString(inv.String())
return true
case reflect.Slice:
if b, ok := in.([]byte); ok {
out.SetString(string(b))
return true
}
case reflect.Int, reflect.Int64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatInt(inv.Int(), 10))
return true
}
case reflect.Float64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
return true
}
}
case reflect.Slice, reflect.Array:
// Remember, array (0x04) slices are built with the correct
// element type. If we are here, must be a cross BSON kind
// conversion (e.g. 0x05 unmarshalling on string).
if outt.Elem().Kind() != reflect.Uint8 {
break
}
switch inv.Kind() {
case reflect.String:
slice := []byte(inv.String())
out.Set(reflect.ValueOf(slice))
return true
case reflect.Slice:
switch outt.Kind() {
case reflect.Array:
reflect.Copy(out, inv)
case reflect.Slice:
out.SetBytes(inv.Bytes())
}
return true
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetInt(inv.Int())
return true
case reflect.Float32, reflect.Float64:
out.SetInt(int64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetInt(1)
} else {
out.SetInt(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("can't happen: no uint types in BSON (!?)")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetUint(uint64(inv.Int()))
return true
case reflect.Float32, reflect.Float64:
out.SetUint(uint64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetUint(1)
} else {
out.SetUint(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON.")
}
case reflect.Float32, reflect.Float64:
switch inv.Kind() {
case reflect.Float32, reflect.Float64:
out.SetFloat(inv.Float())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetFloat(float64(inv.Int()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetFloat(1)
} else {
out.SetFloat(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Bool:
switch inv.Kind() {
case reflect.Bool:
out.SetBool(inv.Bool())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetBool(inv.Int() != 0)
return true
case reflect.Float32, reflect.Float64:
out.SetBool(inv.Float() != 0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Struct:
if outt == typeURL && inv.Kind() == reflect.String {
u, err := url.Parse(inv.String())
if err != nil {
panic(err)
}
out.Set(reflect.ValueOf(u).Elem())
return true
}
if outt == typeBinary {
if b, ok := in.([]byte); ok {
out.Set(reflect.ValueOf(Binary{Data: b}))
return true
}
}
}
return false
}
// --------------------------------------------------------------------------
// Parsers of basic types.
func (d *decoder) readRegEx() RegEx {
re := RegEx{}
re.Pattern = d.readCStr()
re.Options = d.readCStr()
return re
}
func (d *decoder) readBinary() Binary {
l := d.readInt32()
b := Binary{}
b.Kind = d.readByte()
b.Data = d.readBytes(l)
if b.Kind == 0x02 && len(b.Data) >= 4 {
// Weird obsolete format with redundant length.
b.Data = b.Data[4:]
}
return b
}
func (d *decoder) readStr() string {
l := d.readInt32()
b := d.readBytes(l - 1)
if d.readByte() != '\x00' {
corrupted()
}
return string(b)
}
func (d *decoder) readCStr() string {
start := d.i
end := start
l := len(d.in)
for ; end != l; end++ {
if d.in[end] == '\x00' {
break
}
}
d.i = end + 1
if d.i > l {
corrupted()
}
return string(d.in[start:end])
}
func (d *decoder) readBool() bool {
b := d.readByte()
if b == 0 {
return false
}
if b == 1 {
return true
}
panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
}
func (d *decoder) readFloat64() float64 {
return math.Float64frombits(uint64(d.readInt64()))
}
func (d *decoder) readInt32() int32 {
b := d.readBytes(4)
return int32((uint32(b[0]) << 0) |
(uint32(b[1]) << 8) |
(uint32(b[2]) << 16) |
(uint32(b[3]) << 24))
}
func (d *decoder) readInt64() int64 {
b := d.readBytes(8)
return int64((uint64(b[0]) << 0) |
(uint64(b[1]) << 8) |
(uint64(b[2]) << 16) |
(uint64(b[3]) << 24) |
(uint64(b[4]) << 32) |
(uint64(b[5]) << 40) |
(uint64(b[6]) << 48) |
(uint64(b[7]) << 56))
}
func (d *decoder) readByte() byte {
i := d.i
d.i++
if d.i > len(d.in) {
corrupted()
}
return d.in[i]
}
func (d *decoder) readBytes(length int32) []byte {
if length < 0 {
corrupted()
}
start := d.i
d.i += int(length)
if d.i < start || d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
}

View File

@ -1,514 +0,0 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"encoding/json"
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"time"
)
// --------------------------------------------------------------------------
// Some internal infrastructure.
var (
typeBinary = reflect.TypeOf(Binary{})
typeObjectId = reflect.TypeOf(ObjectId(""))
typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
typeSymbol = reflect.TypeOf(Symbol(""))
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
typeOrderKey = reflect.TypeOf(MinKey)
typeDocElem = reflect.TypeOf(DocElem{})
typeRawDocElem = reflect.TypeOf(RawDocElem{})
typeRaw = reflect.TypeOf(Raw{})
typeURL = reflect.TypeOf(url.URL{})
typeTime = reflect.TypeOf(time.Time{})
typeString = reflect.TypeOf("")
typeJSONNumber = reflect.TypeOf(json.Number(""))
)
const itoaCacheSize = 32
var itoaCache []string
func init() {
itoaCache = make([]string, itoaCacheSize)
for i := 0; i != itoaCacheSize; i++ {
itoaCache[i] = strconv.Itoa(i)
}
}
func itoa(i int) string {
if i < itoaCacheSize {
return itoaCache[i]
}
return strconv.Itoa(i)
}
// --------------------------------------------------------------------------
// Marshaling of the document value itself.
type encoder struct {
out []byte
}
func (e *encoder) addDoc(v reflect.Value) {
for {
if vi, ok := v.Interface().(Getter); ok {
getv, err := vi.GetBSON()
if err != nil {
panic(err)
}
v = reflect.ValueOf(getv)
continue
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
continue
}
break
}
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
}
if len(raw.Data) == 0 {
panic("Attempted to marshal empty Raw document")
}
e.addBytes(raw.Data...)
return
}
start := e.reserveInt32()
switch v.Kind() {
case reflect.Map:
e.addMap(v)
case reflect.Struct:
e.addStruct(v)
case reflect.Array, reflect.Slice:
e.addSlice(v)
default:
panic("Can't marshal " + v.Type().String() + " as a BSON document")
}
e.addBytes(0)
e.setInt32(start, int32(len(e.out)-start))
}
func (e *encoder) addMap(v reflect.Value) {
for _, k := range v.MapKeys() {
e.addElem(k.String(), v.MapIndex(k), false)
}
}
func (e *encoder) addStruct(v reflect.Value) {
sinfo, err := getStructInfo(v.Type())
if err != nil {
panic(err)
}
var value reflect.Value
if sinfo.InlineMap >= 0 {
m := v.Field(sinfo.InlineMap)
if m.Len() > 0 {
for _, k := range m.MapKeys() {
ks := k.String()
if _, found := sinfo.FieldsMap[ks]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
}
e.addElem(ks, m.MapIndex(k), false)
}
}
}
for _, info := range sinfo.FieldsList {
if info.Inline == nil {
value = v.Field(info.Num)
} else {
value = v.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.addElem(info.Key, value, info.MinSize)
}
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Ptr, reflect.Interface:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
if vt == typeTime {
return v.Interface().(time.Time).IsZero()
}
for i := 0; i < v.NumField(); i++ {
if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
func (e *encoder) addSlice(v reflect.Value) {
vi := v.Interface()
if d, ok := vi.(D); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if d, ok := vi.(RawD); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
l := v.Len()
et := v.Type().Elem()
if et == typeDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(DocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if et == typeRawDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(RawDocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
for i := 0; i < l; i++ {
e.addElem(itoa(i), v.Index(i), false)
}
}
// --------------------------------------------------------------------------
// Marshaling of elements in a document.
func (e *encoder) addElemName(kind byte, name string) {
e.addBytes(kind)
e.addBytes([]byte(name)...)
e.addBytes(0)
}
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName(0x0A, name)
return
}
if getter, ok := v.Interface().(Getter); ok {
getv, err := getter.GetBSON()
if err != nil {
panic(err)
}
e.addElem(name, reflect.ValueOf(getv), minSize)
return
}
switch v.Kind() {
case reflect.Interface:
e.addElem(name, v.Elem(), minSize)
case reflect.Ptr:
e.addElem(name, v.Elem(), minSize)
case reflect.String:
s := v.String()
switch v.Type() {
case typeObjectId:
if len(s) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName(0x07, name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName(0x0E, name)
e.addStr(s)
case typeJSONNumber:
n := v.Interface().(json.Number)
if i, err := n.Int64(); err == nil {
e.addElemName(0x12, name)
e.addInt64(i)
} else if f, err := n.Float64(); err == nil {
e.addElemName(0x01, name)
e.addFloat64(f)
} else {
panic("failed to convert json.Number to a number: " + s)
}
default:
e.addElemName(0x02, name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName(0x01, name)
e.addFloat64(v.Float())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u := v.Uint()
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName(0x10, name)
e.addInt32(int32(u))
} else {
e.addElemName(0x12, name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName(0x11, name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName(0x7F, name)
} else {
e.addElemName(0xFF, name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName(0x10, name)
e.addInt32(int32(i))
} else {
e.addElemName(0x12, name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName(0x08, name)
if v.Bool() {
e.addBytes(1)
} else {
e.addBytes(0)
}
case reflect.Map:
e.addElemName(0x03, name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName(0x05, name)
e.addBinary(0x00, v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName(0x03, name)
e.addDoc(v)
} else {
e.addElemName(0x04, name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName(0x05, name)
if v.CanAddr() {
e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
} else {
n := v.Len()
e.addInt32(int32(n))
e.addBytes(0x00)
for i := 0; i < n; i++ {
el := v.Index(i)
e.addBytes(byte(el.Uint()))
}
}
} else {
e.addElemName(0x04, name)
e.addDoc(v)
}
case reflect.Struct:
switch s := v.Interface().(type) {
case Raw:
kind := s.Kind
if kind == 0x00 {
kind = 0x03
}
if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
panic("Attempted to marshal empty Raw document")
}
e.addElemName(kind, name)
e.addBytes(s.Data...)
case Binary:
e.addElemName(0x05, name)
e.addBinary(s.Kind, s.Data)
case Decimal128:
e.addElemName(0x13, name)
e.addInt64(int64(s.l))
e.addInt64(int64(s.h))
case DBPointer:
e.addElemName(0x0C, name)
e.addStr(s.Namespace)
if len(s.Id) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s.Id)) + ")")
}
e.addBytes([]byte(s.Id)...)
case RegEx:
e.addElemName(0x0B, name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName(0x0D, name)
e.addStr(s.Code)
} else {
e.addElemName(0x0F, name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
e.setInt32(start, int32(len(e.out)-start))
}
case time.Time:
// MongoDB handles timestamps as milliseconds.
e.addElemName(0x09, name)
e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
case url.URL:
e.addElemName(0x02, name)
e.addStr(s.String())
case undefined:
e.addElemName(0x06, name)
default:
e.addElemName(0x03, name)
e.addDoc(v)
}
default:
panic("Can't marshal " + v.Type().String() + " in a BSON document")
}
}
// --------------------------------------------------------------------------
// Marshaling of base types.
func (e *encoder) addBinary(subtype byte, v []byte) {
if subtype == 0x02 {
// Wonder how that brilliant idea came to life. Obsolete, luckily.
e.addInt32(int32(len(v) + 4))
e.addBytes(subtype)
e.addInt32(int32(len(v)))
} else {
e.addInt32(int32(len(v)))
e.addBytes(subtype)
}
e.addBytes(v...)
}
func (e *encoder) addStr(v string) {
e.addInt32(int32(len(v) + 1))
e.addCStr(v)
}
func (e *encoder) addCStr(v string) {
e.addBytes([]byte(v)...)
e.addBytes(0)
}
func (e *encoder) reserveInt32() (pos int) {
pos = len(e.out)
e.addBytes(0, 0, 0, 0)
return pos
}
func (e *encoder) setInt32(pos int, v int32) {
e.out[pos+0] = byte(v)
e.out[pos+1] = byte(v >> 8)
e.out[pos+2] = byte(v >> 16)
e.out[pos+3] = byte(v >> 24)
}
func (e *encoder) addInt32(v int32) {
u := uint32(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
func (e *encoder) addInt64(v int64) {
u := uint64(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
func (e *encoder) addFloat64(v float64) {
e.addInt64(int64(math.Float64bits(v)))
}
func (e *encoder) addBytes(v ...byte) {
e.out = append(e.out, v...)
}

380
vendor/gopkg.in/mgo.v2/bson/json.go generated vendored
View File

@ -1,380 +0,0 @@
package bson
import (
"bytes"
"encoding/base64"
"fmt"
"gopkg.in/mgo.v2/internal/json"
"strconv"
"time"
)
// UnmarshalJSON unmarshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func UnmarshalJSON(data []byte, value interface{}) error {
d := json.NewDecoder(bytes.NewBuffer(data))
d.Extend(&jsonExt)
return d.Decode(value)
}
// MarshalJSON marshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func MarshalJSON(value interface{}) ([]byte, error) {
var buf bytes.Buffer
e := json.NewEncoder(&buf)
e.Extend(&jsonExt)
err := e.Encode(value)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
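// exampleExtendedJSON is an editorial sketch, not part of the original
// file: it round-trips an ObjectId through the extended-JSON encoder and
// decoder defined above, using only identifiers from this package.
func exampleExtendedJSON() (M, error) {
    out, err := MarshalJSON(M{"id": ObjectIdHex("0123456789abcdef01234567")})
    if err != nil {
        return nil, err
    }
    // out holds {"id":{"$oid":"0123456789abcdef01234567"}} followed by a
    // newline appended by the underlying encoder.
    var doc M
    err = UnmarshalJSON(out, &doc)
    return doc, err
}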
// jdec is used internally by the JSON decoding functions
// so they may unmarshal function calls (such as ObjectId(...)) without
// getting into endless recursion due to keyed objects.
func jdec(data []byte, value interface{}) error {
d := json.NewDecoder(bytes.NewBuffer(data))
d.Extend(&funcExt)
return d.Decode(value)
}
var jsonExt json.Extension
var funcExt json.Extension
// TODO
// - Shell regular expressions ("/regexp/opts")
func init() {
jsonExt.DecodeUnquotedKeys(true)
jsonExt.DecodeTrailingCommas(true)
funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
jsonExt.DecodeKeyed("$binary", jdecBinary)
jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
jsonExt.EncodeType([]byte(nil), jencBinarySlice)
jsonExt.EncodeType(Binary{}, jencBinaryType)
funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
funcExt.DecodeFunc("new Date", "$dateFunc", "S")
jsonExt.DecodeKeyed("$date", jdecDate)
jsonExt.DecodeKeyed("$dateFunc", jdecDate)
jsonExt.EncodeType(time.Time{}, jencDate)
funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
funcExt.DecodeConst("undefined", Undefined)
jsonExt.DecodeKeyed("$regex", jdecRegEx)
jsonExt.EncodeType(RegEx{}, jencRegEx)
funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
jsonExt.DecodeKeyed("$oid", jdecObjectId)
jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
jsonExt.EncodeType(ObjectId(""), jencObjectId)
funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
jsonExt.EncodeType(int64(0), jencNumberLong)
jsonExt.EncodeType(int(0), jencInt)
funcExt.DecodeConst("MinKey", MinKey)
funcExt.DecodeConst("MaxKey", MaxKey)
jsonExt.DecodeKeyed("$minKey", jdecMinKey)
jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
jsonExt.DecodeKeyed("$undefined", jdecUndefined)
jsonExt.EncodeType(Undefined, jencUndefined)
jsonExt.Extend(&funcExt)
}
func fbytes(format string, args ...interface{}) []byte {
var buf bytes.Buffer
fmt.Fprintf(&buf, format, args...)
return buf.Bytes()
}
func jdecBinary(data []byte) (interface{}, error) {
var v struct {
Binary []byte `json:"$binary"`
Type string `json:"$type"`
Func struct {
Binary []byte `json:"$binary"`
Type int64 `json:"$type"`
} `json:"$binaryFunc"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
var binData []byte
var binKind int64
if v.Type == "" && v.Binary == nil {
binData = v.Func.Binary
binKind = v.Func.Type
} else if v.Type == "" {
return v.Binary, nil
} else {
binData = v.Binary
binKind, err = strconv.ParseInt(v.Type, 0, 64)
if err != nil {
binKind = -1
}
}
if binKind == 0 {
return binData, nil
}
if binKind < 0 || binKind > 255 {
return nil, fmt.Errorf("invalid type in binary object: %s", data)
}
return Binary{Kind: byte(binKind), Data: binData}, nil
}
func jencBinarySlice(v interface{}) ([]byte, error) {
in := v.([]byte)
out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
base64.StdEncoding.Encode(out, in)
return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
}
func jencBinaryType(v interface{}) ([]byte, error) {
in := v.(Binary)
out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
base64.StdEncoding.Encode(out, in.Data)
return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
}
const jdateFormat = "2006-01-02T15:04:05.999Z"
func jdecDate(data []byte) (interface{}, error) {
var v struct {
S string `json:"$date"`
Func struct {
S string
} `json:"$dateFunc"`
}
_ = jdec(data, &v)
if v.S == "" {
v.S = v.Func.S
}
if v.S != "" {
for _, format := range []string{jdateFormat, "2006-01-02"} {
t, err := time.Parse(format, v.S)
if err == nil {
return t, nil
}
}
return nil, fmt.Errorf("cannot parse date: %q", v.S)
}
var vn struct {
Date struct {
N int64 `json:"$numberLong,string"`
} `json:"$date"`
Func struct {
S int64
} `json:"$dateFunc"`
}
err := jdec(data, &vn)
if err != nil {
return nil, fmt.Errorf("cannot parse date: %q", data)
}
n := vn.Date.N
if n == 0 {
n = vn.Func.S
}
return time.Unix(n/1000, n%1000*1e6).UTC(), nil
}
func jencDate(v interface{}) ([]byte, error) {
t := v.(time.Time)
return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
}
func jdecTimestamp(data []byte) (interface{}, error) {
var v struct {
Func struct {
T int32 `json:"t"`
I int32 `json:"i"`
} `json:"$timestamp"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
}
func jencTimestamp(v interface{}) ([]byte, error) {
ts := uint64(v.(MongoTimestamp))
return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
}
func jdecRegEx(data []byte) (interface{}, error) {
var v struct {
Regex string `json:"$regex"`
Options string `json:"$options"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
return RegEx{v.Regex, v.Options}, nil
}
func jencRegEx(v interface{}) ([]byte, error) {
re := v.(RegEx)
type regex struct {
Regex string `json:"$regex"`
Options string `json:"$options"`
}
return json.Marshal(regex{re.Pattern, re.Options})
}
func jdecObjectId(data []byte) (interface{}, error) {
var v struct {
Id string `json:"$oid"`
Func struct {
Id string
} `json:"$oidFunc"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.Id == "" {
v.Id = v.Func.Id
}
return ObjectIdHex(v.Id), nil
}
func jencObjectId(v interface{}) ([]byte, error) {
return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
}
func jdecDBRef(data []byte) (interface{}, error) {
// TODO Support unmarshaling $ref and $id into the input value.
var v struct {
Obj map[string]interface{} `json:"$dbrefFunc"`
}
// TODO Fix this. Must not be required.
v.Obj = make(map[string]interface{})
err := jdec(data, &v)
if err != nil {
return nil, err
}
return v.Obj, nil
}
func jdecNumberLong(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$numberLong,string"`
Func struct {
N int64 `json:",string"`
} `json:"$numberLongFunc"`
}
var vn struct {
N int64 `json:"$numberLong"`
Func struct {
N int64
} `json:"$numberLongFunc"`
}
err := jdec(data, &v)
if err != nil {
err = jdec(data, &vn)
v.N = vn.N
v.Func.N = vn.Func.N
}
if err != nil {
return nil, err
}
if v.N != 0 {
return v.N, nil
}
return v.Func.N, nil
}
func jencNumberLong(v interface{}) ([]byte, error) {
n := v.(int64)
f := `{"$numberLong":"%d"}`
if n <= 1<<53 {
f = `{"$numberLong":%d}`
}
return fbytes(f, n), nil
}
func jencInt(v interface{}) ([]byte, error) {
n := v.(int)
f := `{"$numberLong":"%d"}`
if int64(n) <= 1<<53 {
f = `%d`
}
return fbytes(f, n), nil
}
func jdecMinKey(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$minKey"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.N != 1 {
return nil, fmt.Errorf("invalid $minKey object: %s", data)
}
return MinKey, nil
}
func jdecMaxKey(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$maxKey"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.N != 1 {
return nil, fmt.Errorf("invalid $maxKey object: %s", data)
}
return MaxKey, nil
}
func jencMinMaxKey(v interface{}) ([]byte, error) {
switch v.(orderKey) {
case MinKey:
return []byte(`{"$minKey":1}`), nil
case MaxKey:
return []byte(`{"$maxKey":1}`), nil
}
panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
}
func jdecUndefined(data []byte) (interface{}, error) {
var v struct {
B bool `json:"$undefined"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if !v.B {
return nil, fmt.Errorf("invalid $undefined object: %s", data)
}
return Undefined, nil
}
func jencUndefined(v interface{}) ([]byte, error) {
return []byte(`{"$undefined":true}`), nil
}

View File

@ -1,184 +0,0 @@
package bson_test
import (
"gopkg.in/mgo.v2/bson"
. "gopkg.in/check.v1"
"reflect"
"strings"
"time"
)
type jsonTest struct {
a interface{} // value encoded into JSON (optional)
b string // JSON expected as output of <a>, and used as input to <c>
c interface{} // Value expected from decoding <b>, defaults to <a>
e string // error string, if decoding (b) should fail
}
var jsonTests = []jsonTest{
// $binary
{
a: []byte("foo"),
b: `{"$binary":"Zm9v","$type":"0x0"}`,
}, {
a: bson.Binary{Kind: 2, Data: []byte("foo")},
b: `{"$binary":"Zm9v","$type":"0x2"}`,
}, {
b: `BinData(2,"Zm9v")`,
c: bson.Binary{Kind: 2, Data: []byte("foo")},
},
// $date
{
a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
b: `{"$date":"2016-05-15T01:02:03.004Z"}`,
}, {
b: `{"$date": {"$numberLong": "1002"}}`,
c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC),
}, {
b: `ISODate("2016-05-15T01:02:03.004Z")`,
c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
}, {
b: `new Date(1000)`,
c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
}, {
b: `new Date("2016-05-15")`,
c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC),
},
// $timestamp
{
a: bson.MongoTimestamp(4294967298),
b: `{"$timestamp":{"t":1,"i":2}}`,
}, {
b: `Timestamp(1, 2)`,
c: bson.MongoTimestamp(4294967298),
},
// $regex
{
a: bson.RegEx{"pattern", "options"},
b: `{"$regex":"pattern","$options":"options"}`,
},
// $oid
{
a: bson.ObjectIdHex("0123456789abcdef01234567"),
b: `{"$oid":"0123456789abcdef01234567"}`,
}, {
b: `ObjectId("0123456789abcdef01234567")`,
c: bson.ObjectIdHex("0123456789abcdef01234567"),
},
// $ref (no special type)
{
b: `DBRef("name", "id")`,
c: map[string]interface{}{"$ref": "name", "$id": "id"},
},
// $numberLong
{
a: 123,
b: `123`,
}, {
a: int64(9007199254740992),
b: `{"$numberLong":9007199254740992}`,
}, {
a: int64(1<<53 + 1),
b: `{"$numberLong":"9007199254740993"}`,
}, {
a: 1<<53 + 1,
b: `{"$numberLong":"9007199254740993"}`,
c: int64(9007199254740993),
}, {
b: `NumberLong(9007199254740992)`,
c: int64(1 << 53),
}, {
b: `NumberLong("9007199254740993")`,
c: int64(1<<53 + 1),
},
// $minKey, $maxKey
{
a: bson.MinKey,
b: `{"$minKey":1}`,
}, {
a: bson.MaxKey,
b: `{"$maxKey":1}`,
}, {
b: `MinKey`,
c: bson.MinKey,
}, {
b: `MaxKey`,
c: bson.MaxKey,
}, {
b: `{"$minKey":0}`,
e: `invalid $minKey object: {"$minKey":0}`,
}, {
b: `{"$maxKey":0}`,
e: `invalid $maxKey object: {"$maxKey":0}`,
},
{
a: bson.Undefined,
b: `{"$undefined":true}`,
}, {
b: `undefined`,
c: bson.Undefined,
}, {
b: `{"v": undefined}`,
c: struct{ V interface{} }{bson.Undefined},
},
// Unquoted keys and trailing commas
{
b: `{$foo: ["bar",],}`,
c: map[string]interface{}{"$foo": []interface{}{"bar"}},
},
}
func (s *S) TestJSON(c *C) {
for i, item := range jsonTests {
c.Logf("------------ (#%d)", i)
c.Logf("A: %#v", item.a)
c.Logf("B: %#v", item.b)
if item.c == nil {
item.c = item.a
} else {
c.Logf("C: %#v", item.c)
}
if item.e != "" {
c.Logf("E: %s", item.e)
}
if item.a != nil {
data, err := bson.MarshalJSON(item.a)
c.Assert(err, IsNil)
c.Logf("Dumped: %#v", string(data))
c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b)
}
var zero interface{}
if item.c == nil {
zero = &struct{}{}
} else {
zero = reflect.New(reflect.TypeOf(item.c)).Interface()
}
err := bson.UnmarshalJSON([]byte(item.b), zero)
if item.e != "" {
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, item.e)
continue
}
c.Assert(err, IsNil)
zerov := reflect.ValueOf(zero)
value := zerov.Interface()
if zerov.Kind() == reflect.Ptr {
value = zerov.Elem().Interface()
}
c.Logf("Loaded: %#v", value)
c.Assert(value, DeepEquals, item.c)
}
}

View File

@ -1,27 +0,0 @@
#!/bin/sh
set -e
if [ ! -d specifications ]; then
git clone -b bson git@github.com:jyemin/specifications
fi
TESTFILE="../specdata_test.go"
cat <<END > $TESTFILE
package bson_test
var specTests = []string{
END
for file in specifications/source/bson/tests/*.yml; do
(
echo '`'
cat $file
echo -n '`,'
) >> $TESTFILE
done
echo '}' >> $TESTFILE
gofmt -w $TESTFILE
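The generated specdata_test.go (shown next) embeds each YAML file as one raw string in specTests. A sketch of consuming those strings, assuming the gopkg.in/yaml.v2 parser; the struct shape here is illustrative, not necessarily the one the real spec test uses:

package bson_test

import (
	"testing"

	yaml "gopkg.in/yaml.v2"
)

type specSketch struct {
	Description string
	Documents   []map[string]interface{}
}

func TestSpecSketch(t *testing.T) {
	for _, raw := range specTests {
		var doc specSketch
		if err := yaml.Unmarshal([]byte(raw), &doc); err != nil {
			t.Fatal(err)
		}
		t.Logf("%s: %d documents", doc.Description, len(doc.Documents))
	}
}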

View File

@ -1,241 +0,0 @@
package bson_test
var specTests = []string{
`
---
description: "Array type"
documents:
-
decoded:
a : []
encoded: 0D000000046100050000000000
-
decoded:
a: [10]
encoded: 140000000461000C0000001030000A0000000000
-
# Decode an array that uses an empty string as the key
decodeOnly : true
decoded:
a: [10]
encoded: 130000000461000B00000010000A0000000000
-
# Decode an array that uses a non-numeric string as the key
decodeOnly : true
decoded:
a: [10]
encoded: 150000000461000D000000106162000A0000000000
`, `
---
description: "Boolean type"
documents:
-
encoded: "090000000862000100"
decoded: { "b" : true }
-
encoded: "090000000862000000"
decoded: { "b" : false }
`, `
---
description: "Corrupted BSON"
documents:
-
encoded: "09000000016600"
error: "truncated double"
-
encoded: "09000000026600"
error: "truncated string"
-
encoded: "09000000036600"
error: "truncated document"
-
encoded: "09000000046600"
error: "truncated array"
-
encoded: "09000000056600"
error: "truncated binary"
-
encoded: "09000000076600"
error: "truncated objectid"
-
encoded: "09000000086600"
error: "truncated boolean"
-
encoded: "09000000096600"
error: "truncated date"
-
encoded: "090000000b6600"
error: "truncated regex"
-
encoded: "090000000c6600"
error: "truncated db pointer"
-
encoded: "0C0000000d6600"
error: "truncated javascript"
-
encoded: "0C0000000e6600"
error: "truncated symbol"
-
encoded: "0C0000000f6600"
error: "truncated javascript with scope"
-
encoded: "0C000000106600"
error: "truncated int32"
-
encoded: "0C000000116600"
error: "truncated timestamp"
-
encoded: "0C000000126600"
error: "truncated int64"
-
encoded: "0400000000"
error: basic
-
encoded: "0500000001"
error: basic
-
encoded: "05000000"
error: basic
-
encoded: "0700000002610078563412"
error: basic
-
encoded: "090000001061000500"
error: basic
-
encoded: "00000000000000000000"
error: basic
-
encoded: "1300000002666f6f00040000006261720000"
error: "basic"
-
encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
error: basic
-
encoded: "1500000003666f6f000c0000000862617200010000"
error: basic
-
encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
error: basic
-
encoded: "1000000002610004000000616263ff00"
error: string is not null-terminated
-
encoded: "0c0000000200000000000000"
error: bad_string_length
-
encoded: "120000000200ffffffff666f6f6261720000"
error: bad_string_length
-
encoded: "0c0000000e00000000000000"
error: bad_string_length
-
encoded: "120000000e00ffffffff666f6f6261720000"
error: bad_string_length
-
encoded: "180000000c00fa5bd841d6585d9900"
error: ""
-
encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
error: bad_string_length
-
encoded: "0c0000000d00000000000000"
error: bad_string_length
-
encoded: "0c0000000d00ffffffff0000"
error: bad_string_length
-
encoded: "1c0000000f001500000000000000000c000000020001000000000000"
error: bad_string_length
-
encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
error: bad_string_length
-
encoded: "1c0000000f001500000001000000000c000000020000000000000000"
error: bad_string_length
-
encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
error: bad_string_length
-
encoded: "0E00000008616263646566676869707172737475"
error: "Run-on CString"
-
encoded: "0100000000"
error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
-
encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
error: "One object, but with object size listed smaller than it is in the data"
-
encoded: "05000000"
error: "One object, missing the EOO at the end"
-
encoded: "0500000001"
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
-
encoded: "05000000ff"
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
-
encoded: "0500000070"
error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
-
encoded: "07000000000000"
error: "Invalid BSON type low range"
-
encoded: "07000000800000"
error: "Invalid BSON type high range"
-
encoded: "090000000862000200"
error: "Invalid boolean value of 2"
-
encoded: "09000000086200ff00"
error: "Invalid boolean value of -1"
`, `
---
description: "Int32 type"
documents:
-
decoded:
i: -2147483648
encoded: 0C0000001069000000008000
-
decoded:
i: 2147483647
encoded: 0C000000106900FFFFFF7F00
-
decoded:
i: -1
encoded: 0C000000106900FFFFFFFF00
-
decoded:
i: 0
encoded: 0C0000001069000000000000
-
decoded:
i: 1
encoded: 0C0000001069000100000000
`, `
---
description: "String type"
documents:
-
decoded:
s : ""
encoded: 0D000000027300010000000000
-
decoded:
s: "a"
encoded: 0E00000002730002000000610000
-
decoded:
s: "This is a string"
encoded: 1D0000000273001100000054686973206973206120737472696E670000
-
decoded:
s: "κόσμε"
encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
`}
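Each encoded value above is a hex dump of a complete BSON document. A small sketch that decodes one of the Int32 cases, again assuming gopkg.in/mgo.v2/bson:

package main

import (
	"encoding/hex"
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	// 0C000000 10 6900 01000000 00: length 12, int32 field "i" = 1, terminator.
	raw, err := hex.DecodeString("0C0000001069000100000000")
	if err != nil {
		panic(err)
	}
	var doc bson.M
	if err := bson.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc) // map[i:1]
}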

View File

@ -1,223 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Large data benchmark.
// The JSON data is a summary of agl's changes in the
// go, webkit, and chromium open source projects.
// We benchmark converting between the JSON form
// and in-memory data structures.
package json
import (
"bytes"
"compress/gzip"
"io/ioutil"
"os"
"strings"
"testing"
)
type codeResponse struct {
Tree *codeNode `json:"tree"`
Username string `json:"username"`
}
type codeNode struct {
Name string `json:"name"`
Kids []*codeNode `json:"kids"`
CLWeight float64 `json:"cl_weight"`
Touches int `json:"touches"`
MinT int64 `json:"min_t"`
MaxT int64 `json:"max_t"`
MeanT int64 `json:"mean_t"`
}
var codeJSON []byte
var codeStruct codeResponse
func codeInit() {
f, err := os.Open("testdata/code.json.gz")
if err != nil {
panic(err)
}
defer f.Close()
gz, err := gzip.NewReader(f)
if err != nil {
panic(err)
}
data, err := ioutil.ReadAll(gz)
if err != nil {
panic(err)
}
codeJSON = data
if err := Unmarshal(codeJSON, &codeStruct); err != nil {
panic("unmarshal code.json: " + err.Error())
}
if data, err = Marshal(&codeStruct); err != nil {
panic("marshal code.json: " + err.Error())
}
if !bytes.Equal(data, codeJSON) {
println("different lengths", len(data), len(codeJSON))
for i := 0; i < len(data) && i < len(codeJSON); i++ {
if data[i] != codeJSON[i] {
println("re-marshal: changed at byte", i)
println("orig: ", string(codeJSON[i-10:i+10]))
println("new: ", string(data[i-10:i+10]))
break
}
}
panic("re-marshal code.json: different result")
}
}
func BenchmarkCodeEncoder(b *testing.B) {
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
enc := NewEncoder(ioutil.Discard)
for i := 0; i < b.N; i++ {
if err := enc.Encode(&codeStruct); err != nil {
b.Fatal("Encode:", err)
}
}
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeMarshal(b *testing.B) {
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
for i := 0; i < b.N; i++ {
if _, err := Marshal(&codeStruct); err != nil {
b.Fatal("Marshal:", err)
}
}
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeDecoder(b *testing.B) {
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
var buf bytes.Buffer
dec := NewDecoder(&buf)
var r codeResponse
for i := 0; i < b.N; i++ {
buf.Write(codeJSON)
// hide EOF
buf.WriteByte('\n')
buf.WriteByte('\n')
buf.WriteByte('\n')
if err := dec.Decode(&r); err != nil {
b.Fatal("Decode:", err)
}
}
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkDecoderStream(b *testing.B) {
b.StopTimer()
var buf bytes.Buffer
dec := NewDecoder(&buf)
buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
var x interface{}
if err := dec.Decode(&x); err != nil {
b.Fatal("Decode:", err)
}
ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
b.StartTimer()
for i := 0; i < b.N; i++ {
if i%300000 == 0 {
buf.WriteString(ones)
}
x = nil
if err := dec.Decode(&x); err != nil || x != 1.0 {
b.Fatalf("Decode: %v after %d", err, i)
}
}
}
func BenchmarkCodeUnmarshal(b *testing.B) {
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
for i := 0; i < b.N; i++ {
var r codeResponse
if err := Unmarshal(codeJSON, &r); err != nil {
b.Fatal("Unmarshal:", err)
}
}
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeUnmarshalReuse(b *testing.B) {
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
var r codeResponse
for i := 0; i < b.N; i++ {
if err := Unmarshal(codeJSON, &r); err != nil {
b.Fatal("Unmarshal:", err)
}
}
}
func BenchmarkUnmarshalString(b *testing.B) {
data := []byte(`"hello, world"`)
var s string
for i := 0; i < b.N; i++ {
if err := Unmarshal(data, &s); err != nil {
b.Fatal("Unmarshal:", err)
}
}
}
func BenchmarkUnmarshalFloat64(b *testing.B) {
var f float64
data := []byte(`3.14`)
for i := 0; i < b.N; i++ {
if err := Unmarshal(data, &f); err != nil {
b.Fatal("Unmarshal:", err)
}
}
}
func BenchmarkUnmarshalInt64(b *testing.B) {
var x int64
data := []byte(`3`)
for i := 0; i < b.N; i++ {
if err := Unmarshal(data, &x); err != nil {
b.Fatal("Unmarshal:", err)
}
}
}
func BenchmarkIssue10335(b *testing.B) {
b.ReportAllocs()
var s struct{}
j := []byte(`{"a":{ }}`)
for n := 0; n < b.N; n++ {
if err := Unmarshal(j, &s); err != nil {
b.Fatal(err)
}
}
}
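These benchmarks assume testdata/code.json.gz is available next to the package; a typical invocation would be go test -bench=Code -benchmem, though the exact flags are up to the reader.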

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,613 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"fmt"
"math"
"reflect"
"testing"
"unicode"
)
type Optionals struct {
Sr string `json:"sr"`
So string `json:"so,omitempty"`
Sw string `json:"-"`
Ir int `json:"omitempty"` // actually named omitempty, not an option
Io int `json:"io,omitempty"`
Slr []string `json:"slr,random"`
Slo []string `json:"slo,omitempty"`
Mr map[string]interface{} `json:"mr"`
Mo map[string]interface{} `json:",omitempty"`
Fr float64 `json:"fr"`
Fo float64 `json:"fo,omitempty"`
Br bool `json:"br"`
Bo bool `json:"bo,omitempty"`
Ur uint `json:"ur"`
Uo uint `json:"uo,omitempty"`
Str struct{} `json:"str"`
Sto struct{} `json:"sto,omitempty"`
}
var optionalsExpected = `{
"sr": "",
"omitempty": 0,
"slr": null,
"mr": {},
"fr": 0,
"br": false,
"ur": 0,
"str": {},
"sto": {}
}`
func TestOmitEmpty(t *testing.T) {
var o Optionals
o.Sw = "something"
o.Mr = map[string]interface{}{}
o.Mo = map[string]interface{}{}
got, err := MarshalIndent(&o, "", " ")
if err != nil {
t.Fatal(err)
}
if got := string(got); got != optionalsExpected {
t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
}
}
type StringTag struct {
BoolStr bool `json:",string"`
IntStr int64 `json:",string"`
StrStr string `json:",string"`
}
var stringTagExpected = `{
"BoolStr": "true",
"IntStr": "42",
"StrStr": "\"xzbit\""
}`
func TestStringTag(t *testing.T) {
var s StringTag
s.BoolStr = true
s.IntStr = 42
s.StrStr = "xzbit"
got, err := MarshalIndent(&s, "", " ")
if err != nil {
t.Fatal(err)
}
if got := string(got); got != stringTagExpected {
t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
}
// Verify that it round-trips.
var s2 StringTag
err = NewDecoder(bytes.NewReader(got)).Decode(&s2)
if err != nil {
t.Fatalf("Decode: %v", err)
}
if !reflect.DeepEqual(s, s2) {
t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
}
}
// byte slices are special even if they're renamed types.
type renamedByte byte
type renamedByteSlice []byte
type renamedRenamedByteSlice []renamedByte
func TestEncodeRenamedByteSlice(t *testing.T) {
s := renamedByteSlice("abc")
result, err := Marshal(s)
if err != nil {
t.Fatal(err)
}
expect := `"YWJj"`
if string(result) != expect {
t.Errorf(" got %s want %s", result, expect)
}
r := renamedRenamedByteSlice("abc")
result, err = Marshal(r)
if err != nil {
t.Fatal(err)
}
if string(result) != expect {
t.Errorf(" got %s want %s", result, expect)
}
}
var unsupportedValues = []interface{}{
math.NaN(),
math.Inf(-1),
math.Inf(1),
}
func TestUnsupportedValues(t *testing.T) {
for _, v := range unsupportedValues {
if _, err := Marshal(v); err != nil {
if _, ok := err.(*UnsupportedValueError); !ok {
t.Errorf("for %v, got %T want UnsupportedValueError", v, err)
}
} else {
t.Errorf("for %v, expected error", v)
}
}
}
// Ref has Marshaler and Unmarshaler methods with pointer receiver.
type Ref int
func (*Ref) MarshalJSON() ([]byte, error) {
return []byte(`"ref"`), nil
}
func (r *Ref) UnmarshalJSON([]byte) error {
*r = 12
return nil
}
// Val has Marshaler methods with value receiver.
type Val int
func (Val) MarshalJSON() ([]byte, error) {
return []byte(`"val"`), nil
}
// RefText has Marshaler and Unmarshaler methods with pointer receiver.
type RefText int
func (*RefText) MarshalText() ([]byte, error) {
return []byte(`"ref"`), nil
}
func (r *RefText) UnmarshalText([]byte) error {
*r = 13
return nil
}
// ValText has Marshaler methods with value receiver.
type ValText int
func (ValText) MarshalText() ([]byte, error) {
return []byte(`"val"`), nil
}
func TestRefValMarshal(t *testing.T) {
var s = struct {
R0 Ref
R1 *Ref
R2 RefText
R3 *RefText
V0 Val
V1 *Val
V2 ValText
V3 *ValText
}{
R0: 12,
R1: new(Ref),
R2: 14,
R3: new(RefText),
V0: 13,
V1: new(Val),
V2: 15,
V3: new(ValText),
}
const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
b, err := Marshal(&s)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if got := string(b); got != want {
t.Errorf("got %q, want %q", got, want)
}
}
// C implements Marshaler and returns unescaped JSON.
type C int
func (C) MarshalJSON() ([]byte, error) {
return []byte(`"<&>"`), nil
}
// CText implements Marshaler and returns unescaped text.
type CText int
func (CText) MarshalText() ([]byte, error) {
return []byte(`"<&>"`), nil
}
func TestMarshalerEscaping(t *testing.T) {
var c C
want := `"\u003c\u0026\u003e"`
b, err := Marshal(c)
if err != nil {
t.Fatalf("Marshal(c): %v", err)
}
if got := string(b); got != want {
t.Errorf("Marshal(c) = %#q, want %#q", got, want)
}
var ct CText
want = `"\"\u003c\u0026\u003e\""`
b, err = Marshal(ct)
if err != nil {
t.Fatalf("Marshal(ct): %v", err)
}
if got := string(b); got != want {
t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
}
}
type IntType int
type MyStruct struct {
IntType
}
func TestAnonymousNonstruct(t *testing.T) {
var i IntType = 11
a := MyStruct{i}
const want = `{"IntType":11}`
b, err := Marshal(a)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if got := string(b); got != want {
t.Errorf("got %q, want %q", got, want)
}
}
type BugA struct {
S string
}
type BugB struct {
BugA
S string
}
type BugC struct {
S string
}
// Legal Go: We never use the repeated embedded field (S).
type BugX struct {
A int
BugA
BugB
}
// Issue 5245.
func TestEmbeddedBug(t *testing.T) {
v := BugB{
BugA{"A"},
"B",
}
b, err := Marshal(v)
if err != nil {
t.Fatal("Marshal:", err)
}
want := `{"S":"B"}`
got := string(b)
if got != want {
t.Fatalf("Marshal: got %s want %s", got, want)
}
// Now check that the duplicate field, S, does not appear.
x := BugX{
A: 23,
}
b, err = Marshal(x)
if err != nil {
t.Fatal("Marshal:", err)
}
want = `{"A":23}`
got = string(b)
if got != want {
t.Fatalf("Marshal: got %s want %s", got, want)
}
}
type BugD struct { // Same as BugA after tagging.
XXX string `json:"S"`
}
// BugD's tagged S field should dominate BugA's.
type BugY struct {
BugA
BugD
}
// Test that a field with a tag dominates untagged fields.
func TestTaggedFieldDominates(t *testing.T) {
v := BugY{
BugA{"BugA"},
BugD{"BugD"},
}
b, err := Marshal(v)
if err != nil {
t.Fatal("Marshal:", err)
}
want := `{"S":"BugD"}`
got := string(b)
if got != want {
t.Fatalf("Marshal: got %s want %s", got, want)
}
}
// There are no tags here, so S should not appear.
type BugZ struct {
BugA
BugC
BugY // Contains a tagged S field through BugD; should not dominate.
}
func TestDuplicatedFieldDisappears(t *testing.T) {
v := BugZ{
BugA{"BugA"},
BugC{"BugC"},
BugY{
BugA{"nested BugA"},
BugD{"nested BugD"},
},
}
b, err := Marshal(v)
if err != nil {
t.Fatal("Marshal:", err)
}
want := `{}`
got := string(b)
if got != want {
t.Fatalf("Marshal: got %s want %s", got, want)
}
}
func TestStringBytes(t *testing.T) {
// Test that encodeState.stringBytes and encodeState.string use the same encoding.
var r []rune
for i := '\u0000'; i <= unicode.MaxRune; i++ {
r = append(r, i)
}
s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too
for _, escapeHTML := range []bool{true, false} {
es := &encodeState{}
es.string(s, escapeHTML)
esBytes := &encodeState{}
esBytes.stringBytes([]byte(s), escapeHTML)
enc := es.Buffer.String()
encBytes := esBytes.Buffer.String()
if enc != encBytes {
i := 0
for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] {
i++
}
enc = enc[i:]
encBytes = encBytes[i:]
i = 0
for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] {
i++
}
enc = enc[:len(enc)-i]
encBytes = encBytes[:len(encBytes)-i]
if len(enc) > 20 {
enc = enc[:20] + "..."
}
if len(encBytes) > 20 {
encBytes = encBytes[:20] + "..."
}
t.Errorf("with escapeHTML=%t, encodings differ at %#q vs %#q",
escapeHTML, enc, encBytes)
}
}
}
func TestIssue6458(t *testing.T) {
type Foo struct {
M RawMessage
}
x := Foo{RawMessage(`"foo"`)}
b, err := Marshal(&x)
if err != nil {
t.Fatal(err)
}
if want := `{"M":"foo"}`; string(b) != want {
t.Errorf("Marshal(&x) = %#q; want %#q", b, want)
}
b, err = Marshal(x)
if err != nil {
t.Fatal(err)
}
if want := `{"M":"ImZvbyI="}`; string(b) != want {
t.Errorf("Marshal(x) = %#q; want %#q", b, want)
}
}
func TestIssue10281(t *testing.T) {
type Foo struct {
N Number
}
x := Foo{Number(`invalid`)}
b, err := Marshal(&x)
if err == nil {
t.Errorf("Marshal(&x) = %#q; want error", b)
}
}
func TestHTMLEscape(t *testing.T) {
var b, want bytes.Buffer
m := `{"M":"<html>foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `</html>"}`
want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
HTMLEscape(&b, []byte(m))
if !bytes.Equal(b.Bytes(), want.Bytes()) {
t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
}
}
// golang.org/issue/8582
func TestEncodePointerString(t *testing.T) {
type stringPointer struct {
N *int64 `json:"n,string"`
}
var n int64 = 42
b, err := Marshal(stringPointer{N: &n})
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if got, want := string(b), `{"n":"42"}`; got != want {
t.Errorf("Marshal = %s, want %s", got, want)
}
var back stringPointer
err = Unmarshal(b, &back)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if back.N == nil {
t.Fatalf("Unmarshalled nil N field")
}
if *back.N != 42 {
t.Fatalf("*N = %d; want 42", *back.N)
}
}
var encodeStringTests = []struct {
in string
out string
}{
{"\x00", `"\u0000"`},
{"\x01", `"\u0001"`},
{"\x02", `"\u0002"`},
{"\x03", `"\u0003"`},
{"\x04", `"\u0004"`},
{"\x05", `"\u0005"`},
{"\x06", `"\u0006"`},
{"\x07", `"\u0007"`},
{"\x08", `"\u0008"`},
{"\x09", `"\t"`},
{"\x0a", `"\n"`},
{"\x0b", `"\u000b"`},
{"\x0c", `"\u000c"`},
{"\x0d", `"\r"`},
{"\x0e", `"\u000e"`},
{"\x0f", `"\u000f"`},
{"\x10", `"\u0010"`},
{"\x11", `"\u0011"`},
{"\x12", `"\u0012"`},
{"\x13", `"\u0013"`},
{"\x14", `"\u0014"`},
{"\x15", `"\u0015"`},
{"\x16", `"\u0016"`},
{"\x17", `"\u0017"`},
{"\x18", `"\u0018"`},
{"\x19", `"\u0019"`},
{"\x1a", `"\u001a"`},
{"\x1b", `"\u001b"`},
{"\x1c", `"\u001c"`},
{"\x1d", `"\u001d"`},
{"\x1e", `"\u001e"`},
{"\x1f", `"\u001f"`},
}
func TestEncodeString(t *testing.T) {
for _, tt := range encodeStringTests {
b, err := Marshal(tt.in)
if err != nil {
t.Errorf("Marshal(%q): %v", tt.in, err)
continue
}
out := string(b)
if out != tt.out {
t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out)
}
}
}
type jsonbyte byte
func (b jsonbyte) MarshalJSON() ([]byte, error) { return tenc(`{"JB":%d}`, b) }
type textbyte byte
func (b textbyte) MarshalText() ([]byte, error) { return tenc(`TB:%d`, b) }
type jsonint int
func (i jsonint) MarshalJSON() ([]byte, error) { return tenc(`{"JI":%d}`, i) }
type textint int
func (i textint) MarshalText() ([]byte, error) { return tenc(`TI:%d`, i) }
func tenc(format string, a ...interface{}) ([]byte, error) {
var buf bytes.Buffer
fmt.Fprintf(&buf, format, a...)
return buf.Bytes(), nil
}
// Issue 13783
func TestEncodeBytekind(t *testing.T) {
testdata := []struct {
data interface{}
want string
}{
{byte(7), "7"},
{jsonbyte(7), `{"JB":7}`},
{textbyte(4), `"TB:4"`},
{jsonint(5), `{"JI":5}`},
{textint(1), `"TI:1"`},
{[]byte{0, 1}, `"AAE="`},
{[]jsonbyte{0, 1}, `[{"JB":0},{"JB":1}]`},
{[][]jsonbyte{{0, 1}, {3}}, `[[{"JB":0},{"JB":1}],[{"JB":3}]]`},
{[]textbyte{2, 3}, `["TB:2","TB:3"]`},
{[]jsonint{5, 4}, `[{"JI":5},{"JI":4}]`},
{[]textint{9, 3}, `["TI:9","TI:3"]`},
{[]int{9, 3}, `[9,3]`},
}
for _, d := range testdata {
js, err := Marshal(d.data)
if err != nil {
t.Error(err)
continue
}
got, want := string(js), d.want
if got != want {
t.Errorf("got %s, want %s", got, want)
}
}
}
func TestTextMarshalerMapKeysAreSorted(t *testing.T) {
b, err := Marshal(map[unmarshalerText]int{
{"x", "y"}: 1,
{"y", "x"}: 2,
{"a", "z"}: 3,
{"z", "a"}: 4,
})
if err != nil {
t.Fatalf("Failed to Marshal text.Marshaler: %v", err)
}
const want = `{"a:z":3,"x:y":1,"y:x":2,"z:a":4}`
if string(b) != want {
t.Errorf("Marshal map with text.Marshaler keys: got %#q, want %#q", b, want)
}
}
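The unmarshalerText key type above is defined in the companion decode test file. A self-contained sketch of the same behavior with the standard library, which sorts map keys by their MarshalText output (the point type here is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type point struct{ X, Y string }

// MarshalText makes point usable as a JSON map key.
func (p point) MarshalText() ([]byte, error) {
	return []byte(p.X + ":" + p.Y), nil
}

func main() {
	b, err := json.Marshal(map[point]int{
		{"z", "a"}: 4,
		{"a", "z"}: 3,
		{"x", "y"}: 1,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"a:z":3,"x:y":1,"z:a":4}
}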

View File

@ -1,252 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"strings"
)
func ExampleMarshal() {
type ColorGroup struct {
ID int
Name string
Colors []string
}
group := ColorGroup{
ID: 1,
Name: "Reds",
Colors: []string{"Crimson", "Red", "Ruby", "Maroon"},
}
b, err := json.Marshal(group)
if err != nil {
fmt.Println("error:", err)
}
os.Stdout.Write(b)
// Output:
// {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}
}
func ExampleUnmarshal() {
var jsonBlob = []byte(`[
{"Name": "Platypus", "Order": "Monotremata"},
{"Name": "Quoll", "Order": "Dasyuromorphia"}
]`)
type Animal struct {
Name string
Order string
}
var animals []Animal
err := json.Unmarshal(jsonBlob, &animals)
if err != nil {
fmt.Println("error:", err)
}
fmt.Printf("%+v", animals)
// Output:
// [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}]
}
// This example uses a Decoder to decode a stream of distinct JSON values.
func ExampleDecoder() {
const jsonStream = `
{"Name": "Ed", "Text": "Knock knock."}
{"Name": "Sam", "Text": "Who's there?"}
{"Name": "Ed", "Text": "Go fmt."}
{"Name": "Sam", "Text": "Go fmt who?"}
{"Name": "Ed", "Text": "Go fmt yourself!"}
`
type Message struct {
Name, Text string
}
dec := json.NewDecoder(strings.NewReader(jsonStream))
for {
var m Message
if err := dec.Decode(&m); err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
fmt.Printf("%s: %s\n", m.Name, m.Text)
}
// Output:
// Ed: Knock knock.
// Sam: Who's there?
// Ed: Go fmt.
// Sam: Go fmt who?
// Ed: Go fmt yourself!
}
// This example uses a Decoder to decode a stream of distinct JSON values.
func ExampleDecoder_Token() {
const jsonStream = `
{"Message": "Hello", "Array": [1, 2, 3], "Null": null, "Number": 1.234}
`
dec := json.NewDecoder(strings.NewReader(jsonStream))
for {
t, err := dec.Token()
if err == io.EOF {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Printf("%T: %v", t, t)
if dec.More() {
fmt.Printf(" (more)")
}
fmt.Printf("\n")
}
// Output:
// json.Delim: { (more)
// string: Message (more)
// string: Hello (more)
// string: Array (more)
// json.Delim: [ (more)
// float64: 1 (more)
// float64: 2 (more)
// float64: 3
// json.Delim: ] (more)
// string: Null (more)
// <nil>: <nil> (more)
// string: Number (more)
// float64: 1.234
// json.Delim: }
}
// This example uses a Decoder to decode a streaming array of JSON objects.
func ExampleDecoder_Decode_stream() {
const jsonStream = `
[
{"Name": "Ed", "Text": "Knock knock."},
{"Name": "Sam", "Text": "Who's there?"},
{"Name": "Ed", "Text": "Go fmt."},
{"Name": "Sam", "Text": "Go fmt who?"},
{"Name": "Ed", "Text": "Go fmt yourself!"}
]
`
type Message struct {
Name, Text string
}
dec := json.NewDecoder(strings.NewReader(jsonStream))
// read open bracket
t, err := dec.Token()
if err != nil {
log.Fatal(err)
}
fmt.Printf("%T: %v\n", t, t)
var m Message
// while the array contains values
for dec.More() {
// decode an array value (Message)
err := dec.Decode(&m)
if err != nil {
log.Fatal(err)
}
fmt.Printf("%v: %v\n", m.Name, m.Text)
}
// read closing bracket
t, err = dec.Token()
if err != nil {
log.Fatal(err)
}
fmt.Printf("%T: %v\n", t, t)
// Output:
// json.Delim: [
// Ed: Knock knock.
// Sam: Who's there?
// Ed: Go fmt.
// Sam: Go fmt who?
// Ed: Go fmt yourself!
// json.Delim: ]
}
// This example uses RawMessage to delay parsing part of a JSON message.
func ExampleRawMessage() {
type Color struct {
Space string
Point json.RawMessage // delay parsing until we know the color space
}
type RGB struct {
R uint8
G uint8
B uint8
}
type YCbCr struct {
Y uint8
Cb int8
Cr int8
}
var j = []byte(`[
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
]`)
var colors []Color
err := json.Unmarshal(j, &colors)
if err != nil {
log.Fatalln("error:", err)
}
for _, c := range colors {
var dst interface{}
switch c.Space {
case "RGB":
dst = new(RGB)
case "YCbCr":
dst = new(YCbCr)
}
err := json.Unmarshal(c.Point, dst)
if err != nil {
log.Fatalln("error:", err)
}
fmt.Println(c.Space, dst)
}
// Output:
// YCbCr &{255 0 -10}
// RGB &{98 218 255}
}
func ExampleIndent() {
type Road struct {
Name string
Number int
}
roads := []Road{
{"Diamond Fork", 29},
{"Sheep Creek", 51},
}
b, err := json.Marshal(roads)
if err != nil {
log.Fatal(err)
}
var out bytes.Buffer
json.Indent(&out, b, "=", "\t")
out.WriteTo(os.Stdout)
// Output:
// [
// = {
// = "Name": "Diamond Fork",
// = "Number": 29
// = },
// = {
// = "Name": "Sheep Creek",
// = "Number": 51
// = }
// =]
}

View File

@ -1,95 +0,0 @@
package json
import (
"reflect"
)
// Extension holds a set of additional rules to be used when unmarshaling
// strict JSON or JSON-like content.
type Extension struct {
funcs map[string]funcExt
consts map[string]interface{}
keyed map[string]func([]byte) (interface{}, error)
encode map[reflect.Type]func(v interface{}) ([]byte, error)
unquotedKeys bool
trailingCommas bool
}
type funcExt struct {
key string
args []string
}
// Extend changes the decoder behavior to consider the provided extension.
func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
// Extend changes the encoder behavior to consider the provided extension.
func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
// Extend includes in e the extensions defined in ext.
func (e *Extension) Extend(ext *Extension) {
for name, fext := range ext.funcs {
e.DecodeFunc(name, fext.key, fext.args...)
}
for name, value := range ext.consts {
e.DecodeConst(name, value)
}
for key, decode := range ext.keyed {
e.DecodeKeyed(key, decode)
}
for typ, encode := range ext.encode {
if e.encode == nil {
e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
}
e.encode[typ] = encode
}
}
// DecodeFunc defines a function call that may be observed inside JSON content.
// A function with the provided name will be unmarshaled as the document
// {key: {args[0]: ..., args[N]: ...}}.
func (e *Extension) DecodeFunc(name string, key string, args ...string) {
if e.funcs == nil {
e.funcs = make(map[string]funcExt)
}
e.funcs[name] = funcExt{key, args}
}
// DecodeConst defines a constant name that may be observed inside JSON content
// and will be decoded with the provided value.
func (e *Extension) DecodeConst(name string, value interface{}) {
if e.consts == nil {
e.consts = make(map[string]interface{})
}
e.consts[name] = value
}
// DecodeKeyed defines a key that when observed as the first element inside a
// JSON document triggers the decoding of that document via the provided
// decode function.
func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
if e.keyed == nil {
e.keyed = make(map[string]func([]byte) (interface{}, error))
}
e.keyed[key] = decode
}
// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
func (e *Extension) DecodeUnquotedKeys(accept bool) {
e.unquotedKeys = accept
}
// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
func (e *Extension) DecodeTrailingCommas(accept bool) {
e.trailingCommas = accept
}
// EncodeType registers a function to encode values with the same type of the
// provided sample.
func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
if e.encode == nil {
e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
}
e.encode[reflect.TypeOf(sample)] = encode
}
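A minimal usage sketch for this Extension API. In the real tree this is an internal fork exercised by the test file below, so the import path here is hypothetical:

package main

import (
	"bytes"
	"fmt"

	json "example.com/extjson" // hypothetical import of this fork
)

func main() {
	var ext json.Extension
	// ISODate(x) will decode as the document {"$date": {"S": x}}.
	ext.DecodeFunc("ISODate", "$date", "S")
	ext.DecodeUnquotedKeys(true)

	dec := json.NewDecoder(bytes.NewReader([]byte(`{v: ISODate("2016-05-15")}`)))
	dec.Extend(&ext)

	var out map[string]interface{}
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["v"]) // map[$date:map[S:2016-05-15]]
}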

View File

@ -1,218 +0,0 @@
package json
import (
"bytes"
"fmt"
"reflect"
"strconv"
"testing"
)
type funcN struct {
Arg1 int `json:"arg1"`
Arg2 int `json:"arg2"`
}
type funcs struct {
Func2 *funcN `json:"$func2"`
Func1 *funcN `json:"$func1"`
}
type funcsText struct {
Func1 jsonText `json:"$func1"`
Func2 jsonText `json:"$func2"`
}
type jsonText struct {
json string
}
func (jt *jsonText) UnmarshalJSON(data []byte) error {
jt.json = string(data)
return nil
}
type nestedText struct {
F jsonText
B bool
}
type unquotedKey struct {
S string `json:"$k_1"`
}
var ext Extension
type keyed string
func decodeKeyed(data []byte) (interface{}, error) {
return keyed(data), nil
}
type keyedType struct {
K keyed
I int
}
type docint int
type const1Type struct{}
var const1 = new(const1Type)
func init() {
ext.DecodeFunc("Func1", "$func1")
ext.DecodeFunc("Func2", "$func2", "arg1", "arg2")
ext.DecodeFunc("Func3", "$func3", "arg1")
ext.DecodeFunc("new Func4", "$func4", "arg1")
ext.DecodeConst("Const1", const1)
ext.DecodeKeyed("$key1", decodeKeyed)
ext.DecodeKeyed("$func3", decodeKeyed)
ext.EncodeType(docint(0), func(v interface{}) ([]byte, error) {
s := `{"$docint": ` + strconv.Itoa(int(v.(docint))) + `}`
return []byte(s), nil
})
ext.DecodeUnquotedKeys(true)
ext.DecodeTrailingCommas(true)
}
type extDecodeTest struct {
in string
ptr interface{}
out interface{}
err error
noext bool
}
var extDecodeTests = []extDecodeTest{
// Functions
{in: `Func1()`, ptr: new(interface{}), out: map[string]interface{}{
"$func1": map[string]interface{}{},
}},
{in: `{"v": Func1()}`, ptr: new(interface{}), out: map[string]interface{}{
"v": map[string]interface{}{"$func1": map[string]interface{}{}},
}},
{in: `Func2(1)`, ptr: new(interface{}), out: map[string]interface{}{
"$func2": map[string]interface{}{"arg1": float64(1)},
}},
{in: `Func2(1, 2)`, ptr: new(interface{}), out: map[string]interface{}{
"$func2": map[string]interface{}{"arg1": float64(1), "arg2": float64(2)},
}},
{in: `Func2(Func1())`, ptr: new(interface{}), out: map[string]interface{}{
"$func2": map[string]interface{}{"arg1": map[string]interface{}{"$func1": map[string]interface{}{}}},
}},
{in: `Func2(1, 2, 3)`, ptr: new(interface{}), err: fmt.Errorf("json: too many arguments for function Func2")},
{in: `BadFunc()`, ptr: new(interface{}), err: fmt.Errorf(`json: unknown function "BadFunc"`)},
{in: `Func1()`, ptr: new(funcs), out: funcs{Func1: &funcN{}}},
{in: `Func2(1)`, ptr: new(funcs), out: funcs{Func2: &funcN{Arg1: 1}}},
{in: `Func2(1, 2)`, ptr: new(funcs), out: funcs{Func2: &funcN{Arg1: 1, Arg2: 2}}},
{in: `Func2(1, 2, 3)`, ptr: new(funcs), err: fmt.Errorf("json: too many arguments for function Func2")},
{in: `BadFunc()`, ptr: new(funcs), err: fmt.Errorf(`json: unknown function "BadFunc"`)},
{in: `Func2(1)`, ptr: new(jsonText), out: jsonText{"Func2(1)"}},
{in: `Func2(1, 2)`, ptr: new(funcsText), out: funcsText{Func2: jsonText{"Func2(1, 2)"}}},
{in: `{"f": Func2(1, 2), "b": true}`, ptr: new(nestedText), out: nestedText{jsonText{"Func2(1, 2)"}, true}},
{in: `Func1()`, ptr: new(struct{}), out: struct{}{}},
// Functions with "new" prefix
{in: `new Func4(1)`, ptr: new(interface{}), out: map[string]interface{}{
"$func4": map[string]interface{}{"arg1": float64(1)},
}},
// Constants
{in: `Const1`, ptr: new(interface{}), out: const1},
{in: `{"c": Const1}`, ptr: new(struct{ C *const1Type }), out: struct{ C *const1Type }{const1}},
// Keyed documents
{in: `{"v": {"$key1": 1}}`, ptr: new(interface{}), out: map[string]interface{}{"v": keyed(`{"$key1": 1}`)}},
{in: `{"k": {"$key1": 1}}`, ptr: new(keyedType), out: keyedType{K: keyed(`{"$key1": 1}`)}},
{in: `{"i": {"$key1": 1}}`, ptr: new(keyedType), err: &UnmarshalTypeError{"object", reflect.TypeOf(0), 18}},
// Keyed function documents
{in: `{"v": Func3()}`, ptr: new(interface{}), out: map[string]interface{}{"v": keyed(`Func3()`)}},
{in: `{"k": Func3()}`, ptr: new(keyedType), out: keyedType{K: keyed(`Func3()`)}},
{in: `{"i": Func3()}`, ptr: new(keyedType), err: &UnmarshalTypeError{"object", reflect.TypeOf(0), 13}},
// Unquoted keys
{in: `{$k_1: "bar"}`, ptr: new(interface{}), out: map[string]interface{}{"$k_1": "bar"}},
{in: `{$k_1: "bar"}`, ptr: new(unquotedKey), out: unquotedKey{"bar"}},
{in: `{$k_1: "bar"}`, noext: true, ptr: new(interface{}),
err: &SyntaxError{"invalid character '$' looking for beginning of object key string", 2}},
{in: `{$k_1: "bar"}`, noext: true, ptr: new(unquotedKey),
err: &SyntaxError{"invalid character '$' looking for beginning of object key string", 2}},
// Trailing commas
{in: `{"k": "v",}`, ptr: new(interface{}), out: map[string]interface{}{"k": "v"}},
{in: `{"k": "v",}`, ptr: new(struct{}), out: struct{}{}},
{in: `["v",]`, ptr: new(interface{}), out: []interface{}{"v"}},
{in: `{"k": "v",}`, noext: true, ptr: new(interface{}),
err: &SyntaxError{"invalid character '}' looking for beginning of object key string", 11}},
{in: `{"k": "v",}`, noext: true, ptr: new(struct{}),
err: &SyntaxError{"invalid character '}' looking for beginning of object key string", 11}},
{in: `["a",]`, noext: true, ptr: new(interface{}),
err: &SyntaxError{"invalid character ']' looking for beginning of value", 6}},
}
type extEncodeTest struct {
in interface{}
out string
err error
}
var extEncodeTests = []extEncodeTest{
{in: docint(13), out: "{\"$docint\":13}\n"},
}
func TestExtensionDecode(t *testing.T) {
for i, tt := range extDecodeTests {
in := []byte(tt.in)
// v = new(right-type)
v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
dec := NewDecoder(bytes.NewReader(in))
if !tt.noext {
dec.Extend(&ext)
}
if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
t.Errorf("#%d: %v, want %v", i, err, tt.err)
continue
} else if err != nil {
continue
}
if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
data, _ := Marshal(v.Elem().Interface())
t.Logf("%s", string(data))
data, _ = Marshal(tt.out)
t.Logf("%s", string(data))
continue
}
}
}
func TestExtensionEncode(t *testing.T) {
var buf bytes.Buffer
for i, tt := range extEncodeTests {
buf.Truncate(0)
enc := NewEncoder(&buf)
enc.Extend(&ext)
err := enc.Encode(tt.in)
if !reflect.DeepEqual(err, tt.err) {
t.Errorf("#%d: %v, want %v", i, err, tt.err)
continue
}
if buf.String() != tt.out {
t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, buf.String(), tt.out)
}
}
}

View File

@ -1,143 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
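The special-case folding that forces the slower paths can be seen with the standard library's bytes.EqualFold, which these helpers specialize:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// ASCII k and s fold equal to U+212A (Kelvin sign) and U+017F (long s).
	fmt.Println(bytes.EqualFold([]byte("k"), []byte("\u212a"))) // true
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f"))) // true
	// Plain ASCII letters fold by the 0x20 case bit handled above.
	fmt.Println(bytes.EqualFold([]byte("KELVIN"), []byte("kelvin"))) // true
}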

View File

@ -1,116 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"strings"
"testing"
"unicode/utf8"
)
var foldTests = []struct {
fn func(s, t []byte) bool
s, t string
want bool
}{
{equalFoldRight, "", "", true},
{equalFoldRight, "a", "a", true},
{equalFoldRight, "", "a", false},
{equalFoldRight, "a", "", false},
{equalFoldRight, "a", "A", true},
{equalFoldRight, "AB", "ab", true},
{equalFoldRight, "AB", "ac", false},
{equalFoldRight, "sbkKc", "ſbKc", true},
{equalFoldRight, "SbKkc", "ſbKc", true},
{equalFoldRight, "SbKkc", "ſbKK", false},
{equalFoldRight, "e", "é", false},
{equalFoldRight, "s", "S", true},
{simpleLetterEqualFold, "", "", true},
{simpleLetterEqualFold, "abc", "abc", true},
{simpleLetterEqualFold, "abc", "ABC", true},
{simpleLetterEqualFold, "abc", "ABCD", false},
{simpleLetterEqualFold, "abc", "xxx", false},
{asciiEqualFold, "a_B", "A_b", true},
{asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent
}
func TestFold(t *testing.T) {
for i, tt := range foldTests {
if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want {
t.Errorf("%d. %q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want)
}
truth := strings.EqualFold(tt.s, tt.t)
if truth != tt.want {
t.Errorf("strings.EqualFold doesn't agree with case %d", i)
}
}
}
func TestFoldAgainstUnicode(t *testing.T) {
const bufSize = 5
buf1 := make([]byte, 0, bufSize)
buf2 := make([]byte, 0, bufSize)
var runes []rune
for i := 0x20; i <= 0x7f; i++ {
runes = append(runes, rune(i))
}
runes = append(runes, kelvin, smallLongEss)
funcs := []struct {
name string
fold func(s, t []byte) bool
letter bool // must be ASCII letter
simple bool // must be simple ASCII letter (not 'S' or 'K')
}{
{
name: "equalFoldRight",
fold: equalFoldRight,
},
{
name: "asciiEqualFold",
fold: asciiEqualFold,
simple: true,
},
{
name: "simpleLetterEqualFold",
fold: simpleLetterEqualFold,
simple: true,
letter: true,
},
}
for _, ff := range funcs {
for _, r := range runes {
if r >= utf8.RuneSelf {
continue
}
if ff.letter && !isASCIILetter(byte(r)) {
continue
}
if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') {
continue
}
for _, r2 := range runes {
buf1 := append(buf1[:0], 'x')
buf2 := append(buf2[:0], 'x')
buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)]
buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)]
buf1 = append(buf1, 'x')
buf2 = append(buf2, 'x')
want := bytes.EqualFold(buf1, buf2)
if got := ff.fold(buf1, buf2); got != want {
t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want)
}
}
}
}
}
func isASCIILetter(b byte) bool {
return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z')
}

View File

@ -1,141 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
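Compact and Indent here mirror the standard library, so their behavior can be sketched with encoding/json directly:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte("{ \"a\": [1, 2],\n  \"b\": true }")

	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"a":[1,2],"b":true}

	var indented bytes.Buffer
	if err := json.Indent(&indented, compacted.Bytes(), "", "\t"); err != nil {
		panic(err)
	}
	fmt.Println(indented.String())
}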

View File

@ -1,133 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"regexp"
"testing"
)
func TestNumberIsValid(t *testing.T) {
// From: http://stackoverflow.com/a/13340826
var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
validTests := []string{
"0",
"-0",
"1",
"-1",
"0.1",
"-0.1",
"1234",
"-1234",
"12.34",
"-12.34",
"12E0",
"12E1",
"12e34",
"12E-0",
"12e+1",
"12e-34",
"-12E0",
"-12E1",
"-12e34",
"-12E-0",
"-12e+1",
"-12e-34",
"1.2E0",
"1.2E1",
"1.2e34",
"1.2E-0",
"1.2e+1",
"1.2e-34",
"-1.2E0",
"-1.2E1",
"-1.2e34",
"-1.2E-0",
"-1.2e+1",
"-1.2e-34",
"0E0",
"0E1",
"0e34",
"0E-0",
"0e+1",
"0e-34",
"-0E0",
"-0E1",
"-0e34",
"-0E-0",
"-0e+1",
"-0e-34",
}
for _, test := range validTests {
if !isValidNumber(test) {
t.Errorf("%s should be valid", test)
}
var f float64
if err := Unmarshal([]byte(test), &f); err != nil {
t.Errorf("%s should be valid but Unmarshal failed: %v", test, err)
}
if !jsonNumberRegexp.MatchString(test) {
t.Errorf("%s should be valid but regexp does not match", test)
}
}
invalidTests := []string{
"",
"invalid",
"1.0.1",
"1..1",
"-1-2",
"012a42",
"01.2",
"012",
"12E12.12",
"1e2e3",
"1e+-2",
"1e--23",
"1e",
"e1",
"1e+",
"1ea",
"1a",
"1.a",
"1.",
"01",
"1.e1",
}
for _, test := range invalidTests {
if isValidNumber(test) {
t.Errorf("%s should be invalid", test)
}
var f float64
if err := Unmarshal([]byte(test), &f); err == nil {
t.Errorf("%s should be invalid but unmarshal wrote %v", test, f)
}
if jsonNumberRegexp.MatchString(test) {
t.Errorf("%s should be invalid but matches regexp", test)
}
}
}
func BenchmarkNumberIsValid(b *testing.B) {
s := "-61657.61667E+61673"
for i := 0; i < b.N; i++ {
isValidNumber(s)
}
}
func BenchmarkNumberIsValidRegexp(b *testing.B) {
var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
s := "-61657.61667E+61673"
for i := 0; i < b.N; i++ {
jsonNumberRegexp.MatchString(s)
}
}
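The same number grammar is enforced by the standard encoding/json, so the invalid cases above can be reproduced without the unexported helper:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var f float64
	fmt.Println(json.Unmarshal([]byte("12e-34"), &f)) // <nil>
	fmt.Println(json.Unmarshal([]byte("012"), &f))    // error: leading zero
	fmt.Println(json.Unmarshal([]byte("1."), &f))     // error: bare decimal point
}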

View File

@ -1,697 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, c)
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray, scanEndParams:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, byte) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanBeginName // begin function call
scanParam // begin function argument
scanEndParams // end function call
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
parseName // parsing unquoted name
parseParam // parsing function argument value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 'n':
s.step = stateNew0
return scanBeginName
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
if isName(c) {
s.step = stateName
return scanBeginName
}
return s.error(c, "looking for beginning of value")
}
func isName(c byte) bool {
return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
if isName(c) {
s.step = stateName
return scanBeginName
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginStringOrEmpty
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValueOrEmpty
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
case parseParam:
if c == ',' {
s.step = stateBeginValue
return scanParam
}
if c == ')' {
s.popParseState()
return scanEndParams
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// not a hex digit
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// not a hex digit
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// not a hex digit
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// not a hex digit
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateNew0 is the state after reading an initial `n`, which may begin
// `new`, a literal such as `null`, or any other unquoted name.
func stateNew0(s *scanner, c byte) int {
if c == 'e' {
s.step = stateNew1
return scanContinue
}
s.step = stateName
return stateName(s, c)
}
// stateNew1 is the state after reading `ne`.
func stateNew1(s *scanner, c byte) int {
if c == 'w' {
s.step = stateNew2
return scanContinue
}
s.step = stateName
return stateName(s, c)
}
// stateNew2 is the state after reading `new`.
func stateNew2(s *scanner, c byte) int {
s.step = stateName
if c == ' ' {
return scanContinue
}
return stateName(s, c)
}
// stateName is the state while reading an unquoted name: a bare object
// key, a literal, or a function name.
func stateName(s *scanner, c byte) int {
if isName(c) {
return scanContinue
}
if c == '(' {
s.step = stateParamOrEmpty
s.pushParseState(parseParam)
return scanParam
}
return stateEndValue(s, c)
}
// stateParamOrEmpty is the state after reading `(`.
func stateParamOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ')' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
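// The following is a minimal in-package sketch, added here for
// illustration and not part of the original file, of how the state
// machine above accepts the extended syntax (unquoted names and
// constructor calls) that standard JSON rejects. The function name is
// hypothetical.
func exampleScanExtended() bool {
var s scanner
s.reset()
for _, c := range []byte(`{foo: new Date(1)}`) {
s.bytes++
if s.step(&s, c) == scanError {
return false // s.err describes the syntax error
}
}
// scanEnd is delayed one byte, so feed a trailing space to observe the
// end of the top-level value (readValue uses the same trick).
return s.step(&s, ' ') == scanEnd
}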

View File

@ -1,316 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"math"
"math/rand"
"reflect"
"testing"
)
// Tests of simple examples.
type example struct {
compact string
indent string
}
var examples = []example{
{`1`, `1`},
{`{}`, `{}`},
{`[]`, `[]`},
{`{"":2}`, "{\n\t\"\": 2\n}"},
{`[3]`, "[\n\t3\n]"},
{`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
{`{"x":1}`, "{\n\t\"x\": 1\n}"},
{ex1, ex1i},
}
var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
var ex1i = `[
true,
false,
null,
"x",
1,
1.5,
0,
-5e+2
]`
func TestCompact(t *testing.T) {
var buf bytes.Buffer
for _, tt := range examples {
buf.Reset()
if err := Compact(&buf, []byte(tt.compact)); err != nil {
t.Errorf("Compact(%#q): %v", tt.compact, err)
} else if s := buf.String(); s != tt.compact {
t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
}
buf.Reset()
if err := Compact(&buf, []byte(tt.indent)); err != nil {
t.Errorf("Compact(%#q): %v", tt.indent, err)
continue
} else if s := buf.String(); s != tt.compact {
t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
}
}
}
func TestCompactSeparators(t *testing.T) {
// U+2028 and U+2029 should be escaped inside strings.
// They should not appear outside strings.
tests := []struct {
in, compact string
}{
{"{\"\u2028\": 1}", `{"\u2028":1}`},
{"{\"\u2029\" :2}", `{"\u2029":2}`},
}
for _, tt := range tests {
var buf bytes.Buffer
if err := Compact(&buf, []byte(tt.in)); err != nil {
t.Errorf("Compact(%q): %v", tt.in, err)
} else if s := buf.String(); s != tt.compact {
t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact)
}
}
}
func TestIndent(t *testing.T) {
var buf bytes.Buffer
for _, tt := range examples {
buf.Reset()
if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
t.Errorf("Indent(%#q): %v", tt.indent, err)
} else if s := buf.String(); s != tt.indent {
t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
}
buf.Reset()
if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
t.Errorf("Indent(%#q): %v", tt.compact, err)
continue
} else if s := buf.String(); s != tt.indent {
t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
}
}
}
// Tests of a large random structure.
func TestCompactBig(t *testing.T) {
initBig()
var buf bytes.Buffer
if err := Compact(&buf, jsonBig); err != nil {
t.Fatalf("Compact: %v", err)
}
b := buf.Bytes()
if !bytes.Equal(b, jsonBig) {
t.Error("Compact(jsonBig) != jsonBig")
diff(t, b, jsonBig)
return
}
}
func TestIndentBig(t *testing.T) {
initBig()
var buf bytes.Buffer
if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
t.Fatalf("Indent1: %v", err)
}
b := buf.Bytes()
if len(b) == len(jsonBig) {
// jsonBig is compact (no unnecessary spaces);
// indenting should make it bigger
t.Fatalf("Indent(jsonBig) did not get bigger")
}
// should be idempotent
var buf1 bytes.Buffer
if err := Indent(&buf1, b, "", "\t"); err != nil {
t.Fatalf("Indent2: %v", err)
}
b1 := buf1.Bytes()
if !bytes.Equal(b1, b) {
t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
diff(t, b1, b)
return
}
// should get back to original
buf1.Reset()
if err := Compact(&buf1, b); err != nil {
t.Fatalf("Compact: %v", err)
}
b1 = buf1.Bytes()
if !bytes.Equal(b1, jsonBig) {
t.Error("Compact(Indent(jsonBig)) != jsonBig")
diff(t, b1, jsonBig)
return
}
}
type indentErrorTest struct {
in string
err error
}
var indentErrorTests = []indentErrorTest{
{`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
{`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
}
func TestIndentErrors(t *testing.T) {
for i, tt := range indentErrorTests {
slice := make([]uint8, 0)
buf := bytes.NewBuffer(slice)
if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
if !reflect.DeepEqual(err, tt.err) {
t.Errorf("#%d: Indent: %#v", i, err)
continue
}
}
}
}
func TestNextValueBig(t *testing.T) {
initBig()
var scan scanner
item, rest, err := nextValue(jsonBig, &scan)
if err != nil {
t.Fatalf("nextValue: %s", err)
}
if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
}
if len(rest) != 0 {
t.Errorf("invalid rest: %d", len(rest))
}
item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
if err != nil {
t.Fatalf("nextValue extra: %s", err)
}
if len(item) != len(jsonBig) {
t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
}
if string(rest) != "HELLO WORLD" {
t.Errorf("invalid rest: %d", len(rest))
}
}
var benchScan scanner
func BenchmarkSkipValue(b *testing.B) {
initBig()
b.ResetTimer()
for i := 0; i < b.N; i++ {
nextValue(jsonBig, &benchScan)
}
b.SetBytes(int64(len(jsonBig)))
}
func diff(t *testing.T, a, b []byte) {
for i := 0; ; i++ {
if i >= len(a) || i >= len(b) || a[i] != b[i] {
j := i - 10
if j < 0 {
j = 0
}
t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
return
}
}
}
func trim(b []byte) []byte {
if len(b) > 20 {
return b[0:20]
}
return b
}
// Generate a random JSON object.
var jsonBig []byte
func initBig() {
n := 10000
if testing.Short() {
n = 100
}
b, err := Marshal(genValue(n))
if err != nil {
panic(err)
}
jsonBig = b
}
func genValue(n int) interface{} {
if n > 1 {
switch rand.Intn(2) {
case 0:
return genArray(n)
case 1:
return genMap(n)
}
}
switch rand.Intn(3) {
case 0:
return rand.Intn(2) == 0
case 1:
return rand.NormFloat64()
case 2:
return genString(30)
}
panic("unreachable")
}
func genString(stddev float64) string {
n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
c := make([]rune, n)
for i := range c {
f := math.Abs(rand.NormFloat64()*64 + 32)
if f > 0x10ffff {
f = 0x10ffff
}
c[i] = rune(f)
}
return string(c)
}
func genArray(n int) []interface{} {
f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
if f > n {
f = n
}
if f < 1 {
f = 1
}
x := make([]interface{}, f)
for i := range x {
x[i] = genValue(((i+1)*n)/f - (i*n)/f)
}
return x
}
func genMap(n int) map[string]interface{} {
f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
if f > n {
f = n
}
if n > 0 && f == 0 {
f = 1
}
x := make(map[string]interface{})
for i := 0; i < f; i++ {
x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
}
return x
}

View File

@ -1,510 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
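// A short sketch, illustrative and not part of the original file, of the
// plain Decode loop over a stream of concatenated values, per the
// NewDecoder and Decode docs above. The function name is hypothetical.
func exampleDecodeStream(r io.Reader) error {
dec := NewDecoder(r)
for {
var v interface{}
err := dec.Decode(&v)
if err == io.EOF {
return nil
}
if err != nil {
return err
}
// v now holds one decoded value; process it here.
}
}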
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, c)
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON values to an output stream.
type Encoder struct {
w io.Writer
err error
escapeHTML bool
indentBuf *bytes.Buffer
indentPrefix string
indentValue string
ext Extension
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w, escapeHTML: true}
}
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
e.ext = enc.ext
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
b := e.Bytes()
if enc.indentBuf != nil {
enc.indentBuf.Reset()
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
if err != nil {
return err
}
b = enc.indentBuf.Bytes()
}
if _, err = enc.w.Write(b); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
// Indent sets the encoder to format each encoded value with Indent.
func (enc *Encoder) Indent(prefix, indent string) {
enc.indentBuf = new(bytes.Buffer)
enc.indentPrefix = prefix
enc.indentValue = indent
}
// DisableHTMLEscaping causes the encoder not to escape angle brackets
// ("<" and ">") or ampersands ("&") in JSON strings.
func (enc *Encoder) DisableHTMLEscaping() {
enc.escapeHTML = false
}
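// A small sketch (illustrative, hypothetical name) combining the two
// encoder knobs above: indented output with HTML escaping disabled.
func exampleEncoderOptions(w io.Writer) error {
enc := NewEncoder(w)
enc.Indent("", "  ")      // two-space indent, no prefix
enc.DisableHTMLEscaping() // keep <, >, & literal in strings
return enc.Encode(map[string]string{"html": "<b>&</b>"})
}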
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
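// A sketch (illustrative, with hypothetical type and function names) of
// the delayed-decoding pattern RawMessage enables: hold the payload as
// raw bytes until a discriminator field has been read.
type envelope struct {
Kind    string
Payload RawMessage // left undecoded until Kind is known
}

func decodeByKind(data []byte) (interface{}, error) {
var env envelope
if err := Unmarshal(data, &env); err != nil {
return nil, err
}
switch env.Kind {
case "point":
var p struct{ X, Y int }
err := Unmarshal(env.Payload, &p)
return p, err
}
return nil, errors.New("unknown kind: " + env.Kind)
}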
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance the token state from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
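// A sketch (illustrative, hypothetical name) of the streaming pattern the
// Token and More docs above describe: consume the opening delimiter, then
// Decode one array element at a time without buffering the whole document.
func exampleTokenStream(r io.Reader) error {
dec := NewDecoder(r)
if _, err := dec.Token(); err != nil { // consumes Delim('[')
return err
}
for dec.More() {
var elem map[string]interface{}
if err := dec.Decode(&elem); err != nil {
return err
}
// elem is one array element; process it here.
}
_, err := dec.Token() // consumes Delim(']')
return err
}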
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/

View File

@ -1,418 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
)
// Test values for the stream test.
// One of each JSON kind.
var streamTest = []interface{}{
0.1,
"hello",
nil,
true,
false,
[]interface{}{"a", "b", "c"},
map[string]interface{}{"": "Kelvin", "ß": "long s"},
3.14, // another value to make sure something can follow map
}
var streamEncoded = `0.1
"hello"
null
true
false
["a","b","c"]
{"ß":"long s","":"Kelvin"}
3.14
`
func TestEncoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
var buf bytes.Buffer
enc := NewEncoder(&buf)
for j, v := range streamTest[0:i] {
if err := enc.Encode(v); err != nil {
t.Fatalf("encode #%d: %v", j, err)
}
}
if have, want := buf.String(), nlines(streamEncoded, i); have != want {
t.Errorf("encoding %d items: mismatch", i)
diff(t, []byte(have), []byte(want))
break
}
}
}
var streamEncodedIndent = `0.1
"hello"
null
true
false
[
>."a",
>."b",
>."c"
>]
{
>."ß": "long s",
>."": "Kelvin"
>}
3.14
`
func TestEncoderIndent(t *testing.T) {
var buf bytes.Buffer
enc := NewEncoder(&buf)
enc.Indent(">", ".")
for _, v := range streamTest {
enc.Encode(v)
}
if have, want := buf.String(), streamEncodedIndent; have != want {
t.Error("indented encoding mismatch")
diff(t, []byte(have), []byte(want))
}
}
func TestEncoderDisableHTMLEscaping(t *testing.T) {
var c C
var ct CText
for _, tt := range []struct {
name string
v interface{}
wantEscape string
want string
}{
{"c", c, `"\u003c\u0026\u003e"`, `"<&>"`},
{"ct", ct, `"\"\u003c\u0026\u003e\""`, `"\"<&>\""`},
{`"<&>"`, "<&>", `"\u003c\u0026\u003e"`, `"<&>"`},
} {
var buf bytes.Buffer
enc := NewEncoder(&buf)
if err := enc.Encode(tt.v); err != nil {
t.Fatalf("Encode(%s): %s", tt.name, err)
}
if got := strings.TrimSpace(buf.String()); got != tt.wantEscape {
t.Errorf("Encode(%s) = %#q, want %#q", tt.name, got, tt.wantEscape)
}
buf.Reset()
enc.DisableHTMLEscaping()
if err := enc.Encode(tt.v); err != nil {
t.Fatalf("DisableHTMLEscaping Encode(%s): %s", tt.name, err)
}
if got := strings.TrimSpace(buf.String()); got != tt.want {
t.Errorf("DisableHTMLEscaping Encode(%s) = %#q, want %#q",
tt.name, got, tt.want)
}
}
}
func TestDecoder(t *testing.T) {
for i := 0; i <= len(streamTest); i++ {
// Unlike the upstream encoding/json test, keep the newlines in the
// input: with unquoted names enabled, stripping them would merge
// adjacent literals (null followed by true becomes nulltrue) and
// change what the scanner sees.
var buf bytes.Buffer
for _, c := range nlines(streamEncoded, i) {
buf.WriteRune(c)
}
out := make([]interface{}, i)
dec := NewDecoder(&buf)
for j := range out {
if err := dec.Decode(&out[j]); err != nil {
t.Fatalf("decode #%d/%d: %v", j, i, err)
}
}
if !reflect.DeepEqual(out, streamTest[0:i]) {
t.Errorf("decoding %d items: mismatch", i)
for j := range out {
if !reflect.DeepEqual(out[j], streamTest[j]) {
t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
}
}
break
}
}
}
func TestDecoderBuffered(t *testing.T) {
r := strings.NewReader(`{"Name": "Gopher"} extra `)
var m struct {
Name string
}
d := NewDecoder(r)
err := d.Decode(&m)
if err != nil {
t.Fatal(err)
}
if m.Name != "Gopher" {
t.Errorf("Name = %q; want Gopher", m.Name)
}
rest, err := ioutil.ReadAll(d.Buffered())
if err != nil {
t.Fatal(err)
}
if g, w := string(rest), " extra "; g != w {
t.Errorf("Remaining = %q; want %q", g, w)
}
}
func nlines(s string, n int) string {
if n <= 0 {
return ""
}
for i, c := range s {
if c == '\n' {
if n--; n == 0 {
return s[0 : i+1]
}
}
}
return s
}
func TestRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
const raw = `["\u0056",null]`
const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if string([]byte(*data.Id)) != raw {
t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}
func TestNullRawMessage(t *testing.T) {
// TODO(rsc): Should not need the * in *RawMessage
var data struct {
X float64
Id *RawMessage
Y float32
}
data.Id = new(RawMessage)
const msg = `{"X":0.1,"Id":null,"Y":0.2}`
err := Unmarshal([]byte(msg), &data)
if err != nil {
t.Fatalf("Unmarshal: %v", err)
}
if data.Id != nil {
t.Fatalf("Raw mismatch: have non-nil, want nil")
}
b, err := Marshal(&data)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
if string(b) != msg {
t.Fatalf("Marshal: have %#q want %#q", b, msg)
}
}
var blockingTests = []string{
`{"x": 1}`,
`[1, 2, 3]`,
}
func TestBlocking(t *testing.T) {
for _, enc := range blockingTests {
r, w := net.Pipe()
go w.Write([]byte(enc))
var val interface{}
// If Decode reads beyond what w.Write writes above,
// it will block, and the test will deadlock.
if err := NewDecoder(r).Decode(&val); err != nil {
t.Errorf("decoding %s: %v", enc, err)
}
r.Close()
w.Close()
}
}
func BenchmarkEncoderEncode(b *testing.B) {
b.ReportAllocs()
type T struct {
X, Y string
}
v := &T{"foo", "bar"}
for i := 0; i < b.N; i++ {
if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
b.Fatal(err)
}
}
}
type tokenStreamCase struct {
json string
expTokens []interface{}
}
type decodeThis struct {
v interface{}
}
var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
// streaming token cases
{json: `10`, expTokens: []interface{}{float64(10)}},
{json: ` [10] `, expTokens: []interface{}{
Delim('['), float64(10), Delim(']')}},
{json: ` [false,10,"b"] `, expTokens: []interface{}{
Delim('['), false, float64(10), "b", Delim(']')}},
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a", float64(1), Delim('}')}},
{json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{
Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim('{'), "a", float64(2), Delim('}'),
Delim(']')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
Delim('{'), "a", float64(1), Delim('}'),
Delim(']'), Delim('}')}},
// streaming tokens with intermittent Decode()
{json: `{ "a": 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{float64(1)},
Delim('}')}},
{json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']')}},
{json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{map[string]interface{}{"a": float64(2)}},
Delim(']')}},
{json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{
Delim('{'), "obj", Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim(']'), Delim('}')}},
{json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{map[string]interface{}{"a": float64(1)}},
Delim('}')}},
{json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
Delim('{'), "obj",
decodeThis{[]interface{}{
map[string]interface{}{"a": float64(1)},
}},
Delim('}')}},
{json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{
Delim('['),
decodeThis{map[string]interface{}{"a": float64(1)}},
decodeThis{&SyntaxError{"expected comma after array element", 0}},
}},
{json: `{ "a" 1 }`, expTokens: []interface{}{
Delim('{'), "a",
decodeThis{&SyntaxError{"expected colon after object key", 0}},
}},
}
func TestDecodeInStream(t *testing.T) {
for ci, tcase := range tokenStreamCases {
dec := NewDecoder(strings.NewReader(tcase.json))
for i, etk := range tcase.expTokens {
var tk interface{}
var err error
if dt, ok := etk.(decodeThis); ok {
etk = dt.v
err = dec.Decode(&tk)
} else {
tk, err = dec.Token()
}
if experr, ok := etk.(error); ok {
if err == nil || err.Error() != experr.Error() {
t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err)
}
break
} else if err == io.EOF {
t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
break
} else if err != nil {
t.Errorf("case %v: Unexpected error '%v' in %q", ci, err, tcase.json)
break
}
if !reflect.DeepEqual(tk, etk) {
t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
break
}
}
}
}
// Test from golang.org/issue/11893
func TestHTTPDecoding(t *testing.T) {
const raw = `{ "foo": "bar" }`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(raw))
}))
defer ts.Close()
res, err := http.Get(ts.URL)
if err != nil {
log.Fatalf("GET failed: %v", err)
}
defer res.Body.Close()
foo := struct {
Foo string
}{}
d := NewDecoder(res.Body)
err = d.Decode(&foo)
if err != nil {
t.Fatalf("Decode: %v", err)
}
if foo.Foo != "bar" {
t.Errorf("decoded %q; want \"bar\"", foo.Foo)
}
// make sure we get the EOF the second time
err = d.Decode(&foo)
if err != io.EOF {
t.Errorf("err = %v; want io.EOF", err)
}
}

View File

@ -1,115 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"testing"
)
type basicLatin2xTag struct {
V string `json:"$%-/"`
}
type basicLatin3xTag struct {
V string `json:"0123456789"`
}
type basicLatin4xTag struct {
V string `json:"ABCDEFGHIJKLMO"`
}
type basicLatin5xTag struct {
V string `json:"PQRSTUVWXYZ_"`
}
type basicLatin6xTag struct {
V string `json:"abcdefghijklmno"`
}
type basicLatin7xTag struct {
V string `json:"pqrstuvwxyz"`
}
type miscPlaneTag struct {
V string `json:"色は匂へど"`
}
type percentSlashTag struct {
V string `json:"text/html%"` // https://golang.org/issue/2718
}
type punctuationTag struct {
V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // https://golang.org/issue/3546
}
type emptyTag struct {
W string
}
type misnamedTag struct {
X string `jsom:"Misnamed"`
}
type badFormatTag struct {
Y string `:"BadFormat"`
}
type badCodeTag struct {
Z string `json:" !\"#&'()*+,."`
}
type spaceTag struct {
Q string `json:"With space"`
}
type unicodeTag struct {
W string `json:"Ελλάδα"`
}
var structTagObjectKeyTests = []struct {
raw interface{}
value string
key string
}{
{basicLatin2xTag{"2x"}, "2x", "$%-/"},
{basicLatin3xTag{"3x"}, "3x", "0123456789"},
{basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
{basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
{basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
{basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
{miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
{emptyTag{"Pour Moi"}, "Pour Moi", "W"},
{misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
{badFormatTag{"Orfevre"}, "Orfevre", "Y"},
{badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
{percentSlashTag{"brut"}, "brut", "text/html%"},
{punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"},
{spaceTag{"Perreddu"}, "Perreddu", "With space"},
{unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
}
func TestStructTagObjectKey(t *testing.T) {
for _, tt := range structTagObjectKeyTests {
b, err := Marshal(tt.raw)
if err != nil {
t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
}
var f interface{}
err = Unmarshal(b, &f)
if err != nil {
t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
}
for i, v := range f.(map[string]interface{}) {
switch i {
case tt.key:
if s, ok := v.(string); !ok || s != tt.value {
t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
}
default:
t.Fatalf("Unexpected key: %#q, from %#q", i, b)
}
}
}
}

View File

@ -1,44 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
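// A minimal in-package sketch (illustrative, hypothetical name) of how
// parseTag and Contains are combined when reading a field's json tag:
func exampleTagUse(tag string) (name string, omitempty bool) {
// e.g. "price,omitempty" yields name "price" and options "omitempty".
name, opts := parseTag(tag)
return name, opts.Contains("omitempty")
}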

View File

@ -1,28 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"testing"
)
func TestTagParsing(t *testing.T) {
name, opts := parseTag("field,foobar,foo")
if name != "field" {
t.Fatalf("name = %q, want field", name)
}
for _, tt := range []struct {
opt string
want bool
}{
{"foobar", true},
{"foo", true},
{"bar", false},
} {
if opts.Contains(tt.opt) != tt.want {
t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
}
}
}

Binary file not shown.

View File

@ -1,77 +0,0 @@
// +build !windows
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sasl/sasl.h>
static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len)
{
if (!result) {
return SASL_BADPARAM;
}
switch (id) {
case SASL_CB_USER:
*result = (char *)context;
break;
case SASL_CB_AUTHNAME:
*result = (char *)context;
break;
case SASL_CB_LANGUAGE:
*result = NULL;
break;
default:
return SASL_BADPARAM;
}
if (len) {
*len = *result ? strlen(*result) : 0;
}
return SASL_OK;
}
typedef int (*callback)(void);
static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result)
{
if (!conn || !result || id != SASL_CB_PASS) {
return SASL_BADPARAM;
}
*result = (sasl_secret_t *)context;
return SASL_OK;
}
sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password)
{
sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t));
if (!cb) {
return NULL;
}
int n = 0;
size_t len = strlen(password);
sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len);
if (!secret) {
free(cb);
return NULL;
}
strcpy((char *)secret->data, password);
secret->len = len;
cb[n].id = SASL_CB_PASS;
cb[n].proc = (callback)&mgo_sasl_secret;
cb[n].context = secret;
n++;
cb[n].id = SASL_CB_USER;
cb[n].proc = (callback)&mgo_sasl_simple;
cb[n].context = (char*)username;
n++;
cb[n].id = SASL_CB_AUTHNAME;
cb[n].proc = (callback)&mgo_sasl_simple;
cb[n].context = (char*)username;
n++;
cb[n].id = SASL_CB_LIST_END;
cb[n].proc = NULL;
cb[n].context = NULL;
return cb;
}

View File

@ -1,138 +0,0 @@
// Package sasl is an implementation detail of the mgo package.
//
// This package is not meant to be used by itself.
//
// +build !windows
package sasl
// #cgo LDFLAGS: -lsasl2
//
// struct sasl_conn {};
//
// #include <stdlib.h>
// #include <sasl/sasl.h>
//
// sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password);
//
import "C"
import (
"fmt"
"strings"
"sync"
"unsafe"
)
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
type saslSession struct {
conn *C.sasl_conn_t
step int
mech string
cstrings []*C.char
callbacks *C.sasl_callback_t
}
var initError error
var initOnce sync.Once
func initSASL() {
rc := C.sasl_client_init(nil)
if rc != C.SASL_OK {
initError = saslError(rc, nil, "cannot initialize SASL library")
}
}
func New(username, password, mechanism, service, host string) (saslStepper, error) {
initOnce.Do(initSASL)
if initError != nil {
return nil, initError
}
ss := &saslSession{mech: mechanism}
if service == "" {
service = "mongodb"
}
if i := strings.Index(host, ":"); i >= 0 {
host = host[:i]
}
ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password))
rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn)
if rc != C.SASL_OK {
ss.Close()
return nil, saslError(rc, nil, "cannot create new SASL client")
}
return ss, nil
}
func (ss *saslSession) cstr(s string) *C.char {
cstr := C.CString(s)
ss.cstrings = append(ss.cstrings, cstr)
return cstr
}
func (ss *saslSession) Close() {
for _, cstr := range ss.cstrings {
C.free(unsafe.Pointer(cstr))
}
ss.cstrings = nil
if ss.callbacks != nil {
C.free(unsafe.Pointer(ss.callbacks))
}
// The documentation of SASL dispose makes it clear that this should only
// be done when the connection is done, not when the authentication phase
// is done, because an encryption layer may have been negotiated.
// Even then, we'll do this for now, because it's simpler and prevents
// keeping track of this state for every socket. If it breaks, we'll fix it.
C.sasl_dispose(&ss.conn)
}
func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
ss.step++
if ss.step > 10 {
return nil, false, fmt.Errorf("too many SASL steps without authentication")
}
var cclientData *C.char
var cclientDataLen C.uint
var rc C.int
if ss.step == 1 {
var mechanism *C.char // ignored - must match cred
rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism)
} else {
var cserverData *C.char
var cserverDataLen C.uint
if len(serverData) > 0 {
cserverData = (*C.char)(unsafe.Pointer(&serverData[0]))
cserverDataLen = C.uint(len(serverData))
}
rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen)
}
if cclientData != nil && cclientDataLen > 0 {
clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen))
}
if rc == C.SASL_OK {
return clientData, true, nil
}
if rc == C.SASL_CONTINUE {
return clientData, false, nil
}
return nil, false, saslError(rc, ss.conn, "cannot establish SASL session")
}
func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error {
var detail string
if conn == nil {
detail = C.GoString(C.sasl_errstring(rc, nil, nil))
} else {
detail = C.GoString(C.sasl_errdetail(conn))
}
return fmt.Errorf("%s: %s", msg, detail)
}
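// A sketch, illustrative and not part of this package's API, of the
// challenge-response loop a saslStepper expects its caller to drive; the
// send parameter stands in for the wire exchange with the server and is
// an assumption of this example.
func runSASL(username, password, host string, send func([]byte) ([]byte, error)) error {
ss, err := New(username, password, "GSSAPI", "", host) // empty service defaults to "mongodb"
if err != nil {
return err
}
defer ss.Close()
var serverData []byte
for {
clientData, done, err := ss.Step(serverData)
if err != nil {
return err
}
// Deliver even the final client message before finishing.
if serverData, err = send(clientData); err != nil {
return err
}
if done {
return nil
}
}
}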

View File

@ -1,122 +0,0 @@
#include "sasl_windows.h"
static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain)
{
SEC_WINNT_AUTH_IDENTITY auth_identity;
SECURITY_INTEGER ignored;
auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
auth_identity.User = (LPSTR) username;
auth_identity.UserLength = strlen(username);
auth_identity.Password = NULL;
auth_identity.PasswordLength = 0;
if(password){
auth_identity.Password = (LPSTR) password;
auth_identity.PasswordLength = strlen(password);
}
auth_identity.Domain = (LPSTR) domain;
auth_identity.DomainLength = strlen(domain);
return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored);
}
int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID buffer, ULONG buffer_length, PVOID *out_buffer, ULONG *out_buffer_length, char *target)
{
SecBufferDesc inbuf;
SecBuffer in_bufs[1];
SecBufferDesc outbuf;
SecBuffer out_bufs[1];
if (has_context > 0) {
// If we already have a context, we now have data to send.
// Put this data in an inbuf.
inbuf.ulVersion = SECBUFFER_VERSION;
inbuf.cBuffers = 1;
inbuf.pBuffers = in_bufs;
in_bufs[0].pvBuffer = buffer;
in_bufs[0].cbBuffer = buffer_length;
in_bufs[0].BufferType = SECBUFFER_TOKEN;
}
outbuf.ulVersion = SECBUFFER_VERSION;
outbuf.cBuffers = 1;
outbuf.pBuffers = out_bufs;
out_bufs[0].pvBuffer = NULL;
out_bufs[0].cbBuffer = 0;
out_bufs[0].BufferType = SECBUFFER_TOKEN;
ULONG context_attr = 0;
int ret = call_sspi_initialize_security_context(cred_handle,
has_context > 0 ? context : NULL,
(LPSTR) target,
ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
0,
SECURITY_NETWORK_DREP,
has_context > 0 ? &inbuf : NULL,
0,
context,
&outbuf,
&context_attr,
NULL);
*out_buffer = malloc(out_bufs[0].cbBuffer);
*out_buffer_length = out_bufs[0].cbBuffer;
memcpy(*out_buffer, out_bufs[0].pvBuffer, *out_buffer_length);
return ret;
}
int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm)
{
SecPkgContext_Sizes sizes;
SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes);
if (status != SEC_E_OK) {
return status;
}
size_t user_plus_realm_length = strlen(user_plus_realm);
int msgSize = 4 + user_plus_realm_length;
char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char));
msg[sizes.cbSecurityTrailer + 0] = 1;
msg[sizes.cbSecurityTrailer + 1] = 0;
msg[sizes.cbSecurityTrailer + 2] = 0;
msg[sizes.cbSecurityTrailer + 3] = 0;
memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length);
SecBuffer wrapBufs[3];
SecBufferDesc wrapBufDesc;
wrapBufDesc.cBuffers = 3;
wrapBufDesc.pBuffers = wrapBufs;
wrapBufDesc.ulVersion = SECBUFFER_VERSION;
wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer;
wrapBufs[0].BufferType = SECBUFFER_TOKEN;
wrapBufs[0].pvBuffer = msg;
wrapBufs[1].cbBuffer = msgSize;
wrapBufs[1].BufferType = SECBUFFER_DATA;
wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
wrapBufs[2].cbBuffer = sizes.cbBlockSize;
wrapBufs[2].BufferType = SECBUFFER_PADDING;
wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize;
status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0);
if (status != SEC_E_OK) {
free(msg);
return status;
}
*buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer;
*buffer = malloc(*buffer_length);
memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer);
memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer);
memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer);
free(msg);
return SEC_E_OK;
}

View File

@ -1,142 +0,0 @@
package sasl
// #include "sasl_windows.h"
import "C"
import (
"fmt"
"strings"
"sync"
"unsafe"
)
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
type saslSession struct {
// Credentials
mech string
service string
host string
userPlusRealm string
target string
domain string
// Internal state
authComplete bool
errored bool
step int
// C internal state
credHandle C.CredHandle
context C.CtxtHandle
hasContext C.int
// Keep track of pointers we need to explicitly free
stringsToFree []*C.char
}
var initError error
var initOnce sync.Once
func initSSPI() {
rc := C.load_secur32_dll()
if rc != 0 {
initError = fmt.Errorf("Error loading libraries: %v", rc)
}
}
func New(username, password, mechanism, service, host string) (saslStepper, error) {
initOnce.Do(initSSPI)
ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username}
if service == "" {
service = "mongodb"
}
if i := strings.Index(host, ":"); i >= 0 {
host = host[:i]
}
ss.service = service
ss.host = host
usernameComponents := strings.Split(username, "@")
if len(usernameComponents) < 2 {
return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username)
}
user := usernameComponents[0]
ss.domain = usernameComponents[1]
ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host)
var status C.SECURITY_STATUS
// Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle
if len(password) > 0 {
status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain))
} else {
status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain))
}
if status != C.SEC_E_OK {
ss.errored = true
return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status)
}
return ss, nil
}
func (ss *saslSession) cstr(s string) *C.char {
cstr := C.CString(s)
ss.stringsToFree = append(ss.stringsToFree, cstr)
return cstr
}
func (ss *saslSession) Close() {
for _, cstr := range ss.stringsToFree {
C.free(unsafe.Pointer(cstr))
}
}
func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
ss.step++
if ss.step > 10 {
return nil, false, fmt.Errorf("too many SSPI steps without authentication")
}
var buffer C.PVOID
var bufferLength C.ULONG
var outBuffer C.PVOID
var outBufferLength C.ULONG
if len(serverData) > 0 {
buffer = (C.PVOID)(unsafe.Pointer(&serverData[0]))
bufferLength = C.ULONG(len(serverData))
}
var status C.int
if ss.authComplete {
// Step 3: last bit of magic to use the correct server credentials
status = C.sspi_send_client_authz_id(&ss.context, &outBuffer, &outBufferLength, ss.cstr(ss.userPlusRealm))
} else {
// Step 1 + Step 2: set up security context with the server and TGT
status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, buffer, bufferLength, &outBuffer, &outBufferLength, ss.cstr(ss.target))
}
if outBuffer != C.PVOID(nil) {
defer C.free(unsafe.Pointer(outBuffer))
}
if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED {
ss.errored = true
return nil, false, ss.handleSSPIErrorCode(status)
}
clientData = C.GoBytes(unsafe.Pointer(outBuffer), C.int(outBufferLength))
if status == C.SEC_E_OK {
ss.authComplete = true
return clientData, true, nil
} else {
ss.hasContext = 1
return clientData, false, nil
}
}
func (ss *saslSession) handleSSPIErrorCode(code C.int) error {
switch {
case code == C.SEC_E_TARGET_UNKNOWN:
return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain)
}
return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code)
}

View File

@ -1,7 +0,0 @@
#include <windows.h>
#include "sspi_windows.h"
SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain);
int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID buffer, ULONG buffer_length, PVOID* out_buffer, ULONG* out_buffer_length, char* target);
int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm);

View File

@ -1,96 +0,0 @@
// Code adapted from the NodeJS kerberos library:
//
// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c
//
// Under the terms of the Apache License, Version 2.0:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
#include <stdlib.h>
#include "sspi_windows.h"
static HINSTANCE sspi_secur32_dll = NULL;
int load_secur32_dll()
{
sspi_secur32_dll = LoadLibrary("secur32.dll");
if (sspi_secur32_dll == NULL) {
return GetLastError();
}
return 0;
}
SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage");
if (!pfn_encryptMessage) {
return -2;
}
return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo);
}
SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
PCredHandle phCredential, PTimeStamp ptsExpiry)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
acquireCredentialsHandle_fn pfn_acquireCredentialsHandle;
#ifdef _UNICODE
pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW");
#else
pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA");
#endif
if (!pfn_acquireCredentialsHandle) {
return -2;
}
return (*pfn_acquireCredentialsHandle)(
pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData,
pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
}
SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName,
unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep,
PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext,
PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
initializeSecurityContext_fn pfn_initializeSecurityContext;
#ifdef _UNICODE
pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW");
#else
pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA");
#endif
if (!pfn_initializeSecurityContext) {
return -2;
}
return (*pfn_initializeSecurityContext)(
phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep,
pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
}
SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
queryContextAttributes_fn pfn_queryContextAttributes;
#ifdef _UNICODE
pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW");
#else
pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA");
#endif
if (!pfn_queryContextAttributes) {
return -2;
}
return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer);
}

View File

@ -1,70 +0,0 @@
// Code adapted from the NodeJS kerberos library:
//
// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h
//
// Under the terms of the Apache License, Version 2.0:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
#ifndef SSPI_WINDOWS_H
#define SSPI_WINDOWS_H
#define SECURITY_WIN32 1
#include <windows.h>
#include <sspi.h>
int load_secur32_dll();
SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo);
typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo);
SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
LPSTR pszPrincipal, // Name of principal
LPSTR pszPackage, // Name of package
unsigned long fCredentialUse, // Flags indicating use
void *pvLogonId, // Pointer to logon ID
void *pAuthData, // Package specific data
SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func
void *pvGetKeyArgument, // Value to pass to GetKey()
PCredHandle phCredential, // (out) Cred Handle
PTimeStamp ptsExpiry // (out) Lifetime (optional)
);
typedef DWORD (WINAPI *acquireCredentialsHandle_fn)(
LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
PCredHandle phCredential, PTimeStamp ptsExpiry
);
SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
PCredHandle phCredential, // Cred to base context
PCtxtHandle phContext, // Existing context (OPT)
LPSTR pszTargetName, // Name of target
unsigned long fContextReq, // Context Requirements
unsigned long Reserved1, // Reserved, MBZ
unsigned long TargetDataRep, // Data rep of target
PSecBufferDesc pInput, // Input Buffers
unsigned long Reserved2, // Reserved, MBZ
PCtxtHandle phNewContext, // (out) New Context handle
PSecBufferDesc pOutput, // (inout) Output Buffers
unsigned long *pfContextAttr, // (out) Context attrs
PTimeStamp ptsExpiry // (out) Life span (OPT)
);
typedef DWORD (WINAPI *initializeSecurityContext_fn)(
PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq,
unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2,
PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry);
SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(
PCtxtHandle phContext, // Context to query
unsigned long ulAttribute, // Attribute to query
void *pBuffer // Buffer for attributes
);
typedef DWORD (WINAPI *queryContextAttributes_fn)(
PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer);
#endif // SSPI_WINDOWS_H

View File

@ -1,266 +0,0 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
//
// http://tools.ietf.org/html/rfc5802
//
package scram
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"encoding/base64"
"fmt"
"hash"
"strconv"
"strings"
)
// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
// var in []byte
// var client = scram.NewClient(sha1.New, user, pass)
// for !client.Step(in) {
// out := client.Out()
// // send out to the server, then read its reply into serverOut
// in = serverOut
// }
// if client.Err() != nil {
// // auth failed
// }
//
type Client struct {
newHash func() hash.Hash
user string
pass string
step int
out bytes.Buffer
err error
clientNonce []byte
serverNonce []byte
saltedPass []byte
authMsg bytes.Buffer
}
// NewClient returns a new SCRAM-* client with the provided hash algorithm.
//
// For SCRAM-SHA-1, for example, use:
//
// client := scram.NewClient(sha1.New, user, pass)
//
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
c := &Client{
newHash: newHash,
user: user,
pass: pass,
}
c.out.Grow(256)
c.authMsg.Grow(256)
return c
}
// Out returns the data to be sent to the server in the current step.
func (c *Client) Out() []byte {
if c.out.Len() == 0 {
return nil
}
return c.out.Bytes()
}
// Err returns the error that occurred, or nil if there were no errors.
func (c *Client) Err() error {
return c.err
}
// SetNonce sets the client nonce to the provided value.
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
func (c *Client) SetNonce(nonce []byte) {
c.clientNonce = nonce
}
var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
// Step processes the incoming data from the server and makes the
// next round of data for the server available via Client.Out.
// Step returns false if there are no errors and more data is
// still expected.
func (c *Client) Step(in []byte) bool {
c.out.Reset()
if c.step > 2 || c.err != nil {
return false
}
c.step++
switch c.step {
case 1:
c.err = c.step1(in)
case 2:
c.err = c.step2(in)
case 3:
c.err = c.step3(in)
}
return c.step > 2 || c.err != nil
}
func (c *Client) step1(in []byte) error {
if len(c.clientNonce) == 0 {
const nonceLen = 6
buf := make([]byte, nonceLen + b64.EncodedLen(nonceLen))
if _, err := rand.Read(buf[:nonceLen]); err != nil {
return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err)
}
c.clientNonce = buf[nonceLen:]
b64.Encode(c.clientNonce, buf[:nonceLen])
}
c.authMsg.WriteString("n=")
escaper.WriteString(&c.authMsg, c.user)
c.authMsg.WriteString(",r=")
c.authMsg.Write(c.clientNonce)
c.out.WriteString("n,,")
c.out.Write(c.authMsg.Bytes())
return nil
}
var b64 = base64.StdEncoding
func (c *Client) step2(in []byte) error {
c.authMsg.WriteByte(',')
c.authMsg.Write(in)
fields := bytes.Split(in, []byte(","))
if len(fields) != 3 {
return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in)
}
if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0])
}
if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1])
}
if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
}
c.serverNonce = fields[0][2:]
if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
}
salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
n, err := b64.Decode(salt, fields[1][2:])
if err != nil {
return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1])
}
salt = salt[:n]
iterCount, err := strconv.Atoi(string(fields[2][2:]))
if err != nil {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
}
c.saltPassword(salt, iterCount)
c.authMsg.WriteString(",c=biws,r=")
c.authMsg.Write(c.serverNonce)
c.out.WriteString("c=biws,r=")
c.out.Write(c.serverNonce)
c.out.WriteString(",p=")
c.out.Write(c.clientProof())
return nil
}
func (c *Client) step3(in []byte) error {
var isv, ise bool
var fields = bytes.Split(in, []byte(","))
if len(fields) == 1 {
isv = bytes.HasPrefix(fields[0], []byte("v="))
ise = bytes.HasPrefix(fields[0], []byte("e="))
}
if ise {
return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:])
} else if !isv {
return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in)
}
if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:])
}
return nil
}
func (c *Client) saltPassword(salt []byte, iterCount int) {
mac := hmac.New(c.newHash, []byte(c.pass))
mac.Write(salt)
mac.Write([]byte{0, 0, 0, 1})
ui := mac.Sum(nil)
hi := make([]byte, len(ui))
copy(hi, ui)
for i := 1; i < iterCount; i++ {
mac.Reset()
mac.Write(ui)
mac.Sum(ui[:0])
for j, b := range ui {
hi[j] ^= b
}
}
c.saltedPass = hi
}
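// Editor's note: saltPassword is Hi() from RFC 5802, and the two functions
// below derive the remaining values of the exchange (a sketch of the RFC's
// definitions, writing H for the hash and HMAC(key, msg) for the MAC):
//
//	SaltedPassword  := Hi(password, salt, i)
//	ClientKey       := HMAC(SaltedPassword, "Client Key")
//	StoredKey       := H(ClientKey)
//	ClientProof     := ClientKey XOR HMAC(StoredKey, AuthMessage)
//	ServerKey       := HMAC(SaltedPassword, "Server Key")
//	ServerSignature := HMAC(ServerKey, AuthMessage)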
func (c *Client) clientProof() []byte {
mac := hmac.New(c.newHash, c.saltedPass)
mac.Write([]byte("Client Key"))
clientKey := mac.Sum(nil)
hash := c.newHash()
hash.Write(clientKey)
storedKey := hash.Sum(nil)
mac = hmac.New(c.newHash, storedKey)
mac.Write(c.authMsg.Bytes())
clientProof := mac.Sum(nil)
for i, b := range clientKey {
clientProof[i] ^= b
}
clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
b64.Encode(clientProof64, clientProof)
return clientProof64
}
func (c *Client) serverSignature() []byte {
mac := hmac.New(c.newHash, c.saltedPass)
mac.Write([]byte("Server Key"))
serverKey := mac.Sum(nil)
mac = hmac.New(c.newHash, serverKey)
mac.Write(c.authMsg.Bytes())
serverSignature := mac.Sum(nil)
encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
b64.Encode(encoded, serverSignature)
return encoded
}

View File

@ -1,67 +0,0 @@
package scram_test
import (
"crypto/sha1"
"testing"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2/internal/scram"
"strings"
)
var _ = Suite(&S{})
func Test(t *testing.T) { TestingT(t) }
type S struct{}
var tests = [][]string{{
"U: user pencil",
"N: fyko+d2lbbFgONRv9qkxdawL",
"C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
"S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096",
"C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=",
"S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=",
}, {
"U: root fe8c89e308ec08763df36333cbf5d3a2",
"N: OTcxNDk5NjM2MzE5",
"C: n,,n=root,r=OTcxNDk5NjM2MzE5",
"S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000",
"C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=",
"S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=",
}}
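// Line prefixes in the vectors above: "U: " carries the user and password,
// "N: " pins the client nonce, "C: " is the expected client output, and
// "S: " is the server reply fed back into Step.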
func (s *S) TestExamples(c *C) {
for _, steps := range tests {
if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") {
c.Fatalf("Invalid test: %#v", steps)
}
auth := strings.Fields(steps[0][3:])
client := scram.NewClient(sha1.New, auth[0], auth[1])
first, done := true, false
c.Logf("-----")
c.Logf("%s", steps[0])
for _, step := range steps[1:] {
c.Logf("%s", step)
switch step[:3] {
case "N: ":
client.SetNonce([]byte(step[3:]))
case "C: ":
if first {
first = false
done = client.Step(nil)
}
c.Assert(done, Equals, false)
c.Assert(client.Err(), IsNil)
c.Assert(string(client.Out()), Equals, step[3:])
case "S: ":
first = false
done = client.Step([]byte(step[3:]))
default:
panic("invalid test line: " + step)
}
}
c.Assert(done, Equals, true)
c.Assert(client.Err(), IsNil)
}
}

201
vendor/gopkg.in/yaml.v2/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

31
vendor/gopkg.in/yaml.v2/LICENSE.libyaml generated vendored Normal file
View File

@ -0,0 +1,31 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

133
vendor/gopkg.in/yaml.v2/README.md generated vendored Normal file
View File

@ -0,0 +1,133 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v2*.
To install it, run:
go get gopkg.in/yaml.v2
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
API stability
-------------
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
-------
Some more examples can be found in the "examples" folder.
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```
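The field tags on `T` above use the package's `yaml:"..."` tag syntax. A
brief, non-exhaustive sketch of the common options (see the Marshal
documentation for the full list):

```Go
type Item struct {
	Name  string `yaml:"name"`       // rename the key in the document
	Tags  []int  `yaml:",flow"`      // force flow style: [1, 2, 3]
	Notes string `yaml:",omitempty"` // drop the field when empty
}
```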

742
vendor/gopkg.in/yaml.v2/apic.go generated vendored Normal file
View File

@ -0,0 +1,742 @@
package yaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

685
vendor/gopkg.in/yaml.v2/decode.go generated vendored Normal file
View File

@ -0,0 +1,685 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"strconv"
"time"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
yaml_event_delete(&p.event)
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[string]bool
mapType reflect.Type
terrors []string
strict bool
}
var (
mapItemType = reflect.TypeOf(MapItem{})
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
)
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[string]bool)
return d
}
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
}
value := n.value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
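// Editor's sketch (assumed example, not part of this file): a type that the
// Unmarshaler check above would pick up, decoding a scalar through a custom
// hook.
//
//	type Celsius float64
//
//	func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
//		var s string
//		if err := unmarshal(&s); err != nil {
//			return err
//		}
//		f, err := strconv.ParseFloat(s, 64) // scalars like "21.5"
//		if err != nil {
//			return err
//		}
//		*c = Celsius(f)
//		return nil
//	}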
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
return d.document(n, out)
case aliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
good = d.scalar(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return good
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
return true
}
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
out.SetString(n.value)
good = true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
good = true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
case int64:
out.SetFloat(float64(resolved))
good = true
case uint64:
out.SetFloat(float64(resolved))
good = true
case float64:
out.SetFloat(resolved)
good = true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
}
}
if !good {
d.terror(n, tag, out)
}
return good
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, yaml_SEQ_TAG, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Index(j).Set(e)
j++
}
}
out.Set(out.Slice(0, j))
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Slice:
return d.mappingSlice(n, out)
case reflect.Map:
// okay
case reflect.Interface:
if d.mapType.Kind() == reflect.Map {
iface := out
out = reflect.MakeMap(d.mapType)
iface.Set(out)
} else {
slicev := reflect.New(d.mapType).Elem()
if !d.mappingSlice(n, slicev) {
return false
}
out.Set(slicev)
return true
}
default:
d.terror(n, yaml_MAP_TAG, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
mapType := d.mapType
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
d.mapType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
}
}
}
d.mapType = mapType
return true
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
d.terror(n, yaml_MAP_TAG, out)
return false
}
mapType := d.mapType
d.mapType = outt
var slice []MapItem
var l = len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
item := MapItem{}
k := reflect.ValueOf(&item.Key).Elem()
if d.unmarshal(n.children[i], k) {
v := reflect.ValueOf(&item.Value).Elem()
if d.unmarshal(n.children[i+1], v) {
slice = append(slice, item)
}
}
}
out.Set(reflect.ValueOf(slice))
d.mapType = mapType
return true
}
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
d.merge(n.children[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type()))
}
}
return true
}
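
The InlineMap branch above implements the ",inline" map tag: document keys that match no struct field land in the inline map instead of being dropped (or reported, in strict mode). A small usage sketch; the Config type and its fields are illustrative, not part of this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Config struct {
	Name  string                 `yaml:"name"`
	Extra map[string]interface{} `yaml:",inline"` // catch-all for unknown keys
}

func main() {
	var c Config
	if err := yaml.Unmarshal([]byte("name: app\nport: 80\ndebug: true\n"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Name, c.Extra) // app map[debug:true port:80]
}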
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *node, out reflect.Value) {
switch n.kind {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
an, ok := d.doc.anchors[n.value]
if ok && an.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
an, ok := d.doc.anchors[ni.value]
if ok && an.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
}
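
merge and isMerge together implement the YAML merge key "<<": entries of the aliased mapping are folded into the current one, and explicitly listed keys win over merged ones. A quick sketch, assuming the package is imported from its vendored path gopkg.in/yaml.v2:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev
`)
	var out map[string]map[string]string
	if err := yaml.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["development"])
	// map[adapter:postgres database:dev host:localhost]
}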

1684
vendor/gopkg.in/yaml.v2/emitterc.go generated vendored Normal file

File diff suppressed because it is too large

306
vendor/gopkg.in/yaml.v2/encode.go generated vendored Normal file
View File

@ -0,0 +1,306 @@
package yaml
import (
"encoding"
"fmt"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
e.emit()
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
e.nilv()
return
}
iface := in.Interface()
if m, ok := iface.(Marshaler); ok {
v, err := m.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
in = reflect.ValueOf(v)
} else if m, ok := iface.(encoding.TextMarshaler); ok {
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
e.slicev(tag, in)
}
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if in.Type() == durationType {
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
} else {
e.intv(tag, in)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) itemsv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
for _, item := range slice {
e.marshal("", reflect.ValueOf(item.Key))
e.marshal("", reflect.ValueOf(item.Value))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60Float reports whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
} else {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
	// Format with the precision of the underlying kind so that float64
	// values round-trip without loss.
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}
	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}
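
Taken together, stringv's rules mean the encoder quotes any string that would resolve to a non-string on the way back in (numbers, base-60 lookalikes) and uses literal style for multi-line values. An illustration; the output shown is an expectation, not taken from this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"name":    "app",  // plain scalar, stays unquoted
		"version": "1.10", // would re-resolve as a float, so it gets quoted
		"time":    "1:20", // YAML 1.1 base-60 lookalike, also quoted
		"text":    "a\nb", // contains a newline, emitted in literal style
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly:
	//   name: app
	//   text: |-
	//     a
	//     b
	//   time: "1:20"
	//   version: "1.10"
}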

1095
vendor/gopkg.in/yaml.v2/parserc.go generated vendored Normal file

File diff suppressed because it is too large

394
vendor/gopkg.in/yaml.v2/readerc.go generated vendored Normal file
View File

@ -0,0 +1,394 @@
package yaml
import (
"io"
)
// Set the reader error and return 0.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
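
Since the reader transcodes all input to UTF-8 internally, UTF-16 documents with a byte order mark decode transparently. A short sketch:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// "a: 1" encoded as UTF-16LE, preceded by its byte order mark.
	in := []byte{0xff, 0xfe, 'a', 0, ':', 0, ' ', 0, '1', 0}
	var m map[string]int
	if err := yaml.Unmarshal(in, &m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"]) // 1
}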

208
vendor/gopkg.in/yaml.v2/resolve.go generated vendored Normal file
View File

@ -0,0 +1,208 @@
package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"unicode/utf8"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", yaml_MERGE_TAG, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
// TODO This can easily be made faster and produce less garbage.
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
return true
}
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
} else {
return yaml_INT_TAG, -intv
}
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
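
resolve is what gives untyped unmarshalling its YAML 1.1 semantics: boolean words, prefixed integers, and floats come back as native Go types, while anything unrecognized falls through to a string. For illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v map[string]interface{}
	err := yaml.Unmarshal([]byte("a: yes\nb: 0x1F\nc: .5\nd: hello\ne: ~\n"), &v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T %T %T %T %v\n", v["a"], v["b"], v["c"], v["d"], v["e"])
	// bool int float64 string <nil>
}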

2711
vendor/gopkg.in/yaml.v2/scannerc.go generated vendored Normal file

File diff suppressed because it is too large

104
vendor/gopkg.in/yaml.v2/sorter.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
return bl
}
var ai, bi int
var an, bn int64
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
// keyFloat returns a float value for v if it is a number or bool,
// and reports whether it is one.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess reports whether a < b.
// a and b must have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}
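
This sorter is what makes Marshal's map output deterministic: digit runs inside string keys compare numerically, so item2 sorts before item10. A quick sketch:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 1, "item2": 2, "item1": 3})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 3
	// item2: 2
	// item10: 1
}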

89
vendor/gopkg.in/yaml.v2/writerc.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}

357
vendor/gopkg.in/yaml.v2/yaml.go generated vendored Normal file
View File

@ -0,0 +1,357 @@
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/go-yaml/yaml
//
package yaml
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
)
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem
// MapItem is an item in a MapSlice.
type MapItem struct {
Key, Value interface{}
}
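
When key order matters, MapSlice round-trips a document without the key sorting that plain maps get (see the sorter above). A minimal sketch:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var ms yaml.MapSlice
	if err := yaml.Unmarshal([]byte("z: 1\na: 2\n"), &ms); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(ms)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // "z: 1\na: 2\n", document order preserved
}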
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatch, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}
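
A quick sketch of the strict mode: fields that do decode are still filled, and the unknown key surfaces through the returned *TypeError (the message text is an expectation based on the strict branch in decode.go above):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type T struct {
	A int `yaml:"a"`
}

func main() {
	var t T
	err := yaml.UnmarshalStrict([]byte("a: 1\nb: 2\n"), &t)
	fmt.Println(t.A) // 1
	fmt.Println(err)
	// yaml: unmarshal errors:
	//   line 2: field b not found in struct main.T
}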
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder(strict)
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int "a,omitempty"
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
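
The partial-decode contract is easy to miss: even when Unmarshal returns a *TypeError, every field that did decode is already populated. For example:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var s struct {
		A int    `yaml:"a"`
		B string `yaml:"b"`
	}
	err := yaml.Unmarshal([]byte("a: not-a-number\nb: ok\n"), &s)
	if _, ok := err.(*yaml.TypeError); ok {
		fmt.Println("partially decoded; b =", s.B) // partially decoded; b = ok
	}
}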
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
//return nil, errors.New("Option ,inline needs a struct value or map field")
return nil, errors.New("Option ,inline needs a struct value field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}

716
vendor/gopkg.in/yaml.v2/yamlh.go generated vendored Normal file
View File

@ -0,0 +1,716 @@
package yaml
import (
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
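// Illustrative sketch (editor's note, not part of the original source): the
// internal parser is driven through the yaml_parser_ helper functions — roughly,
// initialize it, point it at an input buffer, then pull events until stream end.
// A hedged usage outline under those assumptions:
//
//	var parser yaml_parser_t
//	yaml_parser_initialize(&parser)
//	yaml_parser_set_input_string(&parser, []byte("key: value"))
//	var event yaml_event_t
//	for yaml_parser_parse(&parser, &event) {
//		if event.typ == yaml_STREAM_END_EVENT {
//			break
//		}
//	}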
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write the contents of buffer
// to the output, returning nil on success or a non-nil error on failure.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
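// Minimal sketch (editor's note, not part of the original source): a write
// handler that appends the flushed bytes to the emitter's output buffer,
// assuming output_buffer has been set. The demo_ name is hypothetical; the
// upstream package wires up an equivalent handler for string output.
//
//	func demo_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
//		*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
//		return nil
//	}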
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_file io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}
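// Illustrative sketch (editor's note, not part of the original source): the
// emitter mirrors the parser and is driven through the yaml_emitter_ helpers —
// initialize it, attach an output buffer, and feed it events one at a time.
// A hedged outline under those assumptions:
//
//	var emitter yaml_emitter_t
//	yaml_emitter_initialize(&emitter)
//	var out []byte
//	yaml_emitter_set_output_string(&emitter, &out)
//	// ... construct events and pass each to yaml_emitter_emit(&emitter, &event)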

vendor/gopkg.in/yaml.v2/yamlprivateh.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
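// Sizing rationale (editor's note, hedged): the raw buffer holds encoded
// input (UTF-8 or UTF-16) while the working buffer holds decoded UTF-8. A
// 2-byte UTF-16 code unit can decode to as many as 3 UTF-8 bytes, so a
// working buffer of 3x the raw size is always large enough. In the other
// direction, a 1-byte UTF-8 character can encode to 2 UTF-16 bytes, and the
// extra +2 presumably leaves room for a byte order mark, hence the
// output_buffer_size*2 + 2 raw size.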
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
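// Illustrative sketch (editor's note, not part of the original source):
// is_hex/as_hex are the building blocks for decoding numeric escapes; a
// two-digit escape such as "\x41" would be assembled roughly like this
// (demo_ helper is hypothetical):
//
//	func demo_decode_hex2(b []byte, i int) (int, bool) {
//		if !is_hex(b, i) || !is_hex(b, i+1) {
//			return 0, false
//		}
//		return as_hex(b, i)<<4 + as_hex(b, i+1), true // "41" -> 0x41 ('A')
//	}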
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
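// Decoding the magic bytes above (editor's note): 0xC2 0xA0 is the first
// 2-byte UTF-8 sequence at or above U+00A0; leading bytes 0xC2..0xEC cover
// U+00A0..U+CFFF; 0xED with a second byte below 0xA0 stays under the
// surrogate range U+D800..U+DFFF, which is excluded; 0xEF 0xBB 0xBF is
// U+FEFF (the BOM) and 0xEF 0xBF 0xBE/0xBF are the noncharacters
// U+FFFE/U+FFFF, all of which are rejected.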
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
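// Check if the character at the specified position is a CR followed by an LF.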
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return ( // is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return ( // is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return ( // is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
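// Illustrative examples (editor's note): width classifies a UTF-8 leading
// byte by its high bits, e.g. 'a' (0x61) -> 1, the lead byte of U+00E9
// (0xC3) -> 2, of U+4E2D (0xE4) -> 3, of U+1F600 (0xF0) -> 4; continuation
// bytes (0x80..0xBF) match none of the masks and return 0.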