Changing k8s-client to kubernetes/go-client
This commit is contained in:
parent
d2bf65ede9
commit
1344f6314e
1134
Godeps/Godeps.json
generated
1134
Godeps/Godeps.json
generated
File diff suppressed because it is too large
Load Diff
@ -1,55 +1,61 @@
|
|||||||
package client
|
package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
restclient "k8s.io/kubernetes/pkg/client/restclient"
|
"k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
v1batch "k8s.io/client-go/kubernetes/typed/batch/v1"
|
||||||
|
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
|
v1beta1extensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ClientInterface interface {
|
type ClientInterface interface {
|
||||||
Pods(string) unversioned.PodInterface
|
Pods(string) v1core.PodInterface
|
||||||
Jobs(string) unversioned.JobInterface
|
Jobs(string) v1batch.JobInterface
|
||||||
Endpoints(string) unversioned.EndpointsInterface
|
Endpoints(string) v1core.EndpointsInterface
|
||||||
DaemonSets(string) unversioned.DaemonSetInterface
|
DaemonSets(string) v1beta1extensions.DaemonSetInterface
|
||||||
Services(string) unversioned.ServiceInterface
|
Services(string) v1core.ServiceInterface
|
||||||
}
|
}
|
||||||
|
|
||||||
type Client struct {
|
type Client struct {
|
||||||
*unversioned.Client
|
*kubernetes.Clientset
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Pods(namespace string) unversioned.PodInterface {
|
func (c Client) Pods(namespace string) v1core.PodInterface {
|
||||||
return c.Client.Pods(namespace)
|
return c.Clientset.Core().Pods(namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Jobs(namespace string) unversioned.JobInterface {
|
func (c Client) Jobs(namespace string) v1batch.JobInterface {
|
||||||
return c.Client.Extensions().Jobs(namespace)
|
return c.Clientset.Batch().Jobs(namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Endpoints(namespace string) unversioned.EndpointsInterface {
|
func (c Client) Endpoints(namespace string) v1core.EndpointsInterface {
|
||||||
return c.Client.Endpoints(namespace)
|
return c.Clientset.Core().Endpoints(namespace)
|
||||||
}
|
}
|
||||||
func (c Client) DaemonSets(namespace string) unversioned.DaemonSetInterface {
|
func (c Client) DaemonSets(namespace string) v1beta1extensions.DaemonSetInterface {
|
||||||
return c.Client.Extensions().DaemonSets(namespace)
|
return c.Clientset.Extensions().DaemonSets(namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Services(namespace string) unversioned.ServiceInterface {
|
func (c Client) Services(namespace string) v1core.ServiceInterface {
|
||||||
return c.Client.Services(namespace)
|
return c.Clientset.Core().Services(namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(config *restclient.Config) (ClientInterface, error) {
|
func New(config *rest.Config) (ClientInterface, error) {
|
||||||
if config == nil {
|
if config == nil {
|
||||||
client, err := unversioned.NewInCluster()
|
config, err := rest.InClusterConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return Client{Client: client}, nil
|
clientset, err := kubernetes.NewForConfig(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return Client{Clientset: clientset}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
client, err := unversioned.New(config)
|
clientset, err := kubernetes.NewForConfig(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return Client{Client: client}, nil
|
return Client{Clientset: clientset}, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
19
dependencies/daemonset/daemonset.go
vendored
19
dependencies/daemonset/daemonset.go
vendored
@ -7,8 +7,8 @@ import (
|
|||||||
entry "github.com/stackanetes/kubernetes-entrypoint/entrypoint"
|
entry "github.com/stackanetes/kubernetes-entrypoint/entrypoint"
|
||||||
"github.com/stackanetes/kubernetes-entrypoint/logger"
|
"github.com/stackanetes/kubernetes-entrypoint/logger"
|
||||||
"github.com/stackanetes/kubernetes-entrypoint/util/env"
|
"github.com/stackanetes/kubernetes-entrypoint/util/env"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/client-go/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/client-go/pkg/labels"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Daemonset struct {
|
type Daemonset struct {
|
||||||
@ -48,7 +48,7 @@ func (d Daemonset) IsResolved(entrypoint entry.EntrypointInterface) (bool, error
|
|||||||
}
|
}
|
||||||
|
|
||||||
label := labels.SelectorFromSet(daemonset.Spec.Selector.MatchLabels)
|
label := labels.SelectorFromSet(daemonset.Spec.Selector.MatchLabels)
|
||||||
opts := api.ListOptions{LabelSelector: label}
|
opts := v1.ListOptions{LabelSelector: label.String()}
|
||||||
pods, err := entrypoint.Client().Pods(entrypoint.GetNamespace()).List(opts)
|
pods, err := entrypoint.Client().Pods(entrypoint.GetNamespace()).List(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
@ -65,7 +65,7 @@ func (d Daemonset) IsResolved(entrypoint entry.EntrypointInterface) (bool, error
|
|||||||
if !isPodOnHost(&pod, myHost) {
|
if !isPodOnHost(&pod, myHost) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if api.IsPodReady(&pod) {
|
if isPodReady(pod) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("Pod %v of daemonset %v is not ready", pod.Name, d.GetName())
|
return false, fmt.Errorf("Pod %v of daemonset %v is not ready", pod.Name, d.GetName())
|
||||||
@ -78,9 +78,18 @@ func (d Daemonset) GetName() string {
|
|||||||
return d.name
|
return d.name
|
||||||
}
|
}
|
||||||
|
|
||||||
func isPodOnHost(pod *api.Pod, hostIP string) bool {
|
func isPodOnHost(pod *v1.Pod, hostIP string) bool {
|
||||||
if pod.Status.HostIP == hostIP {
|
if pod.Status.HostIP == hostIP {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isPodReady(pod v1.Pod) bool {
|
||||||
|
for _, condition := range pod.Status.Conditions {
|
||||||
|
if condition.Type == v1.PodReady && condition.Status == "True" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
@ -3,7 +3,7 @@ package entrypoint
|
|||||||
import (
|
import (
|
||||||
cli "github.com/stackanetes/kubernetes-entrypoint/client"
|
cli "github.com/stackanetes/kubernetes-entrypoint/client"
|
||||||
"github.com/stackanetes/kubernetes-entrypoint/logger"
|
"github.com/stackanetes/kubernetes-entrypoint/logger"
|
||||||
restclient "k8s.io/kubernetes/pkg/client/restclient"
|
"k8s.io/client-go/rest"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@ -29,7 +29,7 @@ type Entrypoint struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//New is a constructor for entrypoint
|
//New is a constructor for entrypoint
|
||||||
func New(config *restclient.Config) (entry *Entrypoint, err error) {
|
func New(config *rest.Config) (entry *Entrypoint, err error) {
|
||||||
entry = new(Entrypoint)
|
entry = new(Entrypoint)
|
||||||
client, err := cli.New(config)
|
client, err := cli.New(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -2,33 +2,35 @@ package mocks
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
cli "github.com/stackanetes/kubernetes-entrypoint/client"
|
cli "github.com/stackanetes/kubernetes-entrypoint/client"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
v1batch "k8s.io/client-go/kubernetes/typed/batch/v1"
|
||||||
|
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
|
v1beta1extensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Client struct {
|
type Client struct {
|
||||||
unversioned.PodInterface
|
v1core.PodInterface
|
||||||
unversioned.ServiceInterface
|
v1core.ServiceInterface
|
||||||
unversioned.DaemonSetInterface
|
v1beta1extensions.DaemonSetInterface
|
||||||
unversioned.EndpointsInterface
|
v1core.EndpointsInterface
|
||||||
unversioned.JobInterface
|
v1batch.JobInterface
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Pods(namespace string) unversioned.PodInterface {
|
func (c Client) Pods(namespace string) v1core.PodInterface {
|
||||||
return c.PodInterface
|
return c.PodInterface
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Services(namespace string) unversioned.ServiceInterface {
|
func (c Client) Services(namespace string) v1core.ServiceInterface {
|
||||||
return c.ServiceInterface
|
return c.ServiceInterface
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) DaemonSets(namespace string) unversioned.DaemonSetInterface {
|
func (c Client) DaemonSets(namespace string) v1beta1extensions.DaemonSetInterface {
|
||||||
return c.DaemonSetInterface
|
return c.DaemonSetInterface
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Client) Endpoints(namespace string) unversioned.EndpointsInterface {
|
func (c Client) Endpoints(namespace string) v1core.EndpointsInterface {
|
||||||
return c.EndpointsInterface
|
return c.EndpointsInterface
|
||||||
}
|
}
|
||||||
func (c Client) Jobs(namespace string) unversioned.JobInterface {
|
func (c Client) Jobs(namespace string) v1batch.JobInterface {
|
||||||
return c.JobInterface
|
return c.JobInterface
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2,12 +2,12 @@ package mocks
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
v1beta1extensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
api "k8s.io/client-go/pkg/api"
|
||||||
unv "k8s.io/kubernetes/pkg/api/unversioned"
|
unversioned "k8s.io/client-go/pkg/api/unversioned"
|
||||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
v1 "k8s.io/client-go/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
|
||||||
"k8s.io/kubernetes/pkg/watch"
|
"k8s.io/client-go/pkg/watch"
|
||||||
)
|
)
|
||||||
|
|
||||||
type dClient struct {
|
type dClient struct {
|
||||||
@ -18,9 +18,9 @@ func (d dClient) Get(name string) (*extensions.DaemonSet, error) {
|
|||||||
return nil, fmt.Errorf("Mock daemonset didnt work")
|
return nil, fmt.Errorf("Mock daemonset didnt work")
|
||||||
}
|
}
|
||||||
ds := &extensions.DaemonSet{
|
ds := &extensions.DaemonSet{
|
||||||
ObjectMeta: api.ObjectMeta{Name: name},
|
ObjectMeta: v1.ObjectMeta{Name: name},
|
||||||
Spec: extensions.DaemonSetSpec{
|
Spec: extensions.DaemonSetSpec{
|
||||||
Selector: &unv.LabelSelector{
|
Selector: &unversioned.LabelSelector{
|
||||||
MatchLabels: map[string]string{"name": "test"},
|
MatchLabels: map[string]string{"name": "test"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -31,10 +31,10 @@ func (d dClient) Create(ds *extensions.DaemonSet) (*extensions.DaemonSet, error)
|
|||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d dClient) Delete(name string) error {
|
func (d dClient) Delete(name string, options *v1.DeleteOptions) error {
|
||||||
return fmt.Errorf("Not implemented")
|
return fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
func (d dClient) List(options api.ListOptions) (*extensions.DaemonSetList, error) {
|
func (d dClient) List(options v1.ListOptions) (*extensions.DaemonSetList, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -46,10 +46,18 @@ func (d dClient) UpdateStatus(ds *extensions.DaemonSet) (*extensions.DaemonSet,
|
|||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d dClient) Watch(options api.ListOptions) (watch.Interface, error) {
|
func (d dClient) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||||
|
return fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d dClient) Watch(options v1.ListOptions) (watch.Interface, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDSClient() unversioned.DaemonSetInterface {
|
func (d dClient) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *extensions.DaemonSet, err error) {
|
||||||
|
return nil, fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDSClient() v1beta1extensions.DaemonSetInterface {
|
||||||
return dClient{}
|
return dClient{}
|
||||||
}
|
}
|
||||||
|
@ -2,59 +2,68 @@ package mocks
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
api "k8s.io/client-go/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/client/restclient"
|
v1 "k8s.io/client-go/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
"k8s.io/client-go/pkg/watch"
|
||||||
"k8s.io/kubernetes/pkg/watch"
|
"k8s.io/client-go/rest"
|
||||||
)
|
)
|
||||||
|
|
||||||
type eClient struct {
|
type eClient struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e eClient) Get(name string) (*api.Endpoints, error) {
|
func (e eClient) Get(name string) (*v1.Endpoints, error) {
|
||||||
if name != "lgtm" {
|
if name != "lgtm" {
|
||||||
return nil, fmt.Errorf("Mock endpoint didnt work")
|
return nil, fmt.Errorf("Mock endpoint didnt work")
|
||||||
}
|
}
|
||||||
endpoint := &api.Endpoints{
|
endpoint := &v1.Endpoints{
|
||||||
ObjectMeta: api.ObjectMeta{Name: name},
|
ObjectMeta: v1.ObjectMeta{Name: name},
|
||||||
Subsets: []api.EndpointSubset{
|
Subsets: []v1.EndpointSubset{
|
||||||
api.EndpointSubset{
|
v1.EndpointSubset{
|
||||||
Addresses: []api.EndpointAddress{
|
Addresses: []v1.EndpointAddress{
|
||||||
api.EndpointAddress{IP: "127.0.0.1"},
|
v1.EndpointAddress{IP: "127.0.0.1"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
return endpoint, nil
|
return endpoint, nil
|
||||||
}
|
}
|
||||||
func (e eClient) Create(ds *api.Endpoints) (*api.Endpoints, error) {
|
func (e eClient) Create(ds *v1.Endpoints) (*v1.Endpoints, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e eClient) Delete(name string) error {
|
func (e eClient) Delete(name string, options *v1.DeleteOptions) error {
|
||||||
return fmt.Errorf("Not implemented")
|
return fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
func (e eClient) List(options api.ListOptions) (*api.EndpointsList, error) {
|
|
||||||
|
func (e eClient) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||||
|
return fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e eClient) List(options v1.ListOptions) (*v1.EndpointsList, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e eClient) Update(ds *api.Endpoints) (*api.Endpoints, error) {
|
func (e eClient) Update(ds *v1.Endpoints) (*v1.Endpoints, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s eClient) UpdateStatus(ds *api.Endpoints) (*api.Endpoints, error) {
|
func (s eClient) UpdateStatus(ds *v1.Endpoints) (*v1.Endpoints, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e eClient) Watch(options api.ListOptions) (watch.Interface, error) {
|
func (e eClient) Watch(options v1.ListOptions) (watch.Interface, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e eClient) ProxyGet(scheme string, name string, port string, path string, params map[string]string) restclient.ResponseWrapper {
|
func (e eClient) ProxyGet(scheme string, name string, port string, path string, params map[string]string) rest.ResponseWrapper {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEClient() unversioned.EndpointsInterface {
|
func (e eClient) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) {
|
||||||
|
return nil, fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEClient() v1core.EndpointsInterface {
|
||||||
return eClient{}
|
return eClient{}
|
||||||
}
|
}
|
||||||
|
24
mocks/job.go
24
mocks/job.go
@ -2,11 +2,11 @@ package mocks
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
v1batch "k8s.io/client-go/kubernetes/typed/batch/v1"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
api "k8s.io/client-go/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/apis/batch"
|
v1 "k8s.io/client-go/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
batch "k8s.io/client-go/pkg/apis/batch/v1"
|
||||||
"k8s.io/kubernetes/pkg/watch"
|
"k8s.io/client-go/pkg/watch"
|
||||||
)
|
)
|
||||||
|
|
||||||
type jClient struct {
|
type jClient struct {
|
||||||
@ -29,10 +29,13 @@ func (j jClient) Create(job *batch.Job) (*batch.Job, error) {
|
|||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j jClient) Delete(name string, opts *api.DeleteOptions) error {
|
func (j jClient) Delete(name string, opts *v1.DeleteOptions) error {
|
||||||
return fmt.Errorf("Not implemented")
|
return fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
func (j jClient) List(options api.ListOptions) (*batch.JobList, error) {
|
func (j jClient) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||||
|
return fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
func (j jClient) List(options v1.ListOptions) (*batch.JobList, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -44,10 +47,13 @@ func (j jClient) UpdateStatus(job *batch.Job) (*batch.Job, error) {
|
|||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j jClient) Watch(options api.ListOptions) (watch.Interface, error) {
|
func (j jClient) Watch(options v1.ListOptions) (watch.Interface, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewJClient() unversioned.JobInterface {
|
func (j jClient) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *batch.Job, err error) {
|
||||||
|
return nil, fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
func NewJClient() v1batch.JobInterface {
|
||||||
return jClient{}
|
return jClient{}
|
||||||
}
|
}
|
||||||
|
79
mocks/pod.go
79
mocks/pod.go
@ -2,22 +2,23 @@ package mocks
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
api "k8s.io/client-go/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/client/restclient"
|
v1 "k8s.io/client-go/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
policy "k8s.io/client-go/pkg/apis/policy/v1alpha1"
|
||||||
"k8s.io/kubernetes/pkg/watch"
|
"k8s.io/client-go/pkg/watch"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
)
|
)
|
||||||
|
|
||||||
type pClient struct {
|
type pClient struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p pClient) Get(name string) (*api.Pod, error) {
|
func (p pClient) Get(name string) (*v1.Pod, error) {
|
||||||
return &api.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{Name: name},
|
ObjectMeta: v1.ObjectMeta{Name: name},
|
||||||
Status: api.PodStatus{
|
Status: v1.PodStatus{
|
||||||
ContainerStatuses: []api.ContainerStatus{
|
ContainerStatuses: []v1.ContainerStatus{
|
||||||
api.ContainerStatus{
|
v1.ContainerStatus{
|
||||||
Name: "container_test",
|
Name: "container_test",
|
||||||
Ready: true,
|
Ready: true,
|
||||||
},
|
},
|
||||||
@ -27,28 +28,33 @@ func (p pClient) Get(name string) (*api.Pod, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
func (p pClient) Create(pod *api.Pod) (*api.Pod, error) {
|
func (p pClient) Create(pod *v1.Pod) (*v1.Pod, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p pClient) Delete(name string, options *api.DeleteOptions) error {
|
func (p pClient) Delete(name string, options *v1.DeleteOptions) error {
|
||||||
return fmt.Errorf("Not implemented")
|
return fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
func (p pClient) List(options api.ListOptions) (*api.PodList, error) {
|
|
||||||
return &api.PodList{
|
func (p pClient) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||||
Items: []api.Pod{
|
return fmt.Errorf("Not implemented")
|
||||||
api.Pod{
|
}
|
||||||
ObjectMeta: api.ObjectMeta{Name: "podList"},
|
|
||||||
Status: api.PodStatus{
|
func (p pClient) List(options v1.ListOptions) (*v1.PodList, error) {
|
||||||
|
return &v1.PodList{
|
||||||
|
Items: []v1.Pod{
|
||||||
|
v1.Pod{
|
||||||
|
ObjectMeta: v1.ObjectMeta{Name: "podList"},
|
||||||
|
Status: v1.PodStatus{
|
||||||
HostIP: "127.0.01",
|
HostIP: "127.0.01",
|
||||||
Conditions: []api.PodCondition{
|
Conditions: []v1.PodCondition{
|
||||||
api.PodCondition{
|
v1.PodCondition{
|
||||||
Type: api.PodReady,
|
Type: v1.PodReady,
|
||||||
Status: "True",
|
Status: "True",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ContainerStatuses: []api.ContainerStatus{
|
ContainerStatuses: []v1.ContainerStatus{
|
||||||
api.ContainerStatus{
|
v1.ContainerStatus{
|
||||||
Name: "container_test",
|
Name: "container_test",
|
||||||
Ready: true,
|
Ready: true,
|
||||||
},
|
},
|
||||||
@ -60,24 +66,33 @@ func (p pClient) List(options api.ListOptions) (*api.PodList, error) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p pClient) Update(pod *api.Pod) (*api.Pod, error) {
|
func (p pClient) Update(pod *v1.Pod) (*v1.Pod, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p pClient) UpdateStatus(pod *api.Pod) (*api.Pod, error) {
|
func (p pClient) UpdateStatus(pod *v1.Pod) (*v1.Pod, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p pClient) Watch(options api.ListOptions) (watch.Interface, error) {
|
func (p pClient) Watch(options v1.ListOptions) (watch.Interface, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p pClient) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request {
|
func (p pClient) Bind(binding *v1.Binding) error {
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func (p pClient) Bind(binding *api.Binding) error {
|
|
||||||
return fmt.Errorf("Not implemented")
|
return fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
func NewPClient() unversioned.PodInterface {
|
|
||||||
|
func (p pClient) Evict(eviction *policy.Eviction) error {
|
||||||
|
return fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p pClient) GetLogs(name string, opts *v1.PodLogOptions) *rest.Request {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p pClient) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) {
|
||||||
|
return nil, fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
func NewPClient() v1core.PodInterface {
|
||||||
return pClient{}
|
return pClient{}
|
||||||
}
|
}
|
||||||
|
@ -3,50 +3,60 @@ package mocks
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/restclient"
|
api "k8s.io/client-go/pkg/api"
|
||||||
"k8s.io/kubernetes/pkg/client/unversioned"
|
"k8s.io/client-go/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/watch"
|
"k8s.io/client-go/pkg/watch"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
)
|
)
|
||||||
|
|
||||||
type sClient struct {
|
type sClient struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s sClient) Get(name string) (*api.Service, error) {
|
func (s sClient) Get(name string) (*v1.Service, error) {
|
||||||
if name != "lgtm" {
|
if name != "lgtm" {
|
||||||
return nil, fmt.Errorf("Mock service didnt work")
|
return nil, fmt.Errorf("Mock service didnt work")
|
||||||
}
|
}
|
||||||
return &api.Service{
|
return &v1.Service{
|
||||||
ObjectMeta: api.ObjectMeta{Name: name},
|
ObjectMeta: v1.ObjectMeta{Name: name},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
func (s sClient) Create(ds *api.Service) (*api.Service, error) {
|
func (s sClient) Create(ds *v1.Service) (*v1.Service, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s sClient) Delete(name string) error {
|
func (s sClient) Delete(name string, options *v1.DeleteOptions) error {
|
||||||
return fmt.Errorf("Not implemented")
|
return fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
func (s sClient) List(options api.ListOptions) (*api.ServiceList, error) {
|
|
||||||
|
func (s sClient) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||||
|
return fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s sClient) List(options v1.ListOptions) (*v1.ServiceList, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s sClient) Update(ds *api.Service) (*api.Service, error) {
|
func (s sClient) Update(ds *v1.Service) (*v1.Service, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s sClient) UpdateStatus(ds *api.Service) (*api.Service, error) {
|
func (s sClient) UpdateStatus(ds *v1.Service) (*v1.Service, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s sClient) Watch(options api.ListOptions) (watch.Interface, error) {
|
func (s sClient) Watch(options v1.ListOptions) (watch.Interface, error) {
|
||||||
return nil, fmt.Errorf("Not implemented")
|
return nil, fmt.Errorf("Not implemented")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s sClient) ProxyGet(scheme string, name string, port string, path string, params map[string]string) restclient.ResponseWrapper {
|
func (s sClient) ProxyGet(scheme string, name string, port string, path string, params map[string]string) rest.ResponseWrapper {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewSClient() unversioned.ServiceInterface {
|
func (s sClient) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) {
|
||||||
|
return nil, fmt.Errorf("Not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSClient() v1core.ServiceInterface {
|
||||||
return sClient{}
|
return sClient{}
|
||||||
}
|
}
|
||||||
|
77
vendor/github.com/blang/semver/README.md
generated
vendored
77
vendor/github.com/blang/semver/README.md
generated
vendored
@ -40,52 +40,10 @@ Features
|
|||||||
- Comparator-like comparisons
|
- Comparator-like comparisons
|
||||||
- Compare Helper Methods
|
- Compare Helper Methods
|
||||||
- InPlace manipulation
|
- InPlace manipulation
|
||||||
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
|
|
||||||
- Sortable (implements sort.Interface)
|
- Sortable (implements sort.Interface)
|
||||||
- database/sql compatible (sql.Scanner/Valuer)
|
- database/sql compatible (sql.Scanner/Valuer)
|
||||||
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
||||||
|
|
||||||
Ranges
|
|
||||||
------
|
|
||||||
|
|
||||||
A `Range` is a set of conditions which specify which versions satisfy the range.
|
|
||||||
|
|
||||||
A condition is composed of an operator and a version. The supported operators are:
|
|
||||||
|
|
||||||
- `<1.0.0` Less than `1.0.0`
|
|
||||||
- `<=1.0.0` Less than or equal to `1.0.0`
|
|
||||||
- `>1.0.0` Greater than `1.0.0`
|
|
||||||
- `>=1.0.0` Greater than or equal to `1.0.0`
|
|
||||||
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
|
|
||||||
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
|
|
||||||
|
|
||||||
A `Range` can link multiple `Ranges` separated by space:
|
|
||||||
|
|
||||||
Ranges can be linked by logical AND:
|
|
||||||
|
|
||||||
- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
|
|
||||||
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
|
|
||||||
|
|
||||||
Ranges can also be linked by logical OR:
|
|
||||||
|
|
||||||
- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
|
|
||||||
|
|
||||||
AND has a higher precedence than OR. It's not possible to use brackets.
|
|
||||||
|
|
||||||
Ranges can be combined by both AND and OR
|
|
||||||
|
|
||||||
- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
|
||||||
|
|
||||||
Range usage:
|
|
||||||
|
|
||||||
```
|
|
||||||
v, err := semver.Parse("1.2.3")
|
|
||||||
range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
|
|
||||||
if range(v) {
|
|
||||||
//valid
|
|
||||||
}
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Example
|
Example
|
||||||
-----
|
-----
|
||||||
@ -145,30 +103,23 @@ if err != nil {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
Benchmarks
|
Benchmarks
|
||||||
-----
|
-----
|
||||||
|
|
||||||
BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
|
BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
|
||||||
BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
|
BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
|
||||||
BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
|
BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
|
||||||
BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
|
BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
|
||||||
BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
|
BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
|
||||||
BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
|
BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
|
||||||
BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
|
BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
|
||||||
BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
|
BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
|
||||||
BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
|
BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
|
||||||
BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
|
BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
|
||||||
BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
|
BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
|
||||||
BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
|
BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
|
||||||
BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
|
BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
|
||||||
BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
|
BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
|
||||||
BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
|
|
||||||
BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
|
|
||||||
BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
|
|
||||||
BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
|
|
||||||
BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
|
|
||||||
|
|
||||||
See benchmark cases at [semver_test.go](semver_test.go)
|
See benchmark cases at [semver_test.go](semver_test.go)
|
||||||
|
|
||||||
|
233
vendor/github.com/blang/semver/range.go
generated
vendored
233
vendor/github.com/blang/semver/range.go
generated
vendored
@ -1,233 +0,0 @@
|
|||||||
package semver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
type comparator func(Version, Version) bool
|
|
||||||
|
|
||||||
var (
|
|
||||||
compEQ comparator = func(v1 Version, v2 Version) bool {
|
|
||||||
return v1.Compare(v2) == 0
|
|
||||||
}
|
|
||||||
compNE = func(v1 Version, v2 Version) bool {
|
|
||||||
return v1.Compare(v2) != 0
|
|
||||||
}
|
|
||||||
compGT = func(v1 Version, v2 Version) bool {
|
|
||||||
return v1.Compare(v2) == 1
|
|
||||||
}
|
|
||||||
compGE = func(v1 Version, v2 Version) bool {
|
|
||||||
return v1.Compare(v2) >= 0
|
|
||||||
}
|
|
||||||
compLT = func(v1 Version, v2 Version) bool {
|
|
||||||
return v1.Compare(v2) == -1
|
|
||||||
}
|
|
||||||
compLE = func(v1 Version, v2 Version) bool {
|
|
||||||
return v1.Compare(v2) <= 0
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
type versionRange struct {
|
|
||||||
v Version
|
|
||||||
c comparator
|
|
||||||
}
|
|
||||||
|
|
||||||
// rangeFunc creates a Range from the given versionRange.
|
|
||||||
func (vr *versionRange) rangeFunc() Range {
|
|
||||||
return Range(func(v Version) bool {
|
|
||||||
return vr.c(v, vr.v)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range represents a range of versions.
|
|
||||||
// A Range can be used to check if a Version satisfies it:
|
|
||||||
//
|
|
||||||
// range, err := semver.ParseRange(">1.0.0 <2.0.0")
|
|
||||||
// range(semver.MustParse("1.1.1") // returns true
|
|
||||||
type Range func(Version) bool
|
|
||||||
|
|
||||||
// OR combines the existing Range with another Range using logical OR.
|
|
||||||
func (rf Range) OR(f Range) Range {
|
|
||||||
return Range(func(v Version) bool {
|
|
||||||
return rf(v) || f(v)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// AND combines the existing Range with another Range using logical AND.
|
|
||||||
func (rf Range) AND(f Range) Range {
|
|
||||||
return Range(func(v Version) bool {
|
|
||||||
return rf(v) && f(v)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseRange parses a range and returns a Range.
|
|
||||||
// If the range could not be parsed an error is returned.
|
|
||||||
//
|
|
||||||
// Valid ranges are:
|
|
||||||
// - "<1.0.0"
|
|
||||||
// - "<=1.0.0"
|
|
||||||
// - ">1.0.0"
|
|
||||||
// - ">=1.0.0"
|
|
||||||
// - "1.0.0", "=1.0.0", "==1.0.0"
|
|
||||||
// - "!1.0.0", "!=1.0.0"
|
|
||||||
//
|
|
||||||
// A Range can consist of multiple ranges separated by space:
|
|
||||||
// Ranges can be linked by logical AND:
|
|
||||||
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
|
|
||||||
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
|
|
||||||
//
|
|
||||||
// Ranges can also be linked by logical OR:
|
|
||||||
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
|
|
||||||
//
|
|
||||||
// AND has a higher precedence than OR. It's not possible to use brackets.
|
|
||||||
//
|
|
||||||
// Ranges can be combined by both AND and OR
|
|
||||||
//
|
|
||||||
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
|
||||||
func ParseRange(s string) (Range, error) {
|
|
||||||
parts := splitAndTrim(s)
|
|
||||||
orParts, err := splitORParts(parts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var orFn Range
|
|
||||||
for _, p := range orParts {
|
|
||||||
var andFn Range
|
|
||||||
for _, ap := range p {
|
|
||||||
opStr, vStr, err := splitComparatorVersion(ap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
vr, err := buildVersionRange(opStr, vStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
|
|
||||||
}
|
|
||||||
rf := vr.rangeFunc()
|
|
||||||
|
|
||||||
// Set function
|
|
||||||
if andFn == nil {
|
|
||||||
andFn = rf
|
|
||||||
} else { // Combine with existing function
|
|
||||||
andFn = andFn.AND(rf)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if orFn == nil {
|
|
||||||
orFn = andFn
|
|
||||||
} else {
|
|
||||||
orFn = orFn.OR(andFn)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return orFn, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// splitORParts splits the already cleaned parts by '||'.
|
|
||||||
// Checks for invalid positions of the operator and returns an
|
|
||||||
// error if found.
|
|
||||||
func splitORParts(parts []string) ([][]string, error) {
|
|
||||||
var ORparts [][]string
|
|
||||||
last := 0
|
|
||||||
for i, p := range parts {
|
|
||||||
if p == "||" {
|
|
||||||
if i == 0 {
|
|
||||||
return nil, fmt.Errorf("First element in range is '||'")
|
|
||||||
}
|
|
||||||
ORparts = append(ORparts, parts[last:i])
|
|
||||||
last = i + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if last == len(parts) {
|
|
||||||
return nil, fmt.Errorf("Last element in range is '||'")
|
|
||||||
}
|
|
||||||
ORparts = append(ORparts, parts[last:])
|
|
||||||
return ORparts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildVersionRange takes a slice of 2: operator and version
|
|
||||||
// and builds a versionRange, otherwise an error.
|
|
||||||
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
|
|
||||||
c := parseComparator(opStr)
|
|
||||||
if c == nil {
|
|
||||||
return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
|
|
||||||
}
|
|
||||||
v, err := Parse(vStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &versionRange{
|
|
||||||
v: v,
|
|
||||||
c: c,
|
|
||||||
}, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// splitAndTrim splits a range string by spaces and cleans leading and trailing spaces
|
|
||||||
func splitAndTrim(s string) (result []string) {
|
|
||||||
last := 0
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if s[i] == ' ' {
|
|
||||||
if last < i-1 {
|
|
||||||
result = append(result, s[last:i])
|
|
||||||
}
|
|
||||||
last = i + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if last < len(s)-1 {
|
|
||||||
result = append(result, s[last:])
|
|
||||||
}
|
|
||||||
// parts := strings.Split(s, " ")
|
|
||||||
// for _, x := range parts {
|
|
||||||
// if s := strings.TrimSpace(x); len(s) != 0 {
|
|
||||||
// result = append(result, s)
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// splitComparatorVersion splits the comparator from the version.
|
|
||||||
// Spaces between the comparator and the version are not allowed.
|
|
||||||
// Input must be free of leading or trailing spaces.
|
|
||||||
func splitComparatorVersion(s string) (string, string, error) {
|
|
||||||
i := strings.IndexFunc(s, unicode.IsDigit)
|
|
||||||
if i == -1 {
|
|
||||||
return "", "", fmt.Errorf("Could not get version from string: %q", s)
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(s[0:i]), s[i:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseComparator(s string) comparator {
|
|
||||||
switch s {
|
|
||||||
case "==":
|
|
||||||
fallthrough
|
|
||||||
case "":
|
|
||||||
fallthrough
|
|
||||||
case "=":
|
|
||||||
return compEQ
|
|
||||||
case ">":
|
|
||||||
return compGT
|
|
||||||
case ">=":
|
|
||||||
return compGE
|
|
||||||
case "<":
|
|
||||||
return compLT
|
|
||||||
case "<=":
|
|
||||||
return compLE
|
|
||||||
case "!":
|
|
||||||
fallthrough
|
|
||||||
case "!=":
|
|
||||||
return compNE
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustParseRange is like ParseRange but panics if the range cannot be parsed.
|
|
||||||
func MustParseRange(s string) Range {
|
|
||||||
r, err := ParseRange(s)
|
|
||||||
if err != nil {
|
|
||||||
panic(`semver: ParseRange(` + s + `): ` + err.Error())
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
23
vendor/github.com/blang/semver/semver.go
generated
vendored
23
vendor/github.com/blang/semver/semver.go
generated
vendored
@ -200,29 +200,6 @@ func Make(s string) (Version, error) {
|
|||||||
return Parse(s)
|
return Parse(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
|
|
||||||
// specs to be parsed by this library. It does so by normalizing versions before passing them to
|
|
||||||
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
|
|
||||||
// with only major and minor components specified
|
|
||||||
func ParseTolerant(s string) (Version, error) {
|
|
||||||
s = strings.TrimSpace(s)
|
|
||||||
s = strings.TrimPrefix(s, "v")
|
|
||||||
|
|
||||||
// Split into major.minor.(patch+pr+meta)
|
|
||||||
parts := strings.SplitN(s, ".", 3)
|
|
||||||
if len(parts) < 3 {
|
|
||||||
if strings.ContainsAny(parts[len(parts)-1], "+-") {
|
|
||||||
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
|
|
||||||
}
|
|
||||||
for len(parts) < 3 {
|
|
||||||
parts = append(parts, "0")
|
|
||||||
}
|
|
||||||
s = strings.Join(parts, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
return Parse(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse parses version string and returns a validated Version or error
|
// Parse parses version string and returns a validated Version or error
|
||||||
func Parse(s string) (Version, error) {
|
func Parse(s string) (Version, error) {
|
||||||
if len(s) == 0 {
|
if len(s) == 0 {
|
||||||
|
1
vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
generated
vendored
1
vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
generated
vendored
@ -1 +0,0 @@
|
|||||||
*.coverprofile
|
|
12
vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
generated
vendored
12
vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4.1
|
|
||||||
|
|
||||||
install:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- go install github.com/onsi/ginkgo/ginkgo
|
|
||||||
|
|
||||||
script:
|
|
||||||
- export PATH=$HOME/gopath/bin:$PATH
|
|
||||||
- ginkgo -r -failOnPending -randomizeAllSpecs -race
|
|
176
vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
generated
vendored
176
vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
generated
vendored
@ -1,176 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
59
vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
generated
vendored
59
vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
generated
vendored
@ -1,59 +0,0 @@
|
|||||||
[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
|
|
||||||
[![GoDoc](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml?status.svg)](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml)
|
|
||||||
|
|
||||||
|
|
||||||
candiedyaml
|
|
||||||
===========
|
|
||||||
|
|
||||||
YAML for Go
|
|
||||||
|
|
||||||
A YAML 1.1 parser with support for YAML 1.2 features
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
```go
|
|
||||||
package myApp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/cloudfoundry-incubator/candiedyaml"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
file, err := os.Open("path/to/some/file.yml")
|
|
||||||
if err != nil {
|
|
||||||
println("File does not exist:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
document := new(interface{})
|
|
||||||
decoder := candiedyaml.NewDecoder(file)
|
|
||||||
err = decoder.Decode(document)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to decode document:", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
println("parsed yml into interface:", fmt.Sprintf("%#v", document))
|
|
||||||
|
|
||||||
fileToWrite, err := os.Create("path/to/some/new/file.yml")
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to open file for writing:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer fileToWrite.Close()
|
|
||||||
|
|
||||||
encoder := candiedyaml.NewEncoder(fileToWrite)
|
|
||||||
err = encoder.Encode(document)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
println("Failed to encode document:", err.Error())
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
```
|
|
834
vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
generated
vendored
834
vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
generated
vendored
@ -1,834 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create a new parser object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
|
||||||
*parser = yaml_parser_t{
|
|
||||||
raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
|
|
||||||
buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy a parser object.
|
|
||||||
*/
|
|
||||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
|
||||||
*parser = yaml_parser_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* String read handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
|
|
||||||
if parser.input_pos == len(parser.input) {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
n := copy(buffer, parser.input[parser.input_pos:])
|
|
||||||
parser.input_pos += n
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* File read handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
|
|
||||||
return parser.input_reader.Read(buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a string input.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = yaml_string_read_handler
|
|
||||||
|
|
||||||
parser.input = input
|
|
||||||
parser.input_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a reader input
|
|
||||||
*/
|
|
||||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = yaml_file_read_handler
|
|
||||||
parser.input_reader = reader
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a generic input.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
|
|
||||||
if parser.read_handler != nil {
|
|
||||||
panic("input already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.read_handler = handler
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the source encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
|
||||||
if parser.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("encoding already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create a new emitter object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{
|
|
||||||
buffer: make([]byte, OUTPUT_BUFFER_SIZE),
|
|
||||||
raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
|
|
||||||
states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
|
|
||||||
events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
|
||||||
*emitter = yaml_emitter_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* String write handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* File write handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
|
||||||
_, err := emitter.output_writer.Write(buffer)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a string output.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = yaml_string_write_handler
|
|
||||||
emitter.output_buffer = buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a file output.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = yaml_writer_write_handler
|
|
||||||
emitter.output_writer = w
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set a generic output handler.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
|
|
||||||
if emitter.write_handler != nil {
|
|
||||||
panic("output already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.write_handler = handler
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the output encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
|
||||||
if emitter.encoding != yaml_ANY_ENCODING {
|
|
||||||
panic("encoding already set")
|
|
||||||
}
|
|
||||||
|
|
||||||
emitter.encoding = encoding
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the canonical output style.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
|
||||||
emitter.canonical = canonical
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the indentation increment.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
|
||||||
if indent < 2 || indent > 9 {
|
|
||||||
indent = 2
|
|
||||||
}
|
|
||||||
emitter.best_indent = indent
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the preferred line width.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
|
||||||
if width < 0 {
|
|
||||||
width = -1
|
|
||||||
}
|
|
||||||
emitter.best_width = width
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set if unescaped non-ASCII characters are allowed.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
|
||||||
emitter.unicode = unicode
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the preferred line break character.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
|
||||||
emitter.line_break = line_break
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy a token object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_token_delete(yaml_token_t *token)
|
|
||||||
// {
|
|
||||||
// assert(token); /* Non-NULL token object expected. */
|
|
||||||
//
|
|
||||||
// switch (token.type)
|
|
||||||
// {
|
|
||||||
// case yaml_TAG_DIRECTIVE_TOKEN:
|
|
||||||
// yaml_free(token.data.tag_directive.handle);
|
|
||||||
// yaml_free(token.data.tag_directive.prefix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_ALIAS_TOKEN:
|
|
||||||
// yaml_free(token.data.alias.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_ANCHOR_TOKEN:
|
|
||||||
// yaml_free(token.data.anchor.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_TAG_TOKEN:
|
|
||||||
// yaml_free(token.data.tag.handle);
|
|
||||||
// yaml_free(token.data.tag.suffix);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// case yaml_SCALAR_TOKEN:
|
|
||||||
// yaml_free(token.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// default:
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// memset(token, 0, sizeof(yaml_token_t));
|
|
||||||
// }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if a string is a valid UTF-8 sequence.
|
|
||||||
*
|
|
||||||
* Check 'reader.c' for more details on UTF-8 encoding.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// static int
|
|
||||||
// yaml_check_utf8(yaml_char_t *start, size_t length)
|
|
||||||
// {
|
|
||||||
// yaml_char_t *end = start+length;
|
|
||||||
// yaml_char_t *pointer = start;
|
|
||||||
//
|
|
||||||
// while (pointer < end) {
|
|
||||||
// unsigned char octet;
|
|
||||||
// unsigned int width;
|
|
||||||
// unsigned int value;
|
|
||||||
// size_t k;
|
|
||||||
//
|
|
||||||
// octet = pointer[0];
|
|
||||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
|
||||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
|
||||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
|
||||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
|
||||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
|
||||||
// if (!width) return 0;
|
|
||||||
// if (pointer+width > end) return 0;
|
|
||||||
// for (k = 1; k < width; k ++) {
|
|
||||||
// octet = pointer[k];
|
|
||||||
// if ((octet & 0xC0) != 0x80) return 0;
|
|
||||||
// value = (value << 6) + (octet & 0x3F);
|
|
||||||
// }
|
|
||||||
// if (!((width == 1) ||
|
|
||||||
// (width == 2 && value >= 0x80) ||
|
|
||||||
// (width == 3 && value >= 0x800) ||
|
|
||||||
// (width == 4 && value >= 0x10000))) return 0;
|
|
||||||
//
|
|
||||||
// pointer += width;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create STREAM-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_stream_start_event_initialize resets *event to a STREAM-START event
// carrying the given character encoding. Any previous contents of *event
// are discarded.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
	*event = yaml_event_t{
		event_type: yaml_STREAM_START_EVENT,
		encoding:   encoding,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create STREAM-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_stream_end_event_initialize resets *event to a STREAM-END event.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		event_type: yaml_STREAM_END_EVENT,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create DOCUMENT-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_document_start_event_initialize resets *event to a DOCUMENT-START
// event carrying the %YAML version directive (may be nil), the %TAG
// directives, and the implicit flag.
func yaml_document_start_event_initialize(event *yaml_event_t,
	version_directive *yaml_version_directive_t,
	tag_directives []yaml_tag_directive_t,
	implicit bool) {
	*event = yaml_event_t{
		event_type:        yaml_DOCUMENT_START_EVENT,
		version_directive: version_directive,
		tag_directives:    tag_directives,
		implicit:          implicit,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create DOCUMENT-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_document_end_event_initialize resets *event to a DOCUMENT-END event
// with the given implicit flag.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
	*event = yaml_event_t{
		event_type: yaml_DOCUMENT_END_EVENT,
		implicit:   implicit,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create ALIAS.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_alias_event_initialize resets *event to an ALIAS event referring to
// the given anchor name.
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
	*event = yaml_event_t{
		event_type: yaml_ALIAS_EVENT,
		anchor:     anchor,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SCALAR.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_scalar_event_initialize resets *event to a SCALAR event with the
// given anchor, tag, value, implicit flags, and scalar style.
// Note that plain_implicit is stored in the event's shared `implicit`
// field, and the style is widened to the generic yaml_style_t.
func yaml_scalar_event_initialize(event *yaml_event_t,
	anchor []byte, tag []byte,
	value []byte,
	plain_implicit bool, quoted_implicit bool,
	style yaml_scalar_style_t) {

	*event = yaml_event_t{
		event_type:      yaml_SCALAR_EVENT,
		anchor:          anchor,
		tag:             tag,
		value:           value,
		implicit:        plain_implicit,
		quoted_implicit: quoted_implicit,
		style:           yaml_style_t(style),
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SEQUENCE-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_sequence_start_event_initialize resets *event to a SEQUENCE-START
// event with the given anchor, tag, implicit flag, and sequence style.
func yaml_sequence_start_event_initialize(event *yaml_event_t,
	anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
	*event = yaml_event_t{
		event_type: yaml_SEQUENCE_START_EVENT,
		anchor:     anchor,
		tag:        tag,
		implicit:   implicit,
		style:      yaml_style_t(style),
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create SEQUENCE-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_sequence_end_event_initialize resets *event to a SEQUENCE-END event.
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		event_type: yaml_SEQUENCE_END_EVENT,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create MAPPING-START.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_mapping_start_event_initialize resets *event to a MAPPING-START
// event with the given anchor, tag, implicit flag, and mapping style.
func yaml_mapping_start_event_initialize(event *yaml_event_t,
	anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
	*event = yaml_event_t{
		event_type: yaml_MAPPING_START_EVENT,
		anchor:     anchor,
		tag:        tag,
		implicit:   implicit,
		style:      yaml_style_t(style),
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Create MAPPING-END.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// yaml_mapping_end_event_initialize resets *event to a MAPPING-END event.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		event_type: yaml_MAPPING_END_EVENT,
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Destroy an event object.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_event_delete(event *yaml_event_t) {
|
|
||||||
*event = yaml_event_t{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Create a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// func yaml_document_initialize(document *yaml_document_t,
|
|
||||||
// version_directive *yaml_version_directive_t,
|
|
||||||
// tag_directives []yaml_tag_directive_t,
|
|
||||||
// start_implicit, end_implicit bool) bool {
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_t *start;
|
|
||||||
// yaml_node_t *end;
|
|
||||||
// yaml_node_t *top;
|
|
||||||
// } nodes = { NULL, NULL, NULL };
|
|
||||||
// yaml_version_directive_t *version_directive_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_tag_directive_t *start;
|
|
||||||
// yaml_tag_directive_t *end;
|
|
||||||
// yaml_tag_directive_t *top;
|
|
||||||
// } tag_directives_copy = { NULL, NULL, NULL };
|
|
||||||
// yaml_tag_directive_t value = { NULL, NULL };
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
// assert((tag_directives_start && tag_directives_end) ||
|
|
||||||
// (tag_directives_start == tag_directives_end));
|
|
||||||
// /* Valid tag directives are expected. */
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// if (version_directive) {
|
|
||||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
|
|
||||||
// if (!version_directive_copy) goto error;
|
|
||||||
// version_directive_copy.major = version_directive.major;
|
|
||||||
// version_directive_copy.minor = version_directive.minor;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (tag_directives_start != tag_directives_end) {
|
|
||||||
// yaml_tag_directive_t *tag_directive;
|
|
||||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
|
||||||
// goto error;
|
|
||||||
// for (tag_directive = tag_directives_start;
|
|
||||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
|
||||||
// assert(tag_directive.handle);
|
|
||||||
// assert(tag_directive.prefix);
|
|
||||||
// if (!yaml_check_utf8(tag_directive.handle,
|
|
||||||
// strlen((char *)tag_directive.handle)))
|
|
||||||
// goto error;
|
|
||||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
|
||||||
// strlen((char *)tag_directive.prefix)))
|
|
||||||
// goto error;
|
|
||||||
// value.handle = yaml_strdup(tag_directive.handle);
|
|
||||||
// value.prefix = yaml_strdup(tag_directive.prefix);
|
|
||||||
// if (!value.handle || !value.prefix) goto error;
|
|
||||||
// if (!PUSH(&context, tag_directives_copy, value))
|
|
||||||
// goto error;
|
|
||||||
// value.handle = NULL;
|
|
||||||
// value.prefix = NULL;
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
|
||||||
// tag_directives_copy.start, tag_directives_copy.top,
|
|
||||||
// start_implicit, end_implicit, mark, mark);
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, nodes);
|
|
||||||
// yaml_free(version_directive_copy);
|
|
||||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
|
||||||
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
|
|
||||||
// yaml_free(value.handle);
|
|
||||||
// yaml_free(value.prefix);
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, tag_directives_copy);
|
|
||||||
// yaml_free(value.handle);
|
|
||||||
// yaml_free(value.prefix);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Destroy a document object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_document_delete(document *yaml_document_t)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// yaml_tag_directive_t *tag_directive;
|
|
||||||
//
|
|
||||||
// context.error = yaml_NO_ERROR; /* Eliminate a compliler warning. */
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
|
||||||
// yaml_node_t node = POP(&context, document.nodes);
|
|
||||||
// yaml_free(node.tag);
|
|
||||||
// switch (node.type) {
|
|
||||||
// case yaml_SCALAR_NODE:
|
|
||||||
// yaml_free(node.data.scalar.value);
|
|
||||||
// break;
|
|
||||||
// case yaml_SEQUENCE_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.sequence.items);
|
|
||||||
// break;
|
|
||||||
// case yaml_MAPPING_NODE:
|
|
||||||
// STACK_DEL(&context, node.data.mapping.pairs);
|
|
||||||
// break;
|
|
||||||
// default:
|
|
||||||
// assert(0); /* Should not happen. */
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// STACK_DEL(&context, document.nodes);
|
|
||||||
//
|
|
||||||
// yaml_free(document.version_directive);
|
|
||||||
// for (tag_directive = document.tag_directives.start;
|
|
||||||
// tag_directive != document.tag_directives.end;
|
|
||||||
// tag_directive++) {
|
|
||||||
// yaml_free(tag_directive.handle);
|
|
||||||
// yaml_free(tag_directive.prefix);
|
|
||||||
// }
|
|
||||||
// yaml_free(document.tag_directives.start);
|
|
||||||
//
|
|
||||||
// memset(document, 0, sizeof(yaml_document_t));
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /**
|
|
||||||
// * Get a document node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_node_t *)
|
|
||||||
// yaml_document_get_node(document *yaml_document_t, int index)
|
|
||||||
// {
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
|
||||||
// return document.nodes.start + index - 1;
|
|
||||||
// }
|
|
||||||
// return NULL;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /**
|
|
||||||
// * Get the root object.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_node_t *)
|
|
||||||
// yaml_document_get_root_node(document *yaml_document_t)
|
|
||||||
// {
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (document.nodes.top != document.nodes.start) {
|
|
||||||
// return document.nodes.start;
|
|
||||||
// }
|
|
||||||
// return NULL;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a scalar node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_scalar(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_char_t *value, int length,
|
|
||||||
// yaml_scalar_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// yaml_char_t *value_copy = NULL;
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
// assert(value); /* Non-NULL value is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (length < 0) {
|
|
||||||
// length = strlen((char *)value);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(value, length)) goto error;
|
|
||||||
// value_copy = yaml_malloc(length+1);
|
|
||||||
// if (!value_copy) goto error;
|
|
||||||
// memcpy(value_copy, value, length);
|
|
||||||
// value_copy[length] = '\0';
|
|
||||||
//
|
|
||||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
// yaml_free(value_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a sequence node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_sequence(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_sequence_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_item_t *start;
|
|
||||||
// yaml_node_item_t *end;
|
|
||||||
// yaml_node_item_t *top;
|
|
||||||
// } items = { NULL, NULL, NULL };
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
|
||||||
// style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, items);
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Add a mapping node to a document.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_add_mapping(document *yaml_document_t,
|
|
||||||
// yaml_char_t *tag, yaml_mapping_style_t style)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
// YAML_mark_t mark = { 0, 0, 0 };
|
|
||||||
// yaml_char_t *tag_copy = NULL;
|
|
||||||
// struct {
|
|
||||||
// yaml_node_pair_t *start;
|
|
||||||
// yaml_node_pair_t *end;
|
|
||||||
// yaml_node_pair_t *top;
|
|
||||||
// } pairs = { NULL, NULL, NULL };
|
|
||||||
// yaml_node_t node;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document object is expected. */
|
|
||||||
//
|
|
||||||
// if (!tag) {
|
|
||||||
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
|
|
||||||
// tag_copy = yaml_strdup(tag);
|
|
||||||
// if (!tag_copy) goto error;
|
|
||||||
//
|
|
||||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
|
|
||||||
//
|
|
||||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
|
||||||
// style, mark, mark);
|
|
||||||
// if (!PUSH(&context, document.nodes, node)) goto error;
|
|
||||||
//
|
|
||||||
// return document.nodes.top - document.nodes.start;
|
|
||||||
//
|
|
||||||
// error:
|
|
||||||
// STACK_DEL(&context, pairs);
|
|
||||||
// yaml_free(tag_copy);
|
|
||||||
//
|
|
||||||
// return 0;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Append an item to a sequence node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_append_sequence_item(document *yaml_document_t,
|
|
||||||
// int sequence, int item)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document is required. */
|
|
||||||
// assert(sequence > 0
|
|
||||||
// && document.nodes.start + sequence <= document.nodes.top);
|
|
||||||
// /* Valid sequence id is required. */
|
|
||||||
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
|
|
||||||
// /* A sequence node is required. */
|
|
||||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
|
|
||||||
// /* Valid item id is required. */
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
|
||||||
// return 0;
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Append a pair of a key and a value to a mapping node.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_document_append_mapping_pair(document *yaml_document_t,
|
|
||||||
// int mapping, int key, int value)
|
|
||||||
// {
|
|
||||||
// struct {
|
|
||||||
// YAML_error_type_t error;
|
|
||||||
// } context;
|
|
||||||
//
|
|
||||||
// yaml_node_pair_t pair;
|
|
||||||
//
|
|
||||||
// assert(document); /* Non-NULL document is required. */
|
|
||||||
// assert(mapping > 0
|
|
||||||
// && document.nodes.start + mapping <= document.nodes.top);
|
|
||||||
// /* Valid mapping id is required. */
|
|
||||||
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
|
|
||||||
// /* A mapping node is required. */
|
|
||||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
|
|
||||||
// /* Valid key id is required. */
|
|
||||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
|
|
||||||
// /* Valid value id is required. */
|
|
||||||
//
|
|
||||||
// pair.key = key;
|
|
||||||
// pair.value = value;
|
|
||||||
//
|
|
||||||
// if (!PUSH(&context,
|
|
||||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
|
||||||
// return 0;
|
|
||||||
//
|
|
||||||
// return 1;
|
|
||||||
// }
|
|
||||||
//
|
|
622
vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
generated
vendored
622
vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
generated
vendored
@ -1,622 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Unmarshaler is implemented by types that customize their own decoding
// from YAML. UnmarshalYAML receives the resolved YAML tag of the node and
// the decoded raw value.
type Unmarshaler interface {
	UnmarshalYAML(tag string, value interface{}) error
}
|
|
||||||
|
|
||||||
// A Number represents a JSON number literal. It preserves the original
// textual form of the number so callers can choose how to interpret it.
type Number string

// String returns the literal text of the number.
func (n Number) String() string {
	return string(n)
}

// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
	return strconv.ParseFloat(n.String(), 64)
}

// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
	return strconv.ParseInt(n.String(), 10, 64)
}
|
|
||||||
|
|
||||||
// Decoder reads YAML events from an underlying parser and decodes them
// into Go values.
type Decoder struct {
	// parser holds the low-level YAML parser state.
	parser yaml_parser_t
	// event is the event currently being processed.
	event yaml_event_t
	// replay_events, when non-nil, is a queue of previously recorded
	// events that nextEvent serves instead of pulling from the parser
	// (used to expand anchors/aliases).
	replay_events []yaml_event_t
	// useNumber is set by UseNumber; presumably scalar decoding then
	// yields Number instead of float64/int64 (analogous to
	// encoding/json) — confirm against the scalar decoder.
	useNumber bool

	// anchors maps an anchor name to the recorded events that defined it.
	anchors map[string][]yaml_event_t
	// tracking_anchors is a stack of event captures for anchors currently
	// being recorded; see begin_anchor/end_anchor and nextEvent.
	tracking_anchors [][]yaml_event_t
}
|
|
||||||
|
|
||||||
// ParserError describes a syntax error reported by the low-level YAML
// parser, carrying both the problem and its surrounding context with
// their positions in the input.
type ParserError struct {
	ErrorType   YAML_error_type_t
	Context     string
	ContextMark YAML_mark_t
	Problem     string
	ProblemMark YAML_mark_t
}
|
|
||||||
|
|
||||||
// Error implements the error interface, formatting the problem with its
// 1-based line and column (marks are stored 0-based).
func (e *ParserError) Error() string {
	return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
}
|
|
||||||
|
|
||||||
// UnexpectedEventError reports a YAML event that arrived where the decoder
// could not handle it, along with the event's value and position.
type UnexpectedEventError struct {
	Value     string
	EventType yaml_event_type_t
	At        YAML_mark_t
}
|
|
||||||
|
|
||||||
func (e *UnexpectedEventError) Error() string {
|
|
||||||
return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func recovery(err *error) {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
if _, ok := r.(runtime.Error); ok {
|
|
||||||
panic(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
var tmpError error
|
|
||||||
switch r := r.(type) {
|
|
||||||
case error:
|
|
||||||
tmpError = r
|
|
||||||
case string:
|
|
||||||
tmpError = errors.New(r)
|
|
||||||
default:
|
|
||||||
tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String())
|
|
||||||
}
|
|
||||||
|
|
||||||
*err = tmpError
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal decodes the first YAML document in data into the value
// pointed to by v. It is a convenience wrapper around NewDecoder+Decode.
func Unmarshal(data []byte, v interface{}) error {
	d := NewDecoder(bytes.NewBuffer(data))
	return d.Decode(v)
}
|
|
||||||
|
|
||||||
// NewDecoder returns a Decoder that reads YAML from r. The underlying
// parser is initialized immediately; parsing begins on the first Decode.
func NewDecoder(r io.Reader) *Decoder {
	d := &Decoder{
		anchors: make(map[string][]yaml_event_t),
		// One pre-existing (empty) entry so nextEvent always has a
		// top-of-stack slot to append to.
		tracking_anchors: make([][]yaml_event_t, 1),
	}
	yaml_parser_initialize(&d.parser)
	yaml_parser_set_input_reader(&d.parser, r)
	return d
}
|
|
||||||
|
|
||||||
// Decode reads the next YAML document from the stream and stores it in
// the value pointed to by v. v must be a non-nil pointer. Internal panics
// raised by the decoding machinery are converted to the returned error
// by the deferred recovery.
func (d *Decoder) Decode(v interface{}) (err error) {
	defer recovery(&err)

	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
	}

	// On the very first Decode, consume the STREAM-START event so that
	// the cursor sits on the first document.
	if d.event.event_type == yaml_NO_EVENT {
		d.nextEvent()

		if d.event.event_type != yaml_STREAM_START_EVENT {
			return errors.New("Invalid stream")
		}

		d.nextEvent()
	}

	d.document(rv)
	return nil
}
|
|
||||||
|
|
||||||
// UseNumber sets the decoder's useNumber flag; scalar decoding is then
// expected to produce Number values instead of binary floats/ints
// (analogous to encoding/json.Decoder.UseNumber — confirm against the
// scalar decoder, which is outside this file section).
func (d *Decoder) UseNumber() { d.useNumber = true }
|
|
||||||
|
|
||||||
// error aborts decoding by panicking with err; the panic is converted
// back into an ordinary error return by recovery (deferred in Decode).
func (d *Decoder) error(err error) {
	panic(err)
}
|
|
||||||
|
|
||||||
// nextEvent advances d.event to the next YAML event, either from the
// alias replay queue or from the parser, and records the event into any
// anchor capture currently in progress. A parse failure is raised as a
// *ParserError via d.error (panic).
func (d *Decoder) nextEvent() {
	// Reading past the end of the stream is a programming error here.
	if d.event.event_type == yaml_STREAM_END_EVENT {
		d.error(errors.New("The stream is closed"))
	}

	if d.replay_events != nil {
		// Serve recorded events (alias expansion) before touching the parser.
		d.event = d.replay_events[0]
		if len(d.replay_events) == 1 {
			d.replay_events = nil
		} else {
			d.replay_events = d.replay_events[1:]
		}
	} else {
		if !yaml_parser_parse(&d.parser, &d.event) {
			yaml_event_delete(&d.event)

			d.error(&ParserError{
				ErrorType:   d.parser.error,
				Context:     d.parser.context,
				ContextMark: d.parser.context_mark,
				Problem:     d.parser.problem,
				ProblemMark: d.parser.problem_mark,
			})
		}
	}

	last := len(d.tracking_anchors)
	// skip aliases when tracking an anchor
	if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
		d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
	}
}
|
|
||||||
|
|
||||||
// document decodes a single YAML document into rv. It expects the cursor
// to sit on DOCUMENT-START, parses the document's root node, verifies the
// matching DOCUMENT-END, and leaves the cursor on the following event.
func (d *Decoder) document(rv reflect.Value) {
	if d.event.event_type != yaml_DOCUMENT_START_EVENT {
		d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
	}

	d.nextEvent()
	d.parse(rv)

	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
	}

	d.nextEvent()
}
|
|
||||||
|
|
||||||
// parse decodes the node starting at the current event into rv,
// dispatching on the event type. Anchored nodes are bracketed with
// begin_anchor/end_anchor so their event stream is recorded for later
// alias replay. An invalid rv means the value cannot be stored, so the
// node is consumed and discarded via valueInterface.
func (d *Decoder) parse(rv reflect.Value) {
	if !rv.IsValid() {
		// skip ahead since we cannot store
		d.valueInterface()
		return
	}

	anchor := string(d.event.anchor)
	switch d.event.event_type {
	case yaml_SEQUENCE_START_EVENT:
		d.begin_anchor(anchor)
		d.sequence(rv)
		d.end_anchor(anchor)
	case yaml_MAPPING_START_EVENT:
		d.begin_anchor(anchor)
		d.mapping(rv)
		d.end_anchor(anchor)
	case yaml_SCALAR_EVENT:
		d.begin_anchor(anchor)
		d.scalar(rv)
		d.end_anchor(anchor)
	case yaml_ALIAS_EVENT:
		d.alias(rv)
	case yaml_DOCUMENT_END_EVENT:
		// Nothing to decode; caller handles document termination.
	default:
		d.error(&UnexpectedEventError{
			Value:     string(d.event.value),
			EventType: d.event.event_type,
			At:        d.event.start_mark,
		})
	}
}
|
|
||||||
|
|
||||||
// begin_anchor starts recording events for a named anchor by pushing a
// new capture (seeded with the current event) onto the tracking stack.
// An empty anchor name is a no-op.
func (d *Decoder) begin_anchor(anchor string) {
	if anchor != "" {
		events := []yaml_event_t{d.event}
		d.tracking_anchors = append(d.tracking_anchors, events)
	}
}
|
|
||||||
|
|
||||||
// end_anchor finishes recording events for a named anchor: it pops the
// current capture off the tracking stack, trims it, propagates the events
// into any enclosing (nested) captures, and stores the result under the
// anchor name for later alias replay. An empty anchor name is a no-op.
// The statement order here is load-bearing — do not reorder.
func (d *Decoder) end_anchor(anchor string) {
	if anchor != "" {
		events := d.tracking_anchors[len(d.tracking_anchors)-1]
		d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
		// remove the anchor, replaying events shouldn't have anchors
		events[0].anchor = nil
		// we went one too many, remove the extra event
		events = events[:len(events)-1]
		// if nested, append to all the other anchors
		for i, e := range d.tracking_anchors {
			d.tracking_anchors[i] = append(e, events...)
		}
		d.anchors[anchor] = events
	}
}
|
|
||||||
|
|
||||||
// indirect walks down v, allocating pointers as needed, until it reaches
// a non-pointer value or a type implementing Unmarshaler. It mirrors the
// pointer-chasing helper in encoding/json. If an Unmarshaler is found, it
// is returned together with a fresh *interface{} to decode the raw value
// into; otherwise it returns (nil, final value). decodingNull stops the
// descent one pointer early so a nil can be assigned.
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}

		if v.Kind() != reflect.Ptr {
			break
		}

		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}

		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}

		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(Unmarshaler); ok {
				// Hand back a scratch interface{} for the caller to
				// decode into before invoking UnmarshalYAML.
				var temp interface{}
				return u, reflect.ValueOf(&temp)
			}
		}

		v = v.Elem()
	}

	return nil, v
}
|
|
||||||
|
|
||||||
// sequence decodes a YAML sequence into v, which (after indirection) must
// be an array, a slice, or an empty interface. Slices are grown on demand;
// fixed arrays have surplus elements skipped and any unfilled tail zeroed.
// If the target implements Unmarshaler, the raw sequence is decoded into a
// scratch value and handed to UnmarshalYAML on return (via defer).
func (d *Decoder) sequence(v reflect.Value) {
	if d.event.event_type != yaml_SEQUENCE_START_EVENT {
		d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
	}

	u, pv := d.indirect(v, false)
	if u != nil {
		defer func() {
			if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
				d.error(err)
			}
		}()
		_, pv = d.indirect(pv, false)
	}

	v = pv

	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			v.Set(reflect.ValueOf(d.sequenceInterface()))
			return
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
	case reflect.Array:
	case reflect.Slice:
		break
	}

	d.nextEvent()

	i := 0
done:
	for {
		switch d.event.event_type {
		case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
			break done
		}

		// Get element of array, growing if necessary.
		if v.Kind() == reflect.Slice {
			// Grow slice if necessary
			if i >= v.Cap() {
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}

		if i < v.Len() {
			// Decode into element.
			d.parse(v.Index(i))
		} else {
			// Ran out of fixed array: skip.
			d.parse(reflect.Value{})
		}
		i++
	}

	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Array. Zero the rest.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	// An empty sequence still yields a non-nil (empty) slice.
	if i == 0 && v.Kind() == reflect.Slice {
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}

	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.nextEvent()
	}
}
|
|
||||||
|
|
||||||
func (d *Decoder) mapping(v reflect.Value) {
|
|
||||||
u, pv := d.indirect(v, false)
|
|
||||||
if u != nil {
|
|
||||||
defer func() {
|
|
||||||
if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
|
|
||||||
d.error(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
_, pv = d.indirect(pv, false)
|
|
||||||
}
|
|
||||||
v = pv
|
|
||||||
|
|
||||||
// Decoding into nil interface? Switch to non-reflect code.
|
|
||||||
if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
|
|
||||||
v.Set(reflect.ValueOf(d.mappingInterface()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check type of target: struct or map[X]Y
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
d.mappingStruct(v)
|
|
||||||
return
|
|
||||||
case reflect.Map:
|
|
||||||
default:
|
|
||||||
d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
|
|
||||||
}
|
|
||||||
|
|
||||||
mapt := v.Type()
|
|
||||||
if v.IsNil() {
|
|
||||||
v.Set(reflect.MakeMap(mapt))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
keyt := mapt.Key()
|
|
||||||
mapElemt := mapt.Elem()
|
|
||||||
|
|
||||||
var mapElem reflect.Value
|
|
||||||
done:
|
|
||||||
for {
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_MAPPING_END_EVENT:
|
|
||||||
break done
|
|
||||||
case yaml_DOCUMENT_END_EVENT:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
key := reflect.New(keyt)
|
|
||||||
d.parse(key.Elem())
|
|
||||||
|
|
||||||
if !mapElem.IsValid() {
|
|
||||||
mapElem = reflect.New(mapElemt).Elem()
|
|
||||||
} else {
|
|
||||||
mapElem.Set(reflect.Zero(mapElemt))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.parse(mapElem)
|
|
||||||
|
|
||||||
v.SetMapIndex(key.Elem(), mapElem)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) mappingStruct(v reflect.Value) {
|
|
||||||
|
|
||||||
structt := v.Type()
|
|
||||||
fields := cachedTypeFields(structt)
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
done:
|
|
||||||
for {
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_MAPPING_END_EVENT:
|
|
||||||
break done
|
|
||||||
case yaml_DOCUMENT_END_EVENT:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
key := ""
|
|
||||||
d.parse(reflect.ValueOf(&key))
|
|
||||||
|
|
||||||
// Figure out field corresponding to key.
|
|
||||||
var subv reflect.Value
|
|
||||||
|
|
||||||
var f *field
|
|
||||||
for i := range fields {
|
|
||||||
ff := &fields[i]
|
|
||||||
if ff.name == key {
|
|
||||||
f = ff
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if f == nil && strings.EqualFold(ff.name, key) {
|
|
||||||
f = ff
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if f != nil {
|
|
||||||
subv = v
|
|
||||||
for _, i := range f.index {
|
|
||||||
if subv.Kind() == reflect.Ptr {
|
|
||||||
if subv.IsNil() {
|
|
||||||
subv.Set(reflect.New(subv.Type().Elem()))
|
|
||||||
}
|
|
||||||
subv = subv.Elem()
|
|
||||||
}
|
|
||||||
subv = subv.Field(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.parse(subv)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) scalar(v reflect.Value) {
|
|
||||||
val := string(d.event.value)
|
|
||||||
wantptr := null_values[val]
|
|
||||||
|
|
||||||
u, pv := d.indirect(v, wantptr)
|
|
||||||
|
|
||||||
var tag string
|
|
||||||
if u != nil {
|
|
||||||
defer func() {
|
|
||||||
if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
|
|
||||||
d.error(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
_, pv = d.indirect(pv, wantptr)
|
|
||||||
}
|
|
||||||
v = pv
|
|
||||||
|
|
||||||
var err error
|
|
||||||
tag, err = resolve(d.event, v, d.useNumber)
|
|
||||||
if err != nil {
|
|
||||||
d.error(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) alias(rv reflect.Value) {
|
|
||||||
val, ok := d.anchors[string(d.event.anchor)]
|
|
||||||
if !ok {
|
|
||||||
d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.replay_events = val
|
|
||||||
d.nextEvent()
|
|
||||||
d.parse(rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) valueInterface() interface{} {
|
|
||||||
var v interface{}
|
|
||||||
|
|
||||||
anchor := string(d.event.anchor)
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_SEQUENCE_START_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
v = d.sequenceInterface()
|
|
||||||
case yaml_MAPPING_START_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
v = d.mappingInterface()
|
|
||||||
case yaml_SCALAR_EVENT:
|
|
||||||
d.begin_anchor(anchor)
|
|
||||||
v = d.scalarInterface()
|
|
||||||
case yaml_ALIAS_EVENT:
|
|
||||||
rv := reflect.ValueOf(&v)
|
|
||||||
d.alias(rv)
|
|
||||||
return v
|
|
||||||
case yaml_DOCUMENT_END_EVENT:
|
|
||||||
d.error(&UnexpectedEventError{
|
|
||||||
Value: string(d.event.value),
|
|
||||||
EventType: d.event.event_type,
|
|
||||||
At: d.event.start_mark,
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
|
||||||
d.end_anchor(anchor)
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Decoder) scalarInterface() interface{} {
|
|
||||||
_, v := resolveInterface(d.event, d.useNumber)
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// sequenceInterface is like sequence but returns []interface{}.
|
|
||||||
func (d *Decoder) sequenceInterface() []interface{} {
|
|
||||||
var v = make([]interface{}, 0)
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
done:
|
|
||||||
for {
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
|
|
||||||
break done
|
|
||||||
}
|
|
||||||
|
|
||||||
v = append(v, d.valueInterface())
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// mappingInterface is like mapping but returns map[interface{}]interface{}.
|
|
||||||
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
|
|
||||||
m := make(map[interface{}]interface{})
|
|
||||||
|
|
||||||
d.nextEvent()
|
|
||||||
|
|
||||||
done:
|
|
||||||
for {
|
|
||||||
switch d.event.event_type {
|
|
||||||
case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
|
|
||||||
break done
|
|
||||||
}
|
|
||||||
|
|
||||||
key := d.valueInterface()
|
|
||||||
|
|
||||||
// Read value.
|
|
||||||
m[key] = d.valueInterface()
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
|
|
||||||
d.nextEvent()
|
|
||||||
}
|
|
||||||
|
|
||||||
return m
|
|
||||||
}
|
|
2072
vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
generated
vendored
2072
vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
generated
vendored
File diff suppressed because it is too large
Load Diff
395
vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
generated
vendored
395
vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
generated
vendored
@ -1,395 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
timeTimeType = reflect.TypeOf(time.Time{})
|
|
||||||
marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
|
|
||||||
numberType = reflect.TypeOf(Number(""))
|
|
||||||
nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
|
|
||||||
multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")
|
|
||||||
|
|
||||||
shortTags = map[string]string{
|
|
||||||
yaml_NULL_TAG: "!!null",
|
|
||||||
yaml_BOOL_TAG: "!!bool",
|
|
||||||
yaml_STR_TAG: "!!str",
|
|
||||||
yaml_INT_TAG: "!!int",
|
|
||||||
yaml_FLOAT_TAG: "!!float",
|
|
||||||
yaml_TIMESTAMP_TAG: "!!timestamp",
|
|
||||||
yaml_SEQ_TAG: "!!seq",
|
|
||||||
yaml_MAP_TAG: "!!map",
|
|
||||||
yaml_BINARY_TAG: "!!binary",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
type Marshaler interface {
|
|
||||||
MarshalYAML() (tag string, value interface{}, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Encoder writes JSON objects to an output stream.
|
|
||||||
type Encoder struct {
|
|
||||||
w io.Writer
|
|
||||||
emitter yaml_emitter_t
|
|
||||||
event yaml_event_t
|
|
||||||
flow bool
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func Marshal(v interface{}) ([]byte, error) {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
e := NewEncoder(&b)
|
|
||||||
err := e.Encode(v)
|
|
||||||
return b.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns a new encoder that writes to w.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
e := &Encoder{w: w}
|
|
||||||
yaml_emitter_initialize(&e.emitter)
|
|
||||||
yaml_emitter_set_output_writer(&e.emitter, e.w)
|
|
||||||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
|
||||||
e.emit()
|
|
||||||
yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) Encode(v interface{}) (err error) {
|
|
||||||
defer recovery(&err)
|
|
||||||
|
|
||||||
if e.err != nil {
|
|
||||||
return e.err
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal("", reflect.ValueOf(v), true)
|
|
||||||
|
|
||||||
yaml_document_end_event_initialize(&e.event, true)
|
|
||||||
e.emit()
|
|
||||||
e.emitter.open_ended = false
|
|
||||||
yaml_stream_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emit() {
|
|
||||||
if !yaml_emitter_emit(&e.emitter, &e.event) {
|
|
||||||
panic("bad emit")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
|
|
||||||
vt := v.Type()
|
|
||||||
|
|
||||||
if vt.Implements(marshalerType) {
|
|
||||||
e.emitMarshaler(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if vt.Kind() != reflect.Ptr && allowAddr {
|
|
||||||
if reflect.PtrTo(vt).Implements(marshalerType) {
|
|
||||||
e.emitAddrMarshaler(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Interface:
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
} else {
|
|
||||||
e.marshal(tag, v.Elem(), allowAddr)
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
e.emitMap(tag, v)
|
|
||||||
case reflect.Ptr:
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
} else {
|
|
||||||
e.marshal(tag, v.Elem(), true)
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
e.emitStruct(tag, v)
|
|
||||||
case reflect.Slice:
|
|
||||||
e.emitSlice(tag, v)
|
|
||||||
case reflect.String:
|
|
||||||
e.emitString(tag, v)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
e.emitInt(tag, v)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
e.emitUint(tag, v)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
e.emitFloat(tag, v)
|
|
||||||
case reflect.Bool:
|
|
||||||
e.emitBool(tag, v)
|
|
||||||
default:
|
|
||||||
panic("Can't marshal type yet: " + v.Type().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitMap(tag string, v reflect.Value) {
|
|
||||||
e.mapping(tag, func() {
|
|
||||||
var keys stringValues = v.MapKeys()
|
|
||||||
sort.Sort(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
e.marshal("", k, true)
|
|
||||||
e.marshal("", v.MapIndex(k), true)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
|
|
||||||
if v.Type() == timeTimeType {
|
|
||||||
e.emitTime(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := cachedTypeFields(v.Type())
|
|
||||||
|
|
||||||
e.mapping(tag, func() {
|
|
||||||
for _, f := range fields {
|
|
||||||
fv := fieldByIndex(v, f.index)
|
|
||||||
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal("", reflect.ValueOf(f.name), true)
|
|
||||||
e.flow = f.flow
|
|
||||||
e.marshal("", fv, true)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitTime(tag string, v reflect.Value) {
|
|
||||||
t := v.Interface().(time.Time)
|
|
||||||
bytes, _ := t.MarshalText()
|
|
||||||
e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isEmptyValue(v reflect.Value) bool {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
|
||||||
return v.Len() == 0
|
|
||||||
case reflect.Bool:
|
|
||||||
return !v.Bool()
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return v.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
|
||||||
return v.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return v.Float() == 0
|
|
||||||
case reflect.Interface, reflect.Ptr:
|
|
||||||
return v.IsNil()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) mapping(tag string, f func()) {
|
|
||||||
implicit := tag == ""
|
|
||||||
style := yaml_BLOCK_MAPPING_STYLE
|
|
||||||
if e.flow {
|
|
||||||
e.flow = false
|
|
||||||
style = yaml_FLOW_MAPPING_STYLE
|
|
||||||
}
|
|
||||||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
f()
|
|
||||||
|
|
||||||
yaml_mapping_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
|
|
||||||
if v.Type() == byteSliceType {
|
|
||||||
e.emitBase64(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
implicit := tag == ""
|
|
||||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
|
||||||
if e.flow {
|
|
||||||
e.flow = false
|
|
||||||
style = yaml_FLOW_SEQUENCE_STYLE
|
|
||||||
}
|
|
||||||
yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
|
||||||
e.emit()
|
|
||||||
|
|
||||||
n := v.Len()
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
e.marshal("", v.Index(i), true)
|
|
||||||
}
|
|
||||||
|
|
||||||
yaml_sequence_end_event_initialize(&e.event)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
|
|
||||||
if v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s := v.Bytes()
|
|
||||||
|
|
||||||
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
|
|
||||||
|
|
||||||
base64.StdEncoding.Encode(dst, s)
|
|
||||||
e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitString(tag string, v reflect.Value) {
|
|
||||||
var style yaml_scalar_style_t
|
|
||||||
s := v.String()
|
|
||||||
|
|
||||||
if nonPrintable.MatchString(s) {
|
|
||||||
e.emitBase64(tag, v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Type() == numberType {
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
} else {
|
|
||||||
event := yaml_event_t{
|
|
||||||
implicit: true,
|
|
||||||
value: []byte(s),
|
|
||||||
}
|
|
||||||
|
|
||||||
rtag, _ := resolveInterface(event, false)
|
|
||||||
if tag == "" && rtag != yaml_STR_TAG {
|
|
||||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
|
||||||
} else if multiline.MatchString(s) {
|
|
||||||
style = yaml_LITERAL_SCALAR_STYLE
|
|
||||||
} else {
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
e.emitScalar(s, "", tag, style)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitBool(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatBool(v.Bool())
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitInt(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatInt(v.Int(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitUint(tag string, v reflect.Value) {
|
|
||||||
s := strconv.FormatUint(v.Uint(), 10)
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
|
|
||||||
f := v.Float()
|
|
||||||
|
|
||||||
var s string
|
|
||||||
switch {
|
|
||||||
case math.IsNaN(f):
|
|
||||||
s = ".nan"
|
|
||||||
case math.IsInf(f, 1):
|
|
||||||
s = "+.inf"
|
|
||||||
case math.IsInf(f, -1):
|
|
||||||
s = "-.inf"
|
|
||||||
default:
|
|
||||||
s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
|
|
||||||
}
|
|
||||||
|
|
||||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitNil() {
|
|
||||||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
|
||||||
implicit := tag == ""
|
|
||||||
if !implicit {
|
|
||||||
style = yaml_PLAIN_SCALAR_STYLE
|
|
||||||
}
|
|
||||||
|
|
||||||
stag := shortTags[tag]
|
|
||||||
if stag == "" {
|
|
||||||
stag = tag
|
|
||||||
}
|
|
||||||
|
|
||||||
yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
|
|
||||||
e.emit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
|
|
||||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m := v.Interface().(Marshaler)
|
|
||||||
if m == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t, val, err := m.MarshalYAML()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if val == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal(t, reflect.ValueOf(val), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
|
|
||||||
if !v.CanAddr() {
|
|
||||||
e.marshal(tag, v, false)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
va := v.Addr()
|
|
||||||
if va.IsNil() {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m := v.Interface().(Marshaler)
|
|
||||||
t, val, err := m.MarshalYAML()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val == nil {
|
|
||||||
e.emitNil()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e.marshal(t, reflect.ValueOf(val), false)
|
|
||||||
}
|
|
19
vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
generated
vendored
19
vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
generated
vendored
@ -1,19 +0,0 @@
|
|||||||
Copyright (c) 2006 Kirill Simonov
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
|
||||||
this software and associated documentation files (the "Software"), to deal in
|
|
||||||
the Software without restriction, including without limitation the rights to
|
|
||||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
|
||||||
of the Software, and to permit persons to whom the Software is furnished to do
|
|
||||||
so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
1230
vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
generated
vendored
1230
vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
generated
vendored
File diff suppressed because it is too large
Load Diff
465
vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
generated
vendored
465
vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
generated
vendored
@ -1,465 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set the reader error and return 0.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
|
|
||||||
offset int, value int) bool {
|
|
||||||
parser.error = yaml_READER_ERROR
|
|
||||||
parser.problem = problem
|
|
||||||
parser.problem_offset = offset
|
|
||||||
parser.problem_value = value
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Byte order marks.
|
|
||||||
*/
|
|
||||||
const (
|
|
||||||
BOM_UTF8 = "\xef\xbb\xbf"
|
|
||||||
BOM_UTF16LE = "\xff\xfe"
|
|
||||||
BOM_UTF16BE = "\xfe\xff"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
|
||||||
* found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
|
||||||
/* Ensure that we had enough bytes in the raw buffer. */
|
|
||||||
for !parser.eof &&
|
|
||||||
len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
|
||||||
if !yaml_parser_update_raw_buffer(parser) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Determine the encoding. */
|
|
||||||
raw := parser.raw_buffer
|
|
||||||
pos := parser.raw_buffer_pos
|
|
||||||
remaining := len(raw) - pos
|
|
||||||
if remaining >= 2 &&
|
|
||||||
raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
|
|
||||||
parser.encoding = yaml_UTF16LE_ENCODING
|
|
||||||
parser.raw_buffer_pos += 2
|
|
||||||
parser.offset += 2
|
|
||||||
} else if remaining >= 2 &&
|
|
||||||
raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
|
|
||||||
parser.encoding = yaml_UTF16BE_ENCODING
|
|
||||||
parser.raw_buffer_pos += 2
|
|
||||||
parser.offset += 2
|
|
||||||
} else if remaining >= 3 &&
|
|
||||||
raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
|
|
||||||
parser.encoding = yaml_UTF8_ENCODING
|
|
||||||
parser.raw_buffer_pos += 3
|
|
||||||
parser.offset += 3
|
|
||||||
} else {
|
|
||||||
parser.encoding = yaml_UTF8_ENCODING
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Update the raw buffer.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
|
||||||
size_read := 0
|
|
||||||
|
|
||||||
/* Return if the raw buffer is full. */
|
|
||||||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return on EOF. */
|
|
||||||
|
|
||||||
if parser.eof {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Move the remaining bytes in the raw buffer to the beginning. */
|
|
||||||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
|
||||||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
|
||||||
}
|
|
||||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
|
||||||
parser.raw_buffer_pos = 0
|
|
||||||
|
|
||||||
/* Call the read handler to fill the buffer. */
|
|
||||||
size_read, err := parser.read_handler(parser,
|
|
||||||
parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
|
||||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
parser.eof = true
|
|
||||||
} else if err != nil {
|
|
||||||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Ensure that the buffer contains at least `length` characters.
|
|
||||||
* Return 1 on success, 0 on failure.
|
|
||||||
*
|
|
||||||
* The length is supposed to be significantly less that the buffer size.
|
|
||||||
*/
|
|
||||||
|
|
||||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
|
||||||
/* Read handler must be set. */
|
|
||||||
if parser.read_handler == nil {
|
|
||||||
panic("read handler must be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If the EOF flag is set and the raw buffer is empty, do nothing. */
|
|
||||||
|
|
||||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return if the buffer contains enough characters. */
|
|
||||||
|
|
||||||
if parser.unread >= length {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Determine the input encoding if it is not known yet. */
|
|
||||||
|
|
||||||
if parser.encoding == yaml_ANY_ENCODING {
|
|
||||||
if !yaml_parser_determine_encoding(parser) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Move the unread characters to the beginning of the buffer. */
|
|
||||||
buffer_end := len(parser.buffer)
|
|
||||||
if 0 < parser.buffer_pos &&
|
|
||||||
parser.buffer_pos < buffer_end {
|
|
||||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
|
||||||
buffer_end -= parser.buffer_pos
|
|
||||||
parser.buffer_pos = 0
|
|
||||||
} else if parser.buffer_pos == buffer_end {
|
|
||||||
buffer_end = 0
|
|
||||||
parser.buffer_pos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
|
||||||
|
|
||||||
/* Fill the buffer until it has enough characters. */
|
|
||||||
first := true
|
|
||||||
for parser.unread < length {
|
|
||||||
/* Fill the raw buffer if necessary. */
|
|
||||||
|
|
||||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
|
||||||
if !yaml_parser_update_raw_buffer(parser) {
|
|
||||||
parser.buffer = parser.buffer[:buffer_end]
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
first = false
|
|
||||||
|
|
||||||
/* Decode the raw buffer. */
|
|
||||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
|
||||||
var value rune
|
|
||||||
var w int
|
|
||||||
|
|
||||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
|
||||||
incomplete := false
|
|
||||||
|
|
||||||
/* Decode the next character. */
|
|
||||||
|
|
||||||
switch parser.encoding {
|
|
||||||
case yaml_UTF8_ENCODING:
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Decode a UTF-8 character. Check RFC 3629
|
|
||||||
* (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
|
||||||
*
|
|
||||||
* The following table (taken from the RFC) is used for
|
|
||||||
* decoding.
|
|
||||||
*
|
|
||||||
* Char. number range | UTF-8 octet sequence
|
|
||||||
* (hexadecimal) | (binary)
|
|
||||||
* --------------------+------------------------------------
|
|
||||||
* 0000 0000-0000 007F | 0xxxxxxx
|
|
||||||
* 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
|
||||||
* 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
|
||||||
* 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
|
||||||
*
|
|
||||||
* Additionally, the characters in the range 0xD800-0xDFFF
|
|
||||||
* are prohibited as they are reserved for use with UTF-16
|
|
||||||
* surrogate pairs.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Determine the length of the UTF-8 sequence. */
|
|
||||||
|
|
||||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
|
||||||
w = width(octet)
|
|
||||||
|
|
||||||
/* Check if the leading octet is valid. */
|
|
||||||
|
|
||||||
if w == 0 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid leading UTF-8 octet",
|
|
||||||
parser.offset, int(octet))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if the raw buffer contains an incomplete character. */
|
|
||||||
|
|
||||||
if w > raw_unread {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-8 octet sequence",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
incomplete = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Decode the leading octet. */
|
|
||||||
switch {
|
|
||||||
case octet&0x80 == 0x00:
|
|
||||||
value = rune(octet & 0x7F)
|
|
||||||
case octet&0xE0 == 0xC0:
|
|
||||||
value = rune(octet & 0x1F)
|
|
||||||
case octet&0xF0 == 0xE0:
|
|
||||||
value = rune(octet & 0x0F)
|
|
||||||
case octet&0xF8 == 0xF0:
|
|
||||||
value = rune(octet & 0x07)
|
|
||||||
default:
|
|
||||||
value = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check and decode the trailing octets. */
|
|
||||||
|
|
||||||
for k := 1; k < w; k++ {
|
|
||||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
|
||||||
|
|
||||||
/* Check if the octet is valid. */
|
|
||||||
|
|
||||||
if (octet & 0xC0) != 0x80 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid trailing UTF-8 octet",
|
|
||||||
parser.offset+k, int(octet))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Decode the octet. */
|
|
||||||
|
|
||||||
value = (value << 6) + rune(octet&0x3F)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check the length of the sequence against the value. */
|
|
||||||
switch {
|
|
||||||
case w == 1:
|
|
||||||
case w == 2 && value >= 0x80:
|
|
||||||
case w == 3 && value >= 0x800:
|
|
||||||
case w == 4 && value >= 0x10000:
|
|
||||||
default:
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid length of a UTF-8 sequence",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check the range of the value. */
|
|
||||||
|
|
||||||
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"invalid Unicode character",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
case yaml_UTF16LE_ENCODING,
|
|
||||||
yaml_UTF16BE_ENCODING:
|
|
||||||
|
|
||||||
var low, high int
|
|
||||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
|
||||||
low, high = 0, 1
|
|
||||||
} else {
|
|
||||||
high, low = 1, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* The UTF-16 encoding is not as simple as one might
|
|
||||||
* naively think. Check RFC 2781
|
|
||||||
* (http://www.ietf.org/rfc/rfc2781.txt).
|
|
||||||
*
|
|
||||||
* Normally, two subsequent bytes describe a Unicode
|
|
||||||
* character. However a special technique (called a
|
|
||||||
* surrogate pair) is used for specifying character
|
|
||||||
* values larger than 0xFFFF.
|
|
||||||
*
|
|
||||||
* A surrogate pair consists of two pseudo-characters:
|
|
||||||
* high surrogate area (0xD800-0xDBFF)
|
|
||||||
* low surrogate area (0xDC00-0xDFFF)
|
|
||||||
*
|
|
||||||
* The following formulas are used for decoding
|
|
||||||
* and encoding characters using surrogate pairs:
|
|
||||||
*
|
|
||||||
* U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
|
||||||
* U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
|
||||||
* W1 = 110110yyyyyyyyyy
|
|
||||||
* W2 = 110111xxxxxxxxxx
|
|
||||||
*
|
|
||||||
* where U is the character value, W1 is the high surrogate
|
|
||||||
* area, W2 is the low surrogate area.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Check for incomplete UTF-16 character. */
|
|
||||||
|
|
||||||
if raw_unread < 2 {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-16 character",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
incomplete = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Get the character. */
|
|
||||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
|
||||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
|
||||||
|
|
||||||
/* Check for unexpected low surrogate area. */
|
|
||||||
|
|
||||||
if (value & 0xFC00) == 0xDC00 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"unexpected low surrogate area",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check for a high surrogate area. */
|
|
||||||
|
|
||||||
if (value & 0xFC00) == 0xD800 {
|
|
||||||
|
|
||||||
w = 4
|
|
||||||
|
|
||||||
/* Check for incomplete surrogate pair. */
|
|
||||||
|
|
||||||
if raw_unread < 4 {
|
|
||||||
if parser.eof {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"incomplete UTF-16 surrogate pair",
|
|
||||||
parser.offset, -1)
|
|
||||||
}
|
|
||||||
incomplete = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Get the next character. */
|
|
||||||
|
|
||||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
|
||||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
|
||||||
|
|
||||||
/* Check for a low surrogate area. */
|
|
||||||
|
|
||||||
if (value2 & 0xFC00) != 0xDC00 {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"expected low surrogate area",
|
|
||||||
parser.offset+2, int(value2))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Generate the value of the surrogate pair. */
|
|
||||||
|
|
||||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
|
||||||
} else {
|
|
||||||
w = 2
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic("Impossible") /* Impossible. */
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if the raw buffer contains enough bytes to form a character. */
|
|
||||||
|
|
||||||
if incomplete {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if the character is in the allowed range:
|
|
||||||
* #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
|
||||||
* | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
|
||||||
* | [#x10000-#x10FFFF] (32 bit)
|
|
||||||
*/
|
|
||||||
|
|
||||||
if !(value == 0x09 || value == 0x0A || value == 0x0D ||
|
|
||||||
(value >= 0x20 && value <= 0x7E) ||
|
|
||||||
(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
|
|
||||||
(value >= 0xE000 && value <= 0xFFFD) ||
|
|
||||||
(value >= 0x10000 && value <= 0x10FFFF)) {
|
|
||||||
return yaml_parser_set_reader_error(parser,
|
|
||||||
"control characters are not allowed",
|
|
||||||
parser.offset, int(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Move the raw pointers. */
|
|
||||||
|
|
||||||
parser.raw_buffer_pos += w
|
|
||||||
parser.offset += w
|
|
||||||
|
|
||||||
/* Finally put the character into the buffer. */
|
|
||||||
|
|
||||||
/* 0000 0000-0000 007F . 0xxxxxxx */
|
|
||||||
if value <= 0x7F {
|
|
||||||
parser.buffer[buffer_end] = byte(value)
|
|
||||||
} else if value <= 0x7FF {
|
|
||||||
/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
|
|
||||||
parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
|
|
||||||
parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
|
|
||||||
} else if value <= 0xFFFF {
|
|
||||||
/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
|
|
||||||
parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
|
|
||||||
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
|
||||||
parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
|
|
||||||
} else {
|
|
||||||
/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
|
|
||||||
parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
|
|
||||||
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
|
||||||
parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
|
||||||
parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer_end += w
|
|
||||||
parser.unread++
|
|
||||||
}
|
|
||||||
|
|
||||||
/* On EOF, put NUL into the buffer and return. */
|
|
||||||
|
|
||||||
if parser.eof {
|
|
||||||
parser.buffer[buffer_end] = 0
|
|
||||||
buffer_end++
|
|
||||||
parser.buffer = parser.buffer[:buffer_end]
|
|
||||||
parser.unread++
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
parser.buffer = parser.buffer[:buffer_end]
|
|
||||||
return true
|
|
||||||
}
|
|
449
vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
generated
vendored
449
vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
generated
vendored
@ -1,449 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var byteSliceType = reflect.TypeOf([]byte(nil))
|
|
||||||
|
|
||||||
var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}
|
|
||||||
var bool_values map[string]bool
|
|
||||||
var null_values map[string]bool
|
|
||||||
|
|
||||||
var signs = []byte{'-', '+'}
|
|
||||||
var nulls = []byte{'~', 'n', 'N'}
|
|
||||||
var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}
|
|
||||||
|
|
||||||
var timestamp_regexp *regexp.Regexp
|
|
||||||
var ymd_regexp *regexp.Regexp
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
bool_values = make(map[string]bool)
|
|
||||||
bool_values["y"] = true
|
|
||||||
bool_values["yes"] = true
|
|
||||||
bool_values["n"] = false
|
|
||||||
bool_values["no"] = false
|
|
||||||
bool_values["true"] = true
|
|
||||||
bool_values["false"] = false
|
|
||||||
bool_values["on"] = true
|
|
||||||
bool_values["off"] = false
|
|
||||||
|
|
||||||
null_values = make(map[string]bool)
|
|
||||||
null_values["~"] = true
|
|
||||||
null_values["null"] = true
|
|
||||||
null_values["Null"] = true
|
|
||||||
null_values["NULL"] = true
|
|
||||||
|
|
||||||
timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
|
|
||||||
ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
|
|
||||||
val := string(event.value)
|
|
||||||
|
|
||||||
if null_values[val] {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
return yaml_NULL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
if useNumber && v.Type() == numberType {
|
|
||||||
tag, i := resolveInterface(event, useNumber)
|
|
||||||
if n, ok := i.(Number); ok {
|
|
||||||
v.Set(reflect.ValueOf(n))
|
|
||||||
return tag, nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
return resolve_string(val, v, event)
|
|
||||||
case reflect.Bool:
|
|
||||||
return resolve_bool(val, v, event)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return resolve_int(val, v, useNumber, event)
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
return resolve_uint(val, v, useNumber, event)
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return resolve_float(val, v, useNumber, event)
|
|
||||||
case reflect.Interface:
|
|
||||||
_, i := resolveInterface(event, useNumber)
|
|
||||||
if i != nil {
|
|
||||||
v.Set(reflect.ValueOf(i))
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
return resolve_time(val, v, event)
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.Type() != byteSliceType {
|
|
||||||
return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
|
|
||||||
}
|
|
||||||
b, err := decode_binary(event.value, event)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set(reflect.ValueOf(b))
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_STR_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasBinaryTag(event yaml_event_t) bool {
|
|
||||||
for _, tag := range binary_tags {
|
|
||||||
if bytes.Equal(event.tag, tag) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
|
|
||||||
b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
|
|
||||||
n, err := base64.StdEncoding.Decode(b, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
|
|
||||||
}
|
|
||||||
return b[:n], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
if len(event.tag) > 0 {
|
|
||||||
if hasBinaryTag(event) {
|
|
||||||
b, err := decode_binary(event.value, event)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
val = string(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
v.SetString(val)
|
|
||||||
return yaml_STR_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
b, found := bool_values[strings.ToLower(val)]
|
|
||||||
if !found {
|
|
||||||
return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetBool(b)
|
|
||||||
return yaml_BOOL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
original := val
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value uint64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
|
|
||||||
sign := int64(1)
|
|
||||||
if val[0] == '-' {
|
|
||||||
sign = -1
|
|
||||||
val = val[1:]
|
|
||||||
} else if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
base := 0
|
|
||||||
if val == "0" {
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString("0")
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(val, "0o") {
|
|
||||||
base = 8
|
|
||||||
val = val[2:]
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseUint(val, base, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
var val64 int64
|
|
||||||
if value <= math.MaxInt64 {
|
|
||||||
val64 = int64(value)
|
|
||||||
if sign == -1 {
|
|
||||||
val64 = -val64
|
|
||||||
}
|
|
||||||
} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
|
|
||||||
val64 = math.MinInt64
|
|
||||||
} else {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatInt(val64, 10))
|
|
||||||
} else {
|
|
||||||
if v.OverflowInt(val64) {
|
|
||||||
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
v.SetInt(val64)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
original := val
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value uint64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
|
|
||||||
if val[0] == '-' {
|
|
||||||
return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
base := 0
|
|
||||||
if val == "0" {
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString("0")
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Zero(v.Type()))
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(val, "0o") {
|
|
||||||
base = 8
|
|
||||||
val = val[2:]
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseUint(val, base, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatUint(value, 10))
|
|
||||||
} else {
|
|
||||||
if v.OverflowUint(value) {
|
|
||||||
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetUint(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_INT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
|
|
||||||
val = strings.Replace(val, "_", "", -1)
|
|
||||||
var value float64
|
|
||||||
|
|
||||||
isNumberValue := v.Type() == numberType
|
|
||||||
typeBits := 64
|
|
||||||
if !isNumberValue {
|
|
||||||
typeBits = v.Type().Bits()
|
|
||||||
}
|
|
||||||
|
|
||||||
sign := 1
|
|
||||||
if val[0] == '-' {
|
|
||||||
sign = -1
|
|
||||||
val = val[1:]
|
|
||||||
} else if val[0] == '+' {
|
|
||||||
val = val[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
valLower := strings.ToLower(val)
|
|
||||||
if valLower == ".inf" {
|
|
||||||
value = math.Inf(sign)
|
|
||||||
} else if valLower == ".nan" {
|
|
||||||
value = math.NaN()
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
value, err = strconv.ParseFloat(val, typeBits)
|
|
||||||
value *= float64(sign)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if isNumberValue {
|
|
||||||
v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
|
|
||||||
} else {
|
|
||||||
if v.OverflowFloat(value) {
|
|
||||||
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetFloat(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_FLOAT_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
|
|
||||||
var parsedTime time.Time
|
|
||||||
matches := ymd_regexp.FindStringSubmatch(val)
|
|
||||||
if len(matches) > 0 {
|
|
||||||
year, _ := strconv.Atoi(matches[1])
|
|
||||||
month, _ := strconv.Atoi(matches[2])
|
|
||||||
day, _ := strconv.Atoi(matches[3])
|
|
||||||
parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
|
|
||||||
} else {
|
|
||||||
matches = timestamp_regexp.FindStringSubmatch(val)
|
|
||||||
if len(matches) == 0 {
|
|
||||||
return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
|
|
||||||
}
|
|
||||||
|
|
||||||
year, _ := strconv.Atoi(matches[1])
|
|
||||||
month, _ := strconv.Atoi(matches[2])
|
|
||||||
day, _ := strconv.Atoi(matches[3])
|
|
||||||
hour, _ := strconv.Atoi(matches[4])
|
|
||||||
min, _ := strconv.Atoi(matches[5])
|
|
||||||
sec, _ := strconv.Atoi(matches[6])
|
|
||||||
|
|
||||||
nsec := 0
|
|
||||||
if matches[7] != "" {
|
|
||||||
millis, _ := strconv.Atoi(matches[7])
|
|
||||||
nsec = int(time.Duration(millis) * time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
loc := time.UTC
|
|
||||||
if matches[8] != "" {
|
|
||||||
sign := matches[8][0]
|
|
||||||
hr, _ := strconv.Atoi(matches[8][1:])
|
|
||||||
min := 0
|
|
||||||
if matches[9] != "" {
|
|
||||||
min, _ = strconv.Atoi(matches[9])
|
|
||||||
}
|
|
||||||
|
|
||||||
zoneOffset := (hr*60 + min) * 60
|
|
||||||
if sign == '-' {
|
|
||||||
zoneOffset = -zoneOffset
|
|
||||||
}
|
|
||||||
|
|
||||||
loc = time.FixedZone("", zoneOffset)
|
|
||||||
}
|
|
||||||
parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Set(reflect.ValueOf(parsedTime))
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
|
|
||||||
val := string(event.value)
|
|
||||||
if len(event.tag) == 0 && !event.implicit {
|
|
||||||
return "", val
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(val) == 0 {
|
|
||||||
return yaml_NULL_TAG, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var result interface{}
|
|
||||||
|
|
||||||
sign := false
|
|
||||||
c := val[0]
|
|
||||||
switch {
|
|
||||||
case bytes.IndexByte(signs, c) != -1:
|
|
||||||
sign = true
|
|
||||||
fallthrough
|
|
||||||
case c >= '0' && c <= '9':
|
|
||||||
i := int64(0)
|
|
||||||
result = &i
|
|
||||||
if useNumber {
|
|
||||||
var n Number
|
|
||||||
result = &n
|
|
||||||
}
|
|
||||||
|
|
||||||
v := reflect.ValueOf(result).Elem()
|
|
||||||
if _, err := resolve_int(val, v, useNumber, event); err == nil {
|
|
||||||
return yaml_INT_TAG, v.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
f := float64(0)
|
|
||||||
result = &f
|
|
||||||
if useNumber {
|
|
||||||
var n Number
|
|
||||||
result = &n
|
|
||||||
}
|
|
||||||
|
|
||||||
v = reflect.ValueOf(result).Elem()
|
|
||||||
if _, err := resolve_float(val, v, useNumber, event); err == nil {
|
|
||||||
return yaml_FLOAT_TAG, v.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
if !sign {
|
|
||||||
t := time.Time{}
|
|
||||||
if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
|
|
||||||
return "", t
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case bytes.IndexByte(nulls, c) != -1:
|
|
||||||
if null_values[val] {
|
|
||||||
return yaml_NULL_TAG, nil
|
|
||||||
}
|
|
||||||
b := false
|
|
||||||
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
|
|
||||||
return yaml_BOOL_TAG, b
|
|
||||||
}
|
|
||||||
case c == '.':
|
|
||||||
f := float64(0)
|
|
||||||
result = &f
|
|
||||||
if useNumber {
|
|
||||||
var n Number
|
|
||||||
result = &n
|
|
||||||
}
|
|
||||||
|
|
||||||
v := reflect.ValueOf(result).Elem()
|
|
||||||
if _, err := resolve_float(val, v, useNumber, event); err == nil {
|
|
||||||
return yaml_FLOAT_TAG, v.Interface()
|
|
||||||
}
|
|
||||||
case bytes.IndexByte(bools, c) != -1:
|
|
||||||
b := false
|
|
||||||
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
|
|
||||||
return yaml_BOOL_TAG, b
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasBinaryTag(event) {
|
|
||||||
bytes, err := decode_binary(event.value, event)
|
|
||||||
if err == nil {
|
|
||||||
return yaml_BINARY_TAG, bytes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml_STR_TAG, val
|
|
||||||
}
|
|
62
vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
generated
vendored
62
vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
generated
vendored
@ -1,62 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Run_parser(cmd string, args []string) {
|
|
||||||
for i := 0; i < len(args); i++ {
|
|
||||||
fmt.Printf("[%d] Scanning '%s'", i, args[i])
|
|
||||||
file, err := os.Open(args[i])
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
|
|
||||||
}
|
|
||||||
|
|
||||||
parser := yaml_parser_t{}
|
|
||||||
yaml_parser_initialize(&parser)
|
|
||||||
yaml_parser_set_input_reader(&parser, file)
|
|
||||||
|
|
||||||
failed := false
|
|
||||||
token := yaml_token_t{}
|
|
||||||
count := 0
|
|
||||||
for {
|
|
||||||
if !yaml_parser_scan(&parser, &token) {
|
|
||||||
failed = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if token.token_type == yaml_STREAM_END_TOKEN {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
|
|
||||||
file.Close()
|
|
||||||
|
|
||||||
msg := "SUCCESS"
|
|
||||||
if failed {
|
|
||||||
msg = "FAILED"
|
|
||||||
if parser.error != yaml_NO_ERROR {
|
|
||||||
m := parser.problem_mark
|
|
||||||
fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
|
|
||||||
parser.context, parser.problem, m.line, m.column)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Printf("%s (%d tokens)\n", msg, count)
|
|
||||||
}
|
|
||||||
}
|
|
3318
vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
generated
vendored
3318
vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
generated
vendored
File diff suppressed because it is too large
Load Diff
360
vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
generated
vendored
360
vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
generated
vendored
@ -1,360 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A field represents a single field found in a struct.
|
|
||||||
type field struct {
|
|
||||||
name string
|
|
||||||
tag bool
|
|
||||||
index []int
|
|
||||||
typ reflect.Type
|
|
||||||
omitEmpty bool
|
|
||||||
flow bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// byName sorts field by name, breaking ties with depth,
|
|
||||||
// then breaking ties with "name came from json tag", then
|
|
||||||
// breaking ties with index sequence.
|
|
||||||
type byName []field
|
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
|
||||||
if x[i].name != x[j].name {
|
|
||||||
return x[i].name < x[j].name
|
|
||||||
}
|
|
||||||
if len(x[i].index) != len(x[j].index) {
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
if x[i].tag != x[j].tag {
|
|
||||||
return x[i].tag
|
|
||||||
}
|
|
||||||
return byIndex(x).Less(i, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byIndex sorts field by index sequence.
|
|
||||||
type byIndex []field
|
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
|
||||||
for k, xik := range x[i].index {
|
|
||||||
if k >= len(x[j].index) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if xik != x[j].index[k] {
|
|
||||||
return xik < x[j].index[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeFields returns a list of fields that JSON should recognize for the given type.
|
|
||||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
|
||||||
// and then any reachable anonymous structs.
|
|
||||||
func typeFields(t reflect.Type) []field {
|
|
||||||
// Anonymous fields to explore at the current level and the next.
|
|
||||||
current := []field{}
|
|
||||||
next := []field{{typ: t}}
|
|
||||||
|
|
||||||
// Count of queued names for current level and the next.
|
|
||||||
count := map[reflect.Type]int{}
|
|
||||||
nextCount := map[reflect.Type]int{}
|
|
||||||
|
|
||||||
// Types already visited at an earlier level.
|
|
||||||
visited := map[reflect.Type]bool{}
|
|
||||||
|
|
||||||
// Fields found.
|
|
||||||
var fields []field
|
|
||||||
|
|
||||||
for len(next) > 0 {
|
|
||||||
current, next = next, current[:0]
|
|
||||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
|
||||||
|
|
||||||
for _, f := range current {
|
|
||||||
if visited[f.typ] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
visited[f.typ] = true
|
|
||||||
|
|
||||||
// Scan f.typ for fields to include.
|
|
||||||
for i := 0; i < f.typ.NumField(); i++ {
|
|
||||||
sf := f.typ.Field(i)
|
|
||||||
if sf.PkgPath != "" { // unexported
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tag := sf.Tag.Get("yaml")
|
|
||||||
if tag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
name, opts := parseTag(tag)
|
|
||||||
if !isValidTag(name) {
|
|
||||||
name = ""
|
|
||||||
}
|
|
||||||
index := make([]int, len(f.index)+1)
|
|
||||||
copy(index, f.index)
|
|
||||||
index[len(f.index)] = i
|
|
||||||
|
|
||||||
ft := sf.Type
|
|
||||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
|
||||||
// Follow pointer.
|
|
||||||
ft = ft.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record found field and index sequence.
|
|
||||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
|
||||||
tagged := name != ""
|
|
||||||
if name == "" {
|
|
||||||
name = sf.Name
|
|
||||||
}
|
|
||||||
fields = append(fields, field{name, tagged, index, ft,
|
|
||||||
opts.Contains("omitempty"), opts.Contains("flow")})
|
|
||||||
if count[f.typ] > 1 {
|
|
||||||
// If there were multiple instances, add a second,
|
|
||||||
// so that the annihilation code will see a duplicate.
|
|
||||||
// It only cares about the distinction between 1 or 2,
|
|
||||||
// so don't bother generating any more copies.
|
|
||||||
fields = append(fields, fields[len(fields)-1])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record new anonymous struct to explore in next round.
|
|
||||||
nextCount[ft]++
|
|
||||||
if nextCount[ft] == 1 {
|
|
||||||
next = append(next, field{name: ft.Name(), index: index, typ: ft})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(byName(fields))
|
|
||||||
|
|
||||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
|
||||||
// except that fields with JSON tags are promoted.
|
|
||||||
|
|
||||||
// The fields are sorted in primary order of name, secondary order
|
|
||||||
// of field index length. Loop over names; for each name, delete
|
|
||||||
// hidden fields by choosing the one dominant field that survives.
|
|
||||||
out := fields[:0]
|
|
||||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
|
||||||
// One iteration per name.
|
|
||||||
// Find the sequence of fields with the name of this first field.
|
|
||||||
fi := fields[i]
|
|
||||||
name := fi.name
|
|
||||||
for advance = 1; i+advance < len(fields); advance++ {
|
|
||||||
fj := fields[i+advance]
|
|
||||||
if fj.name != name {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if advance == 1 { // Only one field with this name
|
|
||||||
out = append(out, fi)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dominant, ok := dominantField(fields[i : i+advance])
|
|
||||||
if ok {
|
|
||||||
out = append(out, dominant)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fields = out
|
|
||||||
sort.Sort(byIndex(fields))
|
|
||||||
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// dominantField looks through the fields, all of which are known to
|
|
||||||
// have the same name, to find the single field that dominates the
|
|
||||||
// others using Go's embedding rules, modified by the presence of
|
|
||||||
// JSON tags. If there are multiple top-level fields, the boolean
|
|
||||||
// will be false: This condition is an error in Go and we skip all
|
|
||||||
// the fields.
|
|
||||||
func dominantField(fields []field) (field, bool) {
|
|
||||||
// The fields are sorted in increasing index-length order. The winner
|
|
||||||
// must therefore be one with the shortest index length. Drop all
|
|
||||||
// longer entries, which is easy: just truncate the slice.
|
|
||||||
length := len(fields[0].index)
|
|
||||||
tagged := -1 // Index of first tagged field.
|
|
||||||
for i, f := range fields {
|
|
||||||
if len(f.index) > length {
|
|
||||||
fields = fields[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f.tag {
|
|
||||||
if tagged >= 0 {
|
|
||||||
// Multiple tagged fields at the same level: conflict.
|
|
||||||
// Return no field.
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
tagged = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tagged >= 0 {
|
|
||||||
return fields[tagged], true
|
|
||||||
}
|
|
||||||
// All remaining fields have the same length. If there's more than one,
|
|
||||||
// we have a conflict (two fields named "X" at the same level) and we
|
|
||||||
// return no field.
|
|
||||||
if len(fields) > 1 {
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
return fields[0], true
|
|
||||||
}
|
|
||||||
|
|
||||||
// fieldCache maps a struct type to its computed field list so the
// reflection walk in typeFields is done once per type. The embedded
// RWMutex guards m; reads take RLock, writes take Lock.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
|
|
||||||
|
|
||||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
|
||||||
func cachedTypeFields(t reflect.Type) []field {
|
|
||||||
fieldCache.RLock()
|
|
||||||
f := fieldCache.m[t]
|
|
||||||
fieldCache.RUnlock()
|
|
||||||
if f != nil {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute fields without lock.
|
|
||||||
// Might duplicate effort but won't hold other computations back.
|
|
||||||
f = typeFields(t)
|
|
||||||
if f == nil {
|
|
||||||
f = []field{}
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldCache.Lock()
|
|
||||||
if fieldCache.m == nil {
|
|
||||||
fieldCache.m = map[reflect.Type][]field{}
|
|
||||||
}
|
|
||||||
fieldCache.m[t] = f
|
|
||||||
fieldCache.Unlock()
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
// Produced by parseTag and queried with Contains.
type tagOptions string
|
|
||||||
|
|
||||||
// isValidTag reports whether s is usable as a JSON tag name: it must be
// non-empty and consist of letters, digits, and a restricted set of
// punctuation (backslash and quote characters are reserved).
func isValidTag(s string) bool {
	if len(s) == 0 {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, r := range s {
		if strings.ContainsRune(allowedPunct, r) {
			// Any punctuation in the allowed set may appear in a tag name.
			continue
		}
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
			return false
		}
	}
	return true
}
|
|
||||||
|
|
||||||
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
|
|
||||||
for _, i := range index {
|
|
||||||
if v.Kind() == reflect.Ptr {
|
|
||||||
if v.IsNil() {
|
|
||||||
return reflect.Value{}
|
|
||||||
}
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
v = v.Field(i)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func typeByIndex(t reflect.Type, index []int) reflect.Type {
|
|
||||||
for _, i := range index {
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
t = t.Elem()
|
|
||||||
}
|
|
||||||
t = t.Field(i).Type
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
|
||||||
// It implements the methods to sort by string.
|
|
||||||
type stringValues []reflect.Value
|
|
||||||
|
|
||||||
func (sv stringValues) Len() int { return len(sv) }
|
|
||||||
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
|
|
||||||
func (sv stringValues) Less(i, j int) bool {
|
|
||||||
av, ak := getElem(sv[i])
|
|
||||||
bv, bk := getElem(sv[j])
|
|
||||||
if ak == reflect.String && bk == reflect.String {
|
|
||||||
return av.String() < bv.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return ak < bk
|
|
||||||
}
|
|
||||||
|
|
||||||
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
|
|
||||||
k := v.Kind()
|
|
||||||
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
k = v.Kind()
|
|
||||||
}
|
|
||||||
|
|
||||||
return v, k
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTag splits a struct field's json tag into its name and
|
|
||||||
// comma-separated options.
|
|
||||||
func parseTag(tag string) (string, tagOptions) {
|
|
||||||
if idx := strings.Index(tag, ","); idx != -1 {
|
|
||||||
return tag[:idx], tagOptions(tag[idx+1:])
|
|
||||||
}
|
|
||||||
return tag, tagOptions("")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains reports whether a comma-separated list of options
|
|
||||||
// contains a particular substr flag. substr must be surrounded by a
|
|
||||||
// string boundary or commas.
|
|
||||||
func (o tagOptions) Contains(optionName string) bool {
|
|
||||||
if len(o) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
s := string(o)
|
|
||||||
for s != "" {
|
|
||||||
var next string
|
|
||||||
i := strings.Index(s, ",")
|
|
||||||
if i >= 0 {
|
|
||||||
s, next = s[:i], s[i+1:]
|
|
||||||
}
|
|
||||||
if s == optionName {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
s = next
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
128
vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
generated
vendored
128
vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
generated
vendored
@ -1,128 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
/*
 * Set the writer error and return 0.
 */

// yaml_emitter_set_writer_error records a writer failure on the emitter
// (error category plus human-readable message) and always returns false,
// so callers can `return yaml_emitter_set_writer_error(...)` directly.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.error = yaml_WRITER_ERROR
	emitter.problem = problem

	return false
}
|
|
||||||
|
|
||||||
/*
 * Flush the output buffer.
 */

// yaml_emitter_flush writes the emitter's internal UTF-8 buffer to the
// configured write handler. For UTF-8 output the buffer is passed through
// unchanged; for UTF-16LE/BE output it is recoded (including surrogate
// pairs for code points above U+FFFF) into raw_buffer first. Returns
// false (via yaml_emitter_set_writer_error) on any write failure.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
	if emitter.write_handler == nil {
		panic("Write handler must be set") /* Write handler must be set. */
	}
	if emitter.encoding == yaml_ANY_ENCODING {
		panic("Encoding must be set") /* Output encoding must be set. */
	}

	/* Check if the buffer is empty: nothing to do. */

	if emitter.buffer_pos == 0 {
		return true
	}

	/* If the output encoding is UTF-8, we don't need to recode the buffer. */

	if emitter.encoding == yaml_UTF8_ENCODING {
		if err := emitter.write_handler(emitter,
			emitter.buffer[:emitter.buffer_pos]); err != nil {
			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
		}
		emitter.buffer_pos = 0
		return true
	}

	/* Recode the buffer into the raw buffer. */

	// low/high select the byte order: for UTF-16LE the low byte of each
	// 16-bit unit comes first, for UTF-16BE the high byte comes first.
	var low, high int
	if emitter.encoding == yaml_UTF16LE_ENCODING {
		low, high = 0, 1
	} else {
		high, low = 1, 0
	}

	pos := 0
	for pos < emitter.buffer_pos {

		/*
		 * See the "reader.c" code for more details on UTF-8 encoding. Note
		 * that we assume that the buffer contains a valid UTF-8 sequence.
		 */

		/* Read the next UTF-8 character: leading byte gives width and seed bits. */

		octet := emitter.buffer[pos]

		var w int
		var value rune
		switch {
		case octet&0x80 == 0x00:
			w, value = 1, rune(octet&0x7F)
		case octet&0xE0 == 0xC0:
			w, value = 2, rune(octet&0x1F)
		case octet&0xF0 == 0xE0:
			w, value = 3, rune(octet&0x0F)
		case octet&0xF8 == 0xF0:
			w, value = 4, rune(octet&0x07)
		}

		// Fold in the 6 payload bits of each continuation byte.
		for k := 1; k < w; k++ {
			octet = emitter.buffer[pos+k]
			value = (value << 6) + (rune(octet) & 0x3F)
		}

		pos += w

		/* Write the character. */

		if value < 0x10000 {
			// BMP code point: a single 16-bit unit.
			var b [2]byte
			b[high] = byte(value >> 8)
			b[low] = byte(value & 0xFF)
			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
		} else {
			/* Write the character using a surrogate pair (check "reader.c"). */
			// High surrogate 0xD800+top bits, low surrogate 0xDC00+bottom bits.

			var b [4]byte
			value -= 0x10000
			b[high] = byte(0xD8 + (value >> 18))
			b[low] = byte((value >> 10) & 0xFF)
			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
			b[low+2] = byte(value & 0xFF)
			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
		}
	}

	/* Write the recoded raw buffer. */

	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
	}

	// Reset both buffers for reuse; raw_buffer keeps its capacity.
	emitter.buffer_pos = 0
	emitter.raw_buffer = emitter.raw_buffer[:0]
	return true
}
|
|
22
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
generated
vendored
22
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
// Library version identifiers. yaml_VERSION_STRING must be kept in sync
// with the numeric major/minor/patch components.
const (
	yaml_VERSION_MAJOR  = 0
	yaml_VERSION_MINOR  = 1
	yaml_VERSION_PATCH  = 6
	yaml_VERSION_STRING = "0.1.6"
)
|
|
891
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
generated
vendored
891
vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
generated
vendored
@ -1,891 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
// Buffer sizing constants for the parser and emitter.
const (
	// The size of the raw (undecoded) input buffer, in bytes.
	INPUT_RAW_BUFFER_SIZE = 1024

	/*
	 * The size of the input buffer.
	 *
	 * It should be possible to decode the whole raw buffer.
	 */
	INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)

	/*
	 * The size of the output buffer.
	 */

	OUTPUT_BUFFER_SIZE = 512

	/*
	 * The size of the output raw buffer.
	 *
	 * It should be possible to encode the whole output buffer.
	 */

	OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)

	// Initial capacities for the parser's internal stacks and queues.
	INITIAL_STACK_SIZE = 16
	INITIAL_QUEUE_SIZE = 16
)
|
|
||||||
|
|
||||||
// width returns the byte length of the UTF-8 sequence whose leading
// byte is b, or 0 if b is not a valid leading byte (e.g. a
// continuation byte).
func width(b byte) int {
	switch {
	case b&0x80 == 0:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	default:
		return 0
	}
}
|
|
||||||
|
|
||||||
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
|
|
||||||
w := width(src[*src_pos])
|
|
||||||
switch w {
|
|
||||||
case 4:
|
|
||||||
dest[*dest_pos+3] = src[*src_pos+3]
|
|
||||||
fallthrough
|
|
||||||
case 3:
|
|
||||||
dest[*dest_pos+2] = src[*src_pos+2]
|
|
||||||
fallthrough
|
|
||||||
case 2:
|
|
||||||
dest[*dest_pos+1] = src[*src_pos+1]
|
|
||||||
fallthrough
|
|
||||||
case 1:
|
|
||||||
dest[*dest_pos] = src[*src_pos]
|
|
||||||
default:
|
|
||||||
panic("invalid width")
|
|
||||||
}
|
|
||||||
*dest_pos += w
|
|
||||||
*src_pos += w
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is an alphabetical
|
|
||||||
// * character, a digit, '_', or '-'.
|
|
||||||
// */
|
|
||||||
|
|
||||||
// is_alpha reports whether b is an ASCII letter, digit, '_', or '-'.
func is_alpha(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case 'A' <= b && b <= 'Z':
		return true
	case 'a' <= b && b <= 'z':
		return true
	}
	return b == '_' || b == '-'
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// is_digit reports whether b is an ASCII decimal digit.
func is_digit(b byte) bool {
	return '0' <= b && b <= '9'
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Get the value of a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// as_digit returns the numeric value of the ASCII decimal digit b.
func as_digit(b byte) int {
	const zero = '0'
	return int(b) - zero
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// is_hex reports whether b is an ASCII hexadecimal digit.
func is_hex(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case 'A' <= b && b <= 'F':
		return true
	case 'a' <= b && b <= 'f':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// as_hex returns the numeric value of the ASCII hex digit b
// (accepting both upper and lower case letters).
func as_hex(b byte) int {
	switch {
	case 'A' <= b && b <= 'F':
		return int(b) - 'A' + 10
	case 'a' <= b && b <= 'f':
		return int(b) - 'a' + 10
	default:
		return int(b) - '0'
	}
}
|
|
||||||
|
|
||||||
// #define AS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0'))
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, tab, or NUL.
|
|
||||||
// */
|
|
||||||
func is_blankz_at(b []byte, i int) bool {
|
|
||||||
return is_blank(b[i]) || is_breakz_at(b, i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a line break.
|
|
||||||
// */
|
|
||||||
// is_break_at reports whether the bytes starting at index i encode a
// YAML line break: CR, LF, NEL (U+0085), LS (U+2028), or PS (U+2029).
func is_break_at(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n':
		return true
	case 0xC2: /* NEL (#x85) */
		return b[i+1] == 0x85
	case 0xE2: /* LS (#x2028) / PS (#x2029) */
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
|
|
||||||
|
|
||||||
func is_breakz_at(b []byte, i int) bool {
|
|
||||||
return is_break_at(b, i) || is_z(b[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// is_crlf_at reports whether a CR LF pair starts at index i.
func is_crlf_at(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is NUL.
|
|
||||||
// */
|
|
||||||
// is_z reports whether b is the NUL byte.
func is_z(b byte) bool {
	return b == 0
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is space.
|
|
||||||
// */
|
|
||||||
// is_space reports whether b is an ASCII space character.
func is_space(b byte) bool {
	return b == 0x20
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is tab.
|
|
||||||
// */
|
|
||||||
// is_tab reports whether b is a horizontal tab.
func is_tab(b byte) bool {
	return b == 0x09
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is blank (space or tab).
|
|
||||||
// */
|
|
||||||
// is_blank reports whether b is a blank character (space or tab).
func is_blank(b byte) bool {
	return b == ' ' || b == '\t'
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character is ASCII.
|
|
||||||
// */
|
|
||||||
// is_ascii reports whether b is a 7-bit ASCII byte.
func is_ascii(b byte) bool {
	return b < 0x80
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character can be printed unescaped.
|
|
||||||
// */
|
|
||||||
// is_printable_at reports whether the UTF-8 character starting at index
// i may be emitted without escaping: LF, printable ASCII, or a
// non-surrogate, non-BOM, non-noncharacter code point at or above U+00A0.
func is_printable_at(b []byte, i int) bool {
	c := b[i]
	switch {
	case c == 0x0A: /* LF */
		return true
	case 0x20 <= c && c <= 0x7E: /* printable ASCII */
		return true
	case c == 0xC2: /* two-byte range: require >= U+00A0 */
		return b[i+1] >= 0xA0
	case c > 0xC2 && c < 0xED:
		return true
	case c == 0xED: /* exclude the surrogate range */
		return b[i+1] < 0xA0
	case c == 0xEE:
		return true
	case c == 0xEF: /* exclude U+FEFF, U+FFFE, U+FFFF */
		return !(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))
	}
	return false
}
|
|
||||||
|
|
||||||
// insert_token places *token into the parser's token queue. A negative
// pos appends the token at the tail; otherwise the token is inserted at
// offset pos relative to the current queue head, shifting later tokens
// one slot to the right.
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	// collapse the slice: when the queue is full but has consumed space
	// at the front, slide the live tokens back to index 0 so append can
	// reuse the existing capacity instead of reallocating.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			// move the tokens down
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		// readjust the length
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}

	// Grow by one; for an append (pos < 0) this already places the token.
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	// Shift the tail right by one slot and drop the token into place.
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}
|
|
||||||
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is BOM.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// is_bom_at reports whether the three bytes starting at index i are the
// UTF-8 byte order mark (EF BB BF).
func is_bom_at(b []byte, i int) bool {
	const b0, b1, b2 = 0xEF, 0xBB, 0xBF
	return b[i] == b0 && b[i+1] == b1 && b[i+2] == b2
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// #ifdef HAVE_CONFIG_H
|
|
||||||
// #include <config.h>
|
|
||||||
// #endif
|
|
||||||
//
|
|
||||||
// #include "./yaml.h"
|
|
||||||
//
|
|
||||||
// #include <assert.h>
|
|
||||||
// #include <limits.h>
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Memory management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void *)
|
|
||||||
// yaml_malloc(size_t size);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void *)
|
|
||||||
// yaml_realloc(void *ptr, size_t size);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(void)
|
|
||||||
// yaml_free(void *ptr);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(yaml_char_t *)
|
|
||||||
// yaml_strdup(const yaml_char_t *);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Reader: Ensure that the buffer contains at least `length` characters.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Scanner: Ensure that the token stack contains at least one token ready.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the input raw buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INPUT_RAW_BUFFER_SIZE 16384
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the input buffer.
|
|
||||||
// *
|
|
||||||
// * It should be possible to decode the whole raw buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the output buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define OUTPUT_BUFFER_SIZE 16384
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of the output raw buffer.
|
|
||||||
// *
|
|
||||||
// * It should be possible to encode the whole output buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * The size of other stacks and queues.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define INITIAL_STACK_SIZE 16
|
|
||||||
// #define INITIAL_QUEUE_SIZE 16
|
|
||||||
// #define INITIAL_STRING_SIZE 16
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Buffer management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define BUFFER_INIT(context,buffer,size) \
|
|
||||||
// (((buffer).start = yaml_malloc(size)) ? \
|
|
||||||
// ((buffer).last = (buffer).pointer = (buffer).start, \
|
|
||||||
// (buffer).end = (buffer).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define BUFFER_DEL(context,buffer) \
|
|
||||||
// (yaml_free((buffer).start), \
|
|
||||||
// (buffer).start = (buffer).pointer = (buffer).end = 0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * String management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// typedef struct {
|
|
||||||
// yaml_char_t *start;
|
|
||||||
// yaml_char_t *end;
|
|
||||||
// yaml_char_t *pointer;
|
|
||||||
// } yaml_string_t;
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_string_extend(yaml_char_t **start,
|
|
||||||
// yaml_char_t **pointer, yaml_char_t **end);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_string_join(
|
|
||||||
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
|
|
||||||
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
|
|
||||||
//
|
|
||||||
// #define NULL_STRING { NULL, NULL, NULL }
|
|
||||||
//
|
|
||||||
// #define STRING(string,length) { (string), (string)+(length), (string) }
|
|
||||||
//
|
|
||||||
// #define STRING_ASSIGN(value,string,length) \
|
|
||||||
// ((value).start = (string), \
|
|
||||||
// (value).end = (string)+(length), \
|
|
||||||
// (value).pointer = (string))
|
|
||||||
//
|
|
||||||
// #define STRING_INIT(context,string,size) \
|
|
||||||
// (((string).start = yaml_malloc(size)) ? \
|
|
||||||
// ((string).pointer = (string).start, \
|
|
||||||
// (string).end = (string).start+(size), \
|
|
||||||
// memset((string).start, 0, (size)), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define STRING_DEL(context,string) \
|
|
||||||
// (yaml_free((string).start), \
|
|
||||||
// (string).start = (string).pointer = (string).end = 0)
|
|
||||||
//
|
|
||||||
// #define STRING_EXTEND(context,string) \
|
|
||||||
// (((string).pointer+5 < (string).end) \
|
|
||||||
// || yaml_string_extend(&(string).start, \
|
|
||||||
// &(string).pointer, &(string).end))
|
|
||||||
//
|
|
||||||
// #define CLEAR(context,string) \
|
|
||||||
// ((string).pointer = (string).start, \
|
|
||||||
// memset((string).start, 0, (string).end-(string).start))
|
|
||||||
//
|
|
||||||
// #define JOIN(context,string_a,string_b) \
|
|
||||||
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
|
|
||||||
// &(string_a).end, &(string_b).start, \
|
|
||||||
// &(string_b).pointer, &(string_b).end)) ? \
|
|
||||||
// ((string_b).pointer = (string_b).start, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * String check operations.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check the octet at the specified position.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define CHECK_AT(string,octet,offset) \
|
|
||||||
// ((string).pointer[offset] == (yaml_char_t)(octet))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check the current octet in the buffer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is an alphabetical
|
|
||||||
// * character, a digit, '_', or '-'.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_ALPHA_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
|
|
||||||
// (string).pointer[offset] == '_' || \
|
|
||||||
// (string).pointer[offset] == '-')
|
|
||||||
//
|
|
||||||
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_DIGIT_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9'))
|
|
||||||
//
|
|
||||||
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define AS_DIGIT_AT(string,offset) \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0')
|
|
||||||
//
|
|
||||||
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) '9') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f'))
|
|
||||||
//
|
|
||||||
// #define IS_HEX(string) IS_HEX_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Get the value of a hex-digit.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define AS_HEX_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
|
|
||||||
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
|
|
||||||
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
|
|
||||||
// ((string).pointer[offset] - (yaml_char_t) '0'))
|
|
||||||
//
|
|
||||||
// #define AS_HEX(string) AS_HEX_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is ASCII.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_ASCII_AT(string,offset) \
|
|
||||||
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
|
|
||||||
//
|
|
||||||
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character can be printed unescaped.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_PRINTABLE_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
|
|
||||||
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
|
|
||||||
// && (string).pointer[offset] <= 0x7E) \
|
|
||||||
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
|
|
||||||
// && (string).pointer[offset+1] >= 0xA0) \
|
|
||||||
// || ((string).pointer[offset] > 0xC2 \
|
|
||||||
// && (string).pointer[offset] < 0xED) \
|
|
||||||
// || ((string).pointer[offset] == 0xED \
|
|
||||||
// && (string).pointer[offset+1] < 0xA0) \
|
|
||||||
// || ((string).pointer[offset] == 0xEE) \
|
|
||||||
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
|
|
||||||
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
|
|
||||||
// && (string).pointer[offset+2] == 0xBF) \
|
|
||||||
// && !((string).pointer[offset+1] == 0xBF \
|
|
||||||
// && ((string).pointer[offset+2] == 0xBE \
|
|
||||||
// || (string).pointer[offset+2] == 0xBF))))
|
|
||||||
//
|
|
||||||
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_Z(string) IS_Z_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is BOM.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BOM_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\xEF',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\xBB',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
|
|
||||||
//
|
|
||||||
// #define IS_BOM(string) IS_BOM_AT(string,0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is space.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is tab.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
|
|
||||||
//
|
|
||||||
// #define IS_TAB(string) IS_TAB_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is blank (space or tab).
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BLANK_AT(string,offset) \
|
|
||||||
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character at the specified position is a line break.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BREAK_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
|
|
||||||
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
|
|
||||||
// || (CHECK_AT((string),'\xC2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
|
|
||||||
// || (CHECK_AT((string),'\xE2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x80',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
|
|
||||||
// || (CHECK_AT((string),'\xE2',(offset)) \
|
|
||||||
// && CHECK_AT((string),'\x80',(offset)+1) \
|
|
||||||
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
|
|
||||||
//
|
|
||||||
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
|
|
||||||
//
|
|
||||||
// #define IS_CRLF_AT(string,offset) \
|
|
||||||
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
|
|
||||||
//
|
|
||||||
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BREAKZ_AT(string,offset) \
|
|
||||||
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_SPACEZ_AT(string,offset) \
|
|
||||||
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Check if the character is a line break, space, tab, or NUL.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define IS_BLANKZ_AT(string,offset) \
|
|
||||||
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
|
|
||||||
//
|
|
||||||
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Determine the width of the character.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define WIDTH_AT(string,offset) \
|
|
||||||
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
|
|
||||||
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
|
|
||||||
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
|
|
||||||
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
|
|
||||||
//
|
|
||||||
// #define WIDTH(string) WIDTH_AT((string),0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Move the string pointer to the next character.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define MOVE(string) ((string).pointer += WIDTH((string)))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Copy a character and move the pointers of both strings.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define COPY(string_a,string_b) \
|
|
||||||
// ((*(string_b).pointer & 0x80) == 0x00 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : \
|
|
||||||
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
|
|
||||||
// (*((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++), \
|
|
||||||
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Stack and queue management.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_stack_extend(void **start, void **top, void **end);
|
|
||||||
//
|
|
||||||
// yaml_DECLARE(int)
|
|
||||||
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
|
|
||||||
//
|
|
||||||
// #define STACK_INIT(context,stack,size) \
|
|
||||||
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
|
|
||||||
// ((stack).top = (stack).start, \
|
|
||||||
// (stack).end = (stack).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define STACK_DEL(context,stack) \
|
|
||||||
// (yaml_free((stack).start), \
|
|
||||||
// (stack).start = (stack).top = (stack).end = 0)
|
|
||||||
//
|
|
||||||
// #define STACK_EMPTY(context,stack) \
|
|
||||||
// ((stack).start == (stack).top)
|
|
||||||
//
|
|
||||||
// #define PUSH(context,stack,value) \
|
|
||||||
// (((stack).top != (stack).end \
|
|
||||||
// || yaml_stack_extend((void **)&(stack).start, \
|
|
||||||
// (void **)&(stack).top, (void **)&(stack).end)) ? \
|
|
||||||
// (*((stack).top++) = value, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define POP(context,stack) \
|
|
||||||
// (*(--(stack).top))
|
|
||||||
//
|
|
||||||
// #define QUEUE_INIT(context,queue,size) \
|
|
||||||
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
|
|
||||||
// ((queue).head = (queue).tail = (queue).start, \
|
|
||||||
// (queue).end = (queue).start+(size), \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define QUEUE_DEL(context,queue) \
|
|
||||||
// (yaml_free((queue).start), \
|
|
||||||
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
|
|
||||||
//
|
|
||||||
// #define QUEUE_EMPTY(context,queue) \
|
|
||||||
// ((queue).head == (queue).tail)
|
|
||||||
//
|
|
||||||
// #define ENQUEUE(context,queue,value) \
|
|
||||||
// (((queue).tail != (queue).end \
|
|
||||||
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
|
|
||||||
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
|
|
||||||
// (*((queue).tail++) = value, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// #define DEQUEUE(context,queue) \
|
|
||||||
// (*((queue).head++))
|
|
||||||
//
|
|
||||||
// #define QUEUE_INSERT(context,queue,index,value) \
|
|
||||||
// (((queue).tail != (queue).end \
|
|
||||||
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
|
|
||||||
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
|
|
||||||
// (memmove((queue).head+(index)+1,(queue).head+(index), \
|
|
||||||
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
|
|
||||||
// *((queue).head+(index)) = value, \
|
|
||||||
// (queue).tail++, \
|
|
||||||
// 1) : \
|
|
||||||
// ((context)->error = yaml_MEMORY_ERROR, \
|
|
||||||
// 0))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Token initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
|
|
||||||
// (memset(&(token), 0, sizeof(yaml_token_t)), \
|
|
||||||
// (token).type = (token_type), \
|
|
||||||
// (token).start_mark = (token_start_mark), \
|
|
||||||
// (token).end_mark = (token_end_mark))
|
|
||||||
//
|
|
||||||
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.stream_start.encoding = (token_encoding))
|
|
||||||
//
|
|
||||||
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.alias.value = (token_value))
|
|
||||||
//
|
|
||||||
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.anchor.value = (token_value))
|
|
||||||
//
|
|
||||||
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.tag.handle = (token_handle), \
|
|
||||||
// (token).data.tag.suffix = (token_suffix))
|
|
||||||
//
|
|
||||||
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.scalar.value = (token_value), \
|
|
||||||
// (token).data.scalar.length = (token_length), \
|
|
||||||
// (token).data.scalar.style = (token_style))
|
|
||||||
//
|
|
||||||
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.version_directive.major = (token_major), \
|
|
||||||
// (token).data.version_directive.minor = (token_minor))
|
|
||||||
//
|
|
||||||
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
|
|
||||||
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
|
|
||||||
// (token).data.tag_directive.handle = (token_handle), \
|
|
||||||
// (token).data.tag_directive.prefix = (token_prefix))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Event initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
|
|
||||||
// (memset(&(event), 0, sizeof(yaml_event_t)), \
|
|
||||||
// (event).type = (event_type), \
|
|
||||||
// (event).start_mark = (event_start_mark), \
|
|
||||||
// (event).end_mark = (event_end_mark))
|
|
||||||
//
|
|
||||||
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.stream_start.encoding = (event_encoding))
|
|
||||||
//
|
|
||||||
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
|
|
||||||
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.document_start.version_directive = (event_version_directive), \
|
|
||||||
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
|
|
||||||
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
|
|
||||||
// (event).data.document_start.implicit = (event_implicit))
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.document_end.implicit = (event_implicit))
|
|
||||||
//
|
|
||||||
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.alias.anchor = (event_anchor))
|
|
||||||
//
|
|
||||||
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
|
|
||||||
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.scalar.anchor = (event_anchor), \
|
|
||||||
// (event).data.scalar.tag = (event_tag), \
|
|
||||||
// (event).data.scalar.value = (event_value), \
|
|
||||||
// (event).data.scalar.length = (event_length), \
|
|
||||||
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
|
|
||||||
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
|
|
||||||
// (event).data.scalar.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
|
|
||||||
// event_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.sequence_start.anchor = (event_anchor), \
|
|
||||||
// (event).data.sequence_start.tag = (event_tag), \
|
|
||||||
// (event).data.sequence_start.implicit = (event_implicit), \
|
|
||||||
// (event).data.sequence_start.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
|
|
||||||
// event_implicit,event_style,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
|
|
||||||
// (event).data.mapping_start.anchor = (event_anchor), \
|
|
||||||
// (event).data.mapping_start.tag = (event_tag), \
|
|
||||||
// (event).data.mapping_start.implicit = (event_implicit), \
|
|
||||||
// (event).data.mapping_start.style = (event_style))
|
|
||||||
//
|
|
||||||
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
|
|
||||||
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Document initializer.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
|
|
||||||
// document_version_directive,document_tag_directives_start, \
|
|
||||||
// document_tag_directives_end,document_start_implicit, \
|
|
||||||
// document_end_implicit,document_start_mark,document_end_mark) \
|
|
||||||
// (memset(&(document), 0, sizeof(yaml_document_t)), \
|
|
||||||
// (document).nodes.start = (document_nodes_start), \
|
|
||||||
// (document).nodes.end = (document_nodes_end), \
|
|
||||||
// (document).nodes.top = (document_nodes_start), \
|
|
||||||
// (document).version_directive = (document_version_directive), \
|
|
||||||
// (document).tag_directives.start = (document_tag_directives_start), \
|
|
||||||
// (document).tag_directives.end = (document_tag_directives_end), \
|
|
||||||
// (document).start_implicit = (document_start_implicit), \
|
|
||||||
// (document).end_implicit = (document_end_implicit), \
|
|
||||||
// (document).start_mark = (document_start_mark), \
|
|
||||||
// (document).end_mark = (document_end_mark))
|
|
||||||
//
|
|
||||||
// /*
|
|
||||||
// * Node initializers.
|
|
||||||
// */
|
|
||||||
//
|
|
||||||
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
|
|
||||||
// (memset(&(node), 0, sizeof(yaml_node_t)), \
|
|
||||||
// (node).type = (node_type), \
|
|
||||||
// (node).tag = (node_tag), \
|
|
||||||
// (node).start_mark = (node_start_mark), \
|
|
||||||
// (node).end_mark = (node_end_mark))
|
|
||||||
//
|
|
||||||
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.scalar.value = (node_value), \
|
|
||||||
// (node).data.scalar.length = (node_length), \
|
|
||||||
// (node).data.scalar.style = (node_style))
|
|
||||||
//
|
|
||||||
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.sequence.items.start = (node_items_start), \
|
|
||||||
// (node).data.sequence.items.end = (node_items_end), \
|
|
||||||
// (node).data.sequence.items.top = (node_items_start), \
|
|
||||||
// (node).data.sequence.style = (node_style))
|
|
||||||
//
|
|
||||||
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
|
|
||||||
// node_style,start_mark,end_mark) \
|
|
||||||
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
|
|
||||||
// (node).data.mapping.pairs.start = (node_pairs_start), \
|
|
||||||
// (node).data.mapping.pairs.end = (node_pairs_end), \
|
|
||||||
// (node).data.mapping.pairs.top = (node_pairs_start), \
|
|
||||||
// (node).data.mapping.style = (node_style))
|
|
||||||
//
|
|
953
vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
generated
vendored
953
vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
generated
vendored
@ -1,953 +0,0 @@
|
|||||||
/*
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package candiedyaml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The version directive data. */
|
|
||||||
type yaml_version_directive_t struct {
|
|
||||||
major int // The major version number
|
|
||||||
minor int // The minor version number
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The tag directive data. */
|
|
||||||
type yaml_tag_directive_t struct {
|
|
||||||
handle []byte // The tag handle
|
|
||||||
prefix []byte // The tag prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The stream encoding. */
|
|
||||||
type yaml_encoding_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the parser choose the encoding. */
|
|
||||||
yaml_ANY_ENCODING yaml_encoding_t = iota
|
|
||||||
/** The defau lt UTF-8 encoding. */
|
|
||||||
yaml_UTF8_ENCODING
|
|
||||||
/** The UTF-16-LE encoding with BOM. */
|
|
||||||
yaml_UTF16LE_ENCODING
|
|
||||||
/** The UTF-16-BE encoding with BOM. */
|
|
||||||
yaml_UTF16BE_ENCODING
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Line break types. */
|
|
||||||
type yaml_break_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
|
|
||||||
yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
|
|
||||||
yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
|
|
||||||
yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Many bad things could happen with the parser and emitter. */
|
|
||||||
type YAML_error_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** No error is produced. */
|
|
||||||
yaml_NO_ERROR YAML_error_type_t = iota
|
|
||||||
|
|
||||||
/** Cannot allocate or reallocate a block of memory. */
|
|
||||||
yaml_MEMORY_ERROR
|
|
||||||
|
|
||||||
/** Cannot read or decode the input stream. */
|
|
||||||
yaml_READER_ERROR
|
|
||||||
/** Cannot scan the input stream. */
|
|
||||||
yaml_SCANNER_ERROR
|
|
||||||
/** Cannot parse the input stream. */
|
|
||||||
yaml_PARSER_ERROR
|
|
||||||
/** Cannot compose a YAML document. */
|
|
||||||
yaml_COMPOSER_ERROR
|
|
||||||
|
|
||||||
/** Cannot write to the output stream. */
|
|
||||||
yaml_WRITER_ERROR
|
|
||||||
/** Cannot emit a YAML stream. */
|
|
||||||
yaml_EMITTER_ERROR
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The pointer position. */
|
|
||||||
type YAML_mark_t struct {
|
|
||||||
/** The position index. */
|
|
||||||
index int
|
|
||||||
|
|
||||||
/** The position line. */
|
|
||||||
line int
|
|
||||||
|
|
||||||
/** The position column. */
|
|
||||||
column int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m YAML_mark_t) String() string {
|
|
||||||
return fmt.Sprintf("line %d, column %d", m.line, m.column)
|
|
||||||
}
|
|
||||||
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup styles Node Styles
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_style_t int
|
|
||||||
|
|
||||||
/** Scalar styles. */
|
|
||||||
type yaml_scalar_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
|
|
||||||
|
|
||||||
/** The plain scalar style. */
|
|
||||||
yaml_PLAIN_SCALAR_STYLE
|
|
||||||
|
|
||||||
/** The single-quoted scalar style. */
|
|
||||||
yaml_SINGLE_QUOTED_SCALAR_STYLE
|
|
||||||
/** The double-quoted scalar style. */
|
|
||||||
yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
|
||||||
|
|
||||||
/** The literal scalar style. */
|
|
||||||
yaml_LITERAL_SCALAR_STYLE
|
|
||||||
/** The folded scalar style. */
|
|
||||||
yaml_FOLDED_SCALAR_STYLE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Sequence styles. */
|
|
||||||
type yaml_sequence_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
|
|
||||||
|
|
||||||
/** The block sequence style. */
|
|
||||||
yaml_BLOCK_SEQUENCE_STYLE
|
|
||||||
/** The flow sequence style. */
|
|
||||||
yaml_FLOW_SEQUENCE_STYLE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Mapping styles. */
|
|
||||||
type yaml_mapping_style_t yaml_style_t
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Let the emitter choose the style. */
|
|
||||||
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
|
|
||||||
|
|
||||||
/** The block mapping style. */
|
|
||||||
yaml_BLOCK_MAPPING_STYLE
|
|
||||||
/** The flow mapping style. */
|
|
||||||
yaml_FLOW_MAPPING_STYLE
|
|
||||||
|
|
||||||
/* yaml_FLOW_SET_MAPPING_STYLE */
|
|
||||||
)
|
|
||||||
|
|
||||||
/** @} */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup tokens Tokens
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Token types. */
|
|
||||||
type yaml_token_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty token. */
|
|
||||||
yaml_NO_TOKEN yaml_token_type_t = iota
|
|
||||||
|
|
||||||
/** A STREAM-START token. */
|
|
||||||
yaml_STREAM_START_TOKEN
|
|
||||||
/** A STREAM-END token. */
|
|
||||||
yaml_STREAM_END_TOKEN
|
|
||||||
|
|
||||||
/** A VERSION-DIRECTIVE token. */
|
|
||||||
yaml_VERSION_DIRECTIVE_TOKEN
|
|
||||||
/** A TAG-DIRECTIVE token. */
|
|
||||||
yaml_TAG_DIRECTIVE_TOKEN
|
|
||||||
/** A DOCUMENT-START token. */
|
|
||||||
yaml_DOCUMENT_START_TOKEN
|
|
||||||
/** A DOCUMENT-END token. */
|
|
||||||
yaml_DOCUMENT_END_TOKEN
|
|
||||||
|
|
||||||
/** A BLOCK-SEQUENCE-START token. */
|
|
||||||
yaml_BLOCK_SEQUENCE_START_TOKEN
|
|
||||||
/** A BLOCK-SEQUENCE-END token. */
|
|
||||||
yaml_BLOCK_MAPPING_START_TOKEN
|
|
||||||
/** A BLOCK-END token. */
|
|
||||||
yaml_BLOCK_END_TOKEN
|
|
||||||
|
|
||||||
/** A FLOW-SEQUENCE-START token. */
|
|
||||||
yaml_FLOW_SEQUENCE_START_TOKEN
|
|
||||||
/** A FLOW-SEQUENCE-END token. */
|
|
||||||
yaml_FLOW_SEQUENCE_END_TOKEN
|
|
||||||
/** A FLOW-MAPPING-START token. */
|
|
||||||
yaml_FLOW_MAPPING_START_TOKEN
|
|
||||||
/** A FLOW-MAPPING-END token. */
|
|
||||||
yaml_FLOW_MAPPING_END_TOKEN
|
|
||||||
|
|
||||||
/** A BLOCK-ENTRY token. */
|
|
||||||
yaml_BLOCK_ENTRY_TOKEN
|
|
||||||
/** A FLOW-ENTRY token. */
|
|
||||||
yaml_FLOW_ENTRY_TOKEN
|
|
||||||
/** A KEY token. */
|
|
||||||
yaml_KEY_TOKEN
|
|
||||||
/** A VALUE token. */
|
|
||||||
yaml_VALUE_TOKEN
|
|
||||||
|
|
||||||
/** An ALIAS token. */
|
|
||||||
yaml_ALIAS_TOKEN
|
|
||||||
/** An ANCHOR token. */
|
|
||||||
yaml_ANCHOR_TOKEN
|
|
||||||
/** A TAG token. */
|
|
||||||
yaml_TAG_TOKEN
|
|
||||||
/** A SCALAR token. */
|
|
||||||
yaml_SCALAR_TOKEN
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The token structure. */
|
|
||||||
type yaml_token_t struct {
|
|
||||||
|
|
||||||
/** The token type. */
|
|
||||||
token_type yaml_token_type_t
|
|
||||||
|
|
||||||
/** The token data. */
|
|
||||||
/** The stream start (for @c yaml_STREAM_START_TOKEN). */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN,yaml_TAG_TOKEN ). */
|
|
||||||
/** The anchor (for @c ). */
|
|
||||||
/** The scalar value (for @c ). */
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
/** The tag suffix. */
|
|
||||||
suffix []byte
|
|
||||||
|
|
||||||
/** The scalar value (for @c yaml_SCALAR_TOKEN). */
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
|
|
||||||
/** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
|
|
||||||
version_directive yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
|
|
||||||
prefix []byte
|
|
||||||
|
|
||||||
/** The beginning of the token. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the token. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
|
|
||||||
major, minor int
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup events Events
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Event types. */
|
|
||||||
type yaml_event_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty event. */
|
|
||||||
yaml_NO_EVENT yaml_event_type_t = iota
|
|
||||||
|
|
||||||
/** A STREAM-START event. */
|
|
||||||
yaml_STREAM_START_EVENT
|
|
||||||
/** A STREAM-END event. */
|
|
||||||
yaml_STREAM_END_EVENT
|
|
||||||
|
|
||||||
/** A DOCUMENT-START event. */
|
|
||||||
yaml_DOCUMENT_START_EVENT
|
|
||||||
/** A DOCUMENT-END event. */
|
|
||||||
yaml_DOCUMENT_END_EVENT
|
|
||||||
|
|
||||||
/** An ALIAS event. */
|
|
||||||
yaml_ALIAS_EVENT
|
|
||||||
/** A SCALAR event. */
|
|
||||||
yaml_SCALAR_EVENT
|
|
||||||
|
|
||||||
/** A SEQUENCE-START event. */
|
|
||||||
yaml_SEQUENCE_START_EVENT
|
|
||||||
/** A SEQUENCE-END event. */
|
|
||||||
yaml_SEQUENCE_END_EVENT
|
|
||||||
|
|
||||||
/** A MAPPING-START event. */
|
|
||||||
yaml_MAPPING_START_EVENT
|
|
||||||
/** A MAPPING-END event. */
|
|
||||||
yaml_MAPPING_END_EVENT
|
|
||||||
)
|
|
||||||
|
|
||||||
/** The event structure. */
|
|
||||||
type yaml_event_t struct {
|
|
||||||
|
|
||||||
/** The event type. */
|
|
||||||
event_type yaml_event_type_t
|
|
||||||
|
|
||||||
/** The stream parameters (for @c yaml_STREAM_START_EVENT). */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The beginning and end of the tag directives list. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */
|
|
||||||
/** Is the document indicator implicit? */
|
|
||||||
implicit bool
|
|
||||||
|
|
||||||
/** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The anchor. */
|
|
||||||
anchor []byte
|
|
||||||
|
|
||||||
/** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The tag. */
|
|
||||||
tag []byte
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
|
|
||||||
/** Is the tag optional for the plain style? */
|
|
||||||
plain_implicit bool
|
|
||||||
/** Is the tag optional for any non-plain style? */
|
|
||||||
quoted_implicit bool
|
|
||||||
|
|
||||||
/** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
|
|
||||||
/** The sequence style. */
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_style_t
|
|
||||||
|
|
||||||
/** The beginning of the event. */
|
|
||||||
start_mark, end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @defgroup nodes Nodes
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** The tag @c !!null with the only possible value: @c null. */
|
|
||||||
yaml_NULL_TAG = "tag:yaml.org,2002:null"
|
|
||||||
/** The tag @c !!bool with the values: @c true and @c falce. */
|
|
||||||
yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
|
|
||||||
/** The tag @c !!str for string values. */
|
|
||||||
yaml_STR_TAG = "tag:yaml.org,2002:str"
|
|
||||||
/** The tag @c !!int for integer values. */
|
|
||||||
yaml_INT_TAG = "tag:yaml.org,2002:int"
|
|
||||||
/** The tag @c !!float for float values. */
|
|
||||||
yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
|
|
||||||
/** The tag @c !!timestamp for date and time values. */
|
|
||||||
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
|
|
||||||
|
|
||||||
/** The tag @c !!seq is used to denote sequences. */
|
|
||||||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
|
|
||||||
/** The tag @c !!map is used to denote mapping. */
|
|
||||||
yaml_MAP_TAG = "tag:yaml.org,2002:map"
|
|
||||||
|
|
||||||
/** The default scalar tag is @c !!str. */
|
|
||||||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
|
|
||||||
/** The default sequence tag is @c !!seq. */
|
|
||||||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
|
|
||||||
/** The default mapping tag is @c !!map. */
|
|
||||||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
|
|
||||||
|
|
||||||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
|
||||||
)
|
|
||||||
|
|
||||||
/** Node types. */
|
|
||||||
type yaml_node_type_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** An empty node. */
|
|
||||||
yaml_NO_NODE yaml_node_type_t = iota
|
|
||||||
|
|
||||||
/** A scalar node. */
|
|
||||||
yaml_SCALAR_NODE
|
|
||||||
/** A sequence node. */
|
|
||||||
yaml_SEQUENCE_NODE
|
|
||||||
/** A mapping node. */
|
|
||||||
yaml_MAPPING_NODE
|
|
||||||
)
|
|
||||||
|
|
||||||
/** An element of a sequence node. */
|
|
||||||
type yaml_node_item_t int
|
|
||||||
|
|
||||||
/** An element of a mapping node. */
|
|
||||||
type yaml_node_pair_t struct {
|
|
||||||
/** The key of the element. */
|
|
||||||
key int
|
|
||||||
/** The value of the element. */
|
|
||||||
value int
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The node structure. */
|
|
||||||
type yaml_node_t struct {
|
|
||||||
|
|
||||||
/** The node type. */
|
|
||||||
node_type yaml_node_type_t
|
|
||||||
|
|
||||||
/** The node tag. */
|
|
||||||
tag []byte
|
|
||||||
|
|
||||||
/** The scalar parameters (for @c yaml_SCALAR_NODE). */
|
|
||||||
scalar struct {
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
/** The scalar style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
|
|
||||||
sequence struct {
|
|
||||||
/** The stack of sequence items. */
|
|
||||||
items []yaml_node_item_t
|
|
||||||
/** The sequence style. */
|
|
||||||
style yaml_sequence_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The mapping parameters (for @c yaml_MAPPING_NODE). */
|
|
||||||
mapping struct {
|
|
||||||
/** The stack of mapping pairs (key, value). */
|
|
||||||
pairs []yaml_node_pair_t
|
|
||||||
/** The mapping style. */
|
|
||||||
style yaml_mapping_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The beginning of the node. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the node. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The document structure. */
|
|
||||||
type yaml_document_t struct {
|
|
||||||
|
|
||||||
/** The document nodes. */
|
|
||||||
nodes []yaml_node_t
|
|
||||||
|
|
||||||
/** The version directive. */
|
|
||||||
version_directive *yaml_version_directive_t
|
|
||||||
|
|
||||||
/** The list of tag directives. */
|
|
||||||
tags []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** Is the document start indicator implicit? */
|
|
||||||
start_implicit bool
|
|
||||||
/** Is the document end indicator implicit? */
|
|
||||||
end_implicit bool
|
|
||||||
|
|
||||||
/** The beginning of the document. */
|
|
||||||
start_mark YAML_mark_t
|
|
||||||
/** The end of the document. */
|
|
||||||
end_mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The prototype of a read handler.
|
|
||||||
*
|
|
||||||
* The read handler is called when the parser needs to read more bytes from the
|
|
||||||
* source. The handler should write not more than @a size bytes to the @a
|
|
||||||
* buffer. The number of written bytes should be set to the @a length variable.
|
|
||||||
*
|
|
||||||
* @param[in,out] data A pointer to an application data specified by
|
|
||||||
* yaml_parser_set_input().
|
|
||||||
* @param[out] buffer The buffer to write the data from the source.
|
|
||||||
* @param[in] size The size of the buffer.
|
|
||||||
* @param[out] size_read The actual number of bytes read from the source.
|
|
||||||
*
|
|
||||||
* @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
* the returned value should be @c 0. On EOF, the handler should set the
|
|
||||||
* @a size_read to @c 0 and return @c 1.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This structure holds information about a potential simple key.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_simple_key_t struct {
|
|
||||||
/** Is a simple key possible? */
|
|
||||||
possible bool
|
|
||||||
|
|
||||||
/** Is a simple key required? */
|
|
||||||
required bool
|
|
||||||
|
|
||||||
/** The number of the token. */
|
|
||||||
token_number int
|
|
||||||
|
|
||||||
/** The position mark. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The states of the parser.
|
|
||||||
*/
|
|
||||||
type yaml_parser_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Expect STREAM-START. */
|
|
||||||
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
|
|
||||||
/** Expect the beginning of an implicit document. */
|
|
||||||
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
|
|
||||||
/** Expect DOCUMENT-START. */
|
|
||||||
yaml_PARSE_DOCUMENT_START_STATE
|
|
||||||
/** Expect the content of a document. */
|
|
||||||
yaml_PARSE_DOCUMENT_CONTENT_STATE
|
|
||||||
/** Expect DOCUMENT-END. */
|
|
||||||
yaml_PARSE_DOCUMENT_END_STATE
|
|
||||||
/** Expect a block node. */
|
|
||||||
yaml_PARSE_BLOCK_NODE_STATE
|
|
||||||
/** Expect a block node or indentless sequence. */
|
|
||||||
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
|
|
||||||
/** Expect a flow node. */
|
|
||||||
yaml_PARSE_FLOW_NODE_STATE
|
|
||||||
/** Expect the first entry of a block sequence. */
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
|
|
||||||
/** Expect an entry of a block sequence. */
|
|
||||||
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect an entry of an indentless sequence. */
|
|
||||||
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect the first key of a block mapping. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a block mapping key. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_KEY_STATE
|
|
||||||
/** Expect a block mapping value. */
|
|
||||||
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the first entry of a flow sequence. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
|
|
||||||
/** Expect an entry of a flow sequence. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
|
|
||||||
/** Expect a key of an ordered mapping. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value of an ordered mapping. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the and of an ordered mapping entry. */
|
|
||||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
|
|
||||||
/** Expect the first key of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a key of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_VALUE_STATE
|
|
||||||
/** Expect an empty value of a flow mapping. */
|
|
||||||
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
|
|
||||||
/** Expect nothing. */
|
|
||||||
yaml_PARSE_END_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This structure holds aliases data.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_alias_data_t struct {
|
|
||||||
/** The anchor. */
|
|
||||||
anchor []byte
|
|
||||||
/** The node id. */
|
|
||||||
index int
|
|
||||||
/** The anchor mark. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The parser structure.
|
|
||||||
*
|
|
||||||
* All members are internal. Manage the structure using the @c yaml_parser_
|
|
||||||
* family of functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_parser_t struct {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Error handling
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Error type. */
|
|
||||||
error YAML_error_type_t
|
|
||||||
/** Error description. */
|
|
||||||
problem string
|
|
||||||
/** The byte about which the problem occured. */
|
|
||||||
problem_offset int
|
|
||||||
/** The problematic value (@c -1 is none). */
|
|
||||||
problem_value int
|
|
||||||
/** The problem position. */
|
|
||||||
problem_mark YAML_mark_t
|
|
||||||
/** The error context. */
|
|
||||||
context string
|
|
||||||
/** The context position. */
|
|
||||||
context_mark YAML_mark_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Reader stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Read handler. */
|
|
||||||
read_handler yaml_read_handler_t
|
|
||||||
|
|
||||||
/** Reader input data. */
|
|
||||||
input_reader io.Reader
|
|
||||||
input []byte
|
|
||||||
input_pos int
|
|
||||||
|
|
||||||
/** EOF flag */
|
|
||||||
eof bool
|
|
||||||
|
|
||||||
/** The working buffer. */
|
|
||||||
buffer []byte
|
|
||||||
buffer_pos int
|
|
||||||
|
|
||||||
/* The number of unread characters in the buffer. */
|
|
||||||
unread int
|
|
||||||
|
|
||||||
/** The raw buffer. */
|
|
||||||
raw_buffer []byte
|
|
||||||
raw_buffer_pos int
|
|
||||||
|
|
||||||
/** The input encoding. */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/** The offset of the current position (in bytes). */
|
|
||||||
offset int
|
|
||||||
|
|
||||||
/** The mark of the current position. */
|
|
||||||
mark YAML_mark_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Scanner stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Have we started to scan the input stream? */
|
|
||||||
stream_start_produced bool
|
|
||||||
|
|
||||||
/** Have we reached the end of the input stream? */
|
|
||||||
stream_end_produced bool
|
|
||||||
|
|
||||||
/** The number of unclosed '[' and '{' indicators. */
|
|
||||||
flow_level int
|
|
||||||
|
|
||||||
/** The tokens queue. */
|
|
||||||
tokens []yaml_token_t
|
|
||||||
tokens_head int
|
|
||||||
|
|
||||||
/** The number of tokens fetched from the queue. */
|
|
||||||
tokens_parsed int
|
|
||||||
|
|
||||||
/* Does the tokens queue contain a token ready for dequeueing. */
|
|
||||||
token_available bool
|
|
||||||
|
|
||||||
/** The indentation levels stack. */
|
|
||||||
indents []int
|
|
||||||
|
|
||||||
/** The current indentation level. */
|
|
||||||
indent int
|
|
||||||
|
|
||||||
/** May a simple key occur at the current position? */
|
|
||||||
simple_key_allowed bool
|
|
||||||
|
|
||||||
/** The stack of simple keys. */
|
|
||||||
simple_keys []yaml_simple_key_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Parser stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** The parser states stack. */
|
|
||||||
states []yaml_parser_state_t
|
|
||||||
|
|
||||||
/** The current parser state. */
|
|
||||||
state yaml_parser_state_t
|
|
||||||
|
|
||||||
/** The stack of marks. */
|
|
||||||
marks []YAML_mark_t
|
|
||||||
|
|
||||||
/** The list of TAG directives. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Dumper stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** The alias data. */
|
|
||||||
aliases []yaml_alias_data_t
|
|
||||||
|
|
||||||
/** The currently parsed document. */
|
|
||||||
document *yaml_document_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The prototype of a write handler.
|
|
||||||
*
|
|
||||||
* The write handler is called when the emitter needs to flush the accumulated
|
|
||||||
* characters to the output. The handler should write @a size bytes of the
|
|
||||||
* @a buffer to the output.
|
|
||||||
*
|
|
||||||
* @param[in,out] data A pointer to an application data specified by
|
|
||||||
* yaml_emitter_set_output().
|
|
||||||
* @param[in] buffer The buffer with bytes to be written.
|
|
||||||
* @param[in] size The size of the buffer.
|
|
||||||
*
|
|
||||||
* @returns On success, the handler should return @c 1. If the handler failed,
|
|
||||||
* the returned value should be @c 0.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
|
||||||
|
|
||||||
/** The emitter states. */
|
|
||||||
type yaml_emitter_state_t int
|
|
||||||
|
|
||||||
const (
|
|
||||||
/** Expect STREAM-START. */
|
|
||||||
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
|
||||||
/** Expect the first DOCUMENT-START or STREAM-END. */
|
|
||||||
yaml_EMIT_FIRST_DOCUMENT_START_STATE
|
|
||||||
/** Expect DOCUMENT-START or STREAM-END. */
|
|
||||||
yaml_EMIT_DOCUMENT_START_STATE
|
|
||||||
/** Expect the content of a document. */
|
|
||||||
yaml_EMIT_DOCUMENT_CONTENT_STATE
|
|
||||||
/** Expect DOCUMENT-END. */
|
|
||||||
yaml_EMIT_DOCUMENT_END_STATE
|
|
||||||
/** Expect the first item of a flow sequence. */
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
|
|
||||||
/** Expect an item of a flow sequence. */
|
|
||||||
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
|
|
||||||
/** Expect the first key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect a key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value for a simple key of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
|
|
||||||
/** Expect a value of a flow mapping. */
|
|
||||||
yaml_EMIT_FLOW_MAPPING_VALUE_STATE
|
|
||||||
/** Expect the first item of a block sequence. */
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
|
|
||||||
/** Expect an item of a block sequence. */
|
|
||||||
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
|
|
||||||
/** Expect the first key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
|
|
||||||
/** Expect the key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_KEY_STATE
|
|
||||||
/** Expect a value for a simple key of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
|
|
||||||
/** Expect a value of a block mapping. */
|
|
||||||
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
|
|
||||||
/** Expect nothing. */
|
|
||||||
yaml_EMIT_END_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The emitter structure.
|
|
||||||
*
|
|
||||||
* All members are internal. Manage the structure using the @c yaml_emitter_
|
|
||||||
* family of functions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
type yaml_emitter_t struct {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Error handling
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Error type. */
|
|
||||||
error YAML_error_type_t
|
|
||||||
/** Error description. */
|
|
||||||
problem string
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Writer stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** Write handler. */
|
|
||||||
write_handler yaml_write_handler_t
|
|
||||||
|
|
||||||
/** Standard (string or file) output data. */
|
|
||||||
output_buffer *[]byte
|
|
||||||
output_writer io.Writer
|
|
||||||
|
|
||||||
/** The working buffer. */
|
|
||||||
buffer []byte
|
|
||||||
buffer_pos int
|
|
||||||
|
|
||||||
/** The raw buffer. */
|
|
||||||
raw_buffer []byte
|
|
||||||
raw_buffer_pos int
|
|
||||||
|
|
||||||
/** The stream encoding. */
|
|
||||||
encoding yaml_encoding_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Emitter stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** If the output is in the canonical style? */
|
|
||||||
canonical bool
|
|
||||||
/** The number of indentation spaces. */
|
|
||||||
best_indent int
|
|
||||||
/** The preferred width of the output lines. */
|
|
||||||
best_width int
|
|
||||||
/** Allow unescaped non-ASCII characters? */
|
|
||||||
unicode bool
|
|
||||||
/** The preferred line break. */
|
|
||||||
line_break yaml_break_t
|
|
||||||
|
|
||||||
/** The stack of states. */
|
|
||||||
states []yaml_emitter_state_t
|
|
||||||
|
|
||||||
/** The current emitter state. */
|
|
||||||
state yaml_emitter_state_t
|
|
||||||
|
|
||||||
/** The event queue. */
|
|
||||||
events []yaml_event_t
|
|
||||||
events_head int
|
|
||||||
|
|
||||||
/** The stack of indentation levels. */
|
|
||||||
indents []int
|
|
||||||
|
|
||||||
/** The list of tag directives. */
|
|
||||||
tag_directives []yaml_tag_directive_t
|
|
||||||
|
|
||||||
/** The current indentation level. */
|
|
||||||
indent int
|
|
||||||
|
|
||||||
/** The current flow level. */
|
|
||||||
flow_level int
|
|
||||||
|
|
||||||
/** Is it the document root context? */
|
|
||||||
root_context bool
|
|
||||||
/** Is it a sequence context? */
|
|
||||||
sequence_context bool
|
|
||||||
/** Is it a mapping context? */
|
|
||||||
mapping_context bool
|
|
||||||
/** Is it a simple mapping key context? */
|
|
||||||
simple_key_context bool
|
|
||||||
|
|
||||||
/** The current line. */
|
|
||||||
line int
|
|
||||||
/** The current column. */
|
|
||||||
column int
|
|
||||||
/** If the last character was a whitespace? */
|
|
||||||
whitespace bool
|
|
||||||
/** If the last character was an indentation character (' ', '-', '?', ':')? */
|
|
||||||
indention bool
|
|
||||||
/** If an explicit document end is required? */
|
|
||||||
open_ended bool
|
|
||||||
|
|
||||||
/** Anchor analysis. */
|
|
||||||
anchor_data struct {
|
|
||||||
/** The anchor value. */
|
|
||||||
anchor []byte
|
|
||||||
/** Is it an alias? */
|
|
||||||
alias bool
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Tag analysis. */
|
|
||||||
tag_data struct {
|
|
||||||
/** The tag handle. */
|
|
||||||
handle []byte
|
|
||||||
/** The tag suffix. */
|
|
||||||
suffix []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Scalar analysis. */
|
|
||||||
scalar_data struct {
|
|
||||||
/** The scalar value. */
|
|
||||||
value []byte
|
|
||||||
/** Does the scalar contain line breaks? */
|
|
||||||
multiline bool
|
|
||||||
/** Can the scalar be expessed in the flow plain style? */
|
|
||||||
flow_plain_allowed bool
|
|
||||||
/** Can the scalar be expressed in the block plain style? */
|
|
||||||
block_plain_allowed bool
|
|
||||||
/** Can the scalar be expressed in the single quoted style? */
|
|
||||||
single_quoted_allowed bool
|
|
||||||
/** Can the scalar be expressed in the literal or folded styles? */
|
|
||||||
block_allowed bool
|
|
||||||
/** The output style. */
|
|
||||||
style yaml_scalar_style_t
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @name Dumper stuff
|
|
||||||
* @{
|
|
||||||
*/
|
|
||||||
|
|
||||||
/** If the stream was already opened? */
|
|
||||||
opened bool
|
|
||||||
/** If the stream was already closed? */
|
|
||||||
closed bool
|
|
||||||
|
|
||||||
/** The information associated with the document nodes. */
|
|
||||||
anchors *struct {
|
|
||||||
/** The number of references. */
|
|
||||||
references int
|
|
||||||
/** The anchor id. */
|
|
||||||
anchor int
|
|
||||||
/** If the node has been emitted? */
|
|
||||||
serialized bool
|
|
||||||
}
|
|
||||||
|
|
||||||
/** The last assigned anchor id. */
|
|
||||||
last_anchor_id int
|
|
||||||
|
|
||||||
/** The currently emitted document. */
|
|
||||||
document *yaml_document_t
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @}
|
|
||||||
*/
|
|
||||||
|
|
||||||
}
|
|
2
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
2
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
@ -1,5 +1,3 @@
|
|||||||
ISC License
|
|
||||||
|
|
||||||
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
|
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
|
||||||
|
|
||||||
Permission to use, copy, modify, and distribute this software for any
|
Permission to use, copy, modify, and distribute this software for any
|
||||||
|
7
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
7
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
@ -13,10 +13,9 @@
|
|||||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
// when the code is not running on Google App Engine and "-tags disableunsafe"
|
||||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
// is not added to the go build command line.
|
||||||
// tag is deprecated and thus should not be used.
|
// +build !appengine,!disableunsafe
|
||||||
// +build !js,!appengine,!safe,!disableunsafe
|
|
||||||
|
|
||||||
package spew
|
package spew
|
||||||
|
|
||||||
|
7
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
7
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
@ -13,10 +13,9 @@
|
|||||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
// when either the code is running on Google App Engine or "-tags disableunsafe"
|
||||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
// is added to the go build command line.
|
||||||
// tag is deprecated and thus should not be used.
|
// +build appengine disableunsafe
|
||||||
// +build js appengine safe disableunsafe
|
|
||||||
|
|
||||||
package spew
|
package spew
|
||||||
|
|
||||||
|
2
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
2
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
@ -64,7 +64,7 @@ type ConfigState struct {
|
|||||||
// inside these interface methods. As a result, this option relies on
|
// inside these interface methods. As a result, this option relies on
|
||||||
// access to the unsafe package, so it will not have any effect when
|
// access to the unsafe package, so it will not have any effect when
|
||||||
// running in environments without access to the unsafe package such as
|
// running in environments without access to the unsafe package such as
|
||||||
// Google App Engine or with the "safe" build tag specified.
|
// Google App Engine or with the "disableunsafe" build tag specified.
|
||||||
DisablePointerMethods bool
|
DisablePointerMethods bool
|
||||||
|
|
||||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||||
|
10
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
10
vendor/github.com/docker/distribution/reference/reference.go
generated
vendored
@ -24,7 +24,6 @@ package reference
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/docker/distribution/digest"
|
"github.com/docker/distribution/digest"
|
||||||
)
|
)
|
||||||
@ -44,9 +43,6 @@ var (
|
|||||||
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
|
// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
|
||||||
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
ErrDigestInvalidFormat = errors.New("invalid digest format")
|
||||||
|
|
||||||
// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
|
|
||||||
ErrNameContainsUppercase = errors.New("repository name must be lowercase")
|
|
||||||
|
|
||||||
// ErrNameEmpty is returned for empty, invalid repository names.
|
// ErrNameEmpty is returned for empty, invalid repository names.
|
||||||
ErrNameEmpty = errors.New("repository name must have at least one component")
|
ErrNameEmpty = errors.New("repository name must have at least one component")
|
||||||
|
|
||||||
@ -138,7 +134,7 @@ type Canonical interface {
|
|||||||
func SplitHostname(named Named) (string, string) {
|
func SplitHostname(named Named) (string, string) {
|
||||||
name := named.Name()
|
name := named.Name()
|
||||||
match := anchoredNameRegexp.FindStringSubmatch(name)
|
match := anchoredNameRegexp.FindStringSubmatch(name)
|
||||||
if len(match) != 3 {
|
if match == nil || len(match) != 3 {
|
||||||
return "", name
|
return "", name
|
||||||
}
|
}
|
||||||
return match[1], match[2]
|
return match[1], match[2]
|
||||||
@ -153,9 +149,7 @@ func Parse(s string) (Reference, error) {
|
|||||||
if s == "" {
|
if s == "" {
|
||||||
return nil, ErrNameEmpty
|
return nil, ErrNameEmpty
|
||||||
}
|
}
|
||||||
if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
|
// TODO(dmcgowan): Provide more specific and helpful error
|
||||||
return nil, ErrNameContainsUppercase
|
|
||||||
}
|
|
||||||
return nil, ErrReferenceInvalidFormat
|
return nil, ErrReferenceInvalidFormat
|
||||||
}
|
}
|
||||||
|
|
||||||
|
11
vendor/github.com/emicklei/go-restful/swagger/model_builder.go
generated
vendored
11
vendor/github.com/emicklei/go-restful/swagger/model_builder.go
generated
vendored
@ -43,12 +43,6 @@ func (b modelBuilder) addModelFrom(sample interface{}) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
|
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
|
||||||
// Turn pointers into simpler types so further checks are
|
|
||||||
// correct.
|
|
||||||
if st.Kind() == reflect.Ptr {
|
|
||||||
st = st.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
modelName := b.keyFrom(st)
|
modelName := b.keyFrom(st)
|
||||||
if nameOverride != "" {
|
if nameOverride != "" {
|
||||||
modelName = nameOverride
|
modelName = nameOverride
|
||||||
@ -143,11 +137,6 @@ func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, mod
|
|||||||
return "", "", prop
|
return "", "", prop
|
||||||
}
|
}
|
||||||
|
|
||||||
if field.Name == "XMLName" && field.Type.String() == "xml.Name" {
|
|
||||||
// property is metadata for the xml.Name attribute, can be skipped
|
|
||||||
return "", "", prop
|
|
||||||
}
|
|
||||||
|
|
||||||
if tag := field.Tag.Get("modelDescription"); tag != "" {
|
if tag := field.Tag.Get("modelDescription"); tag != "" {
|
||||||
modelDescription = tag
|
modelDescription = tag
|
||||||
}
|
}
|
||||||
|
15
vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
generated
vendored
15
vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go
generated
vendored
@ -33,21 +33,6 @@ func (prop *ModelProperty) setMaximum(field reflect.StructField) {
|
|||||||
|
|
||||||
func (prop *ModelProperty) setType(field reflect.StructField) {
|
func (prop *ModelProperty) setType(field reflect.StructField) {
|
||||||
if tag := field.Tag.Get("type"); tag != "" {
|
if tag := field.Tag.Get("type"); tag != "" {
|
||||||
// Check if the first two characters of the type tag are
|
|
||||||
// intended to emulate slice/array behaviour.
|
|
||||||
//
|
|
||||||
// If type is intended to be a slice/array then add the
|
|
||||||
// overriden type to the array item instead of the main property
|
|
||||||
if len(tag) > 2 && tag[0:2] == "[]" {
|
|
||||||
pType := "array"
|
|
||||||
prop.Type = &pType
|
|
||||||
prop.Items = new(Item)
|
|
||||||
|
|
||||||
iType := tag[2:]
|
|
||||||
prop.Items.Type = &iType
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
prop.Type = &tag
|
prop.Type = &tag
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
2
vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
generated
vendored
2
vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go
generated
vendored
@ -277,7 +277,7 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *
|
|||||||
}
|
}
|
||||||
// sort by code
|
// sort by code
|
||||||
codes := sort.IntSlice{}
|
codes := sort.IntSlice{}
|
||||||
for code := range route.ResponseErrors {
|
for code, _ := range route.ResponseErrors {
|
||||||
codes = append(codes, code)
|
codes = append(codes, code)
|
||||||
}
|
}
|
||||||
codes.Sort()
|
codes.Sort()
|
||||||
|
18
vendor/github.com/ghodss/yaml/README.md
generated
vendored
18
vendor/github.com/ghodss/yaml/README.md
generated
vendored
@ -4,17 +4,17 @@
|
|||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
A wrapper around [candiedyaml](https://github.com/cloudfoundry-incubator/candiedyaml) designed to enable a better way of handling YAML when marshaling to and from structs.
|
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
|
||||||
|
|
||||||
In short, this library first converts YAML to JSON using candiedyaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike candiedyaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
|
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
|
||||||
|
|
||||||
## Compatibility
|
## Compatibility
|
||||||
|
|
||||||
This package uses [candiedyaml](https://github.com/cloudfoundry-incubator/candiedyaml) and therefore supports [everything candiedyaml supports](https://github.com/cloudfoundry-incubator/candiedyaml#candiedyaml).
|
This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
|
||||||
|
|
||||||
## Caveats
|
## Caveats
|
||||||
|
|
||||||
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, candiedyaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
|
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
|
||||||
|
|
||||||
```
|
```
|
||||||
BAD:
|
BAD:
|
||||||
@ -44,8 +44,6 @@ import "github.com/ghodss/yaml"
|
|||||||
Usage is very similar to the JSON library:
|
Usage is very similar to the JSON library:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
@ -54,7 +52,7 @@ import (
|
|||||||
|
|
||||||
type Person struct {
|
type Person struct {
|
||||||
Name string `json:"name"` // Affects YAML field names too.
|
Name string `json:"name"` // Affects YAML field names too.
|
||||||
Age int `json:"age"`
|
Age int `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@ -67,13 +65,13 @@ func main() {
|
|||||||
}
|
}
|
||||||
fmt.Println(string(y))
|
fmt.Println(string(y))
|
||||||
/* Output:
|
/* Output:
|
||||||
age: 30
|
|
||||||
name: John
|
name: John
|
||||||
|
age: 30
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Unmarshal the YAML back into a Person struct.
|
// Unmarshal the YAML back into a Person struct.
|
||||||
var p2 Person
|
var p2 Person
|
||||||
err = yaml.Unmarshal(y, &p2)
|
err := yaml.Unmarshal(y, &p2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("err: %v\n", err)
|
fmt.Printf("err: %v\n", err)
|
||||||
return
|
return
|
||||||
@ -88,8 +86,6 @@ func main() {
|
|||||||
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
|
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
8
vendor/github.com/ghodss/yaml/yaml.go
generated
vendored
8
vendor/github.com/ghodss/yaml/yaml.go
generated
vendored
@ -7,7 +7,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
yaml "github.com/cloudfoundry-incubator/candiedyaml"
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Marshals the object into JSON then converts JSON to YAML and returns the
|
// Marshals the object into JSON then converts JSON to YAML and returns the
|
||||||
@ -15,12 +15,12 @@ import (
|
|||||||
func Marshal(o interface{}) ([]byte, error) {
|
func Marshal(o interface{}) ([]byte, error) {
|
||||||
j, err := json.Marshal(o)
|
j, err := json.Marshal(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
|
return nil, fmt.Errorf("error marshaling into JSON: ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
y, err := JSONToYAML(j)
|
y, err := JSONToYAML(j)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
|
return nil, fmt.Errorf("error converting JSON to YAML: ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return y, nil
|
return y, nil
|
||||||
@ -48,7 +48,7 @@ func JSONToYAML(j []byte) ([]byte, error) {
|
|||||||
var jsonObj interface{}
|
var jsonObj interface{}
|
||||||
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
|
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
|
||||||
// Go JSON library doesn't try to pick the right number type (int, float,
|
// Go JSON library doesn't try to pick the right number type (int, float,
|
||||||
// etc.) when unmarshalling to interface{}, it just picks float64
|
// etc.) when unmarshling to interface{}, it just picks float64
|
||||||
// universally. go-yaml does go through the effort of picking the right
|
// universally. go-yaml does go through the effort of picking the right
|
||||||
// number type, so we can preserve number type throughout this process.
|
// number type, so we can preserve number type throughout this process.
|
||||||
err := yaml.Unmarshal(j, &jsonObj)
|
err := yaml.Unmarshal(j, &jsonObj)
|
||||||
|
16
vendor/github.com/go-openapi/spec/.travis.yml
generated
vendored
16
vendor/github.com/go-openapi/spec/.travis.yml
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.7
|
|
||||||
install:
|
|
||||||
- go get -u github.com/stretchr/testify
|
|
||||||
- go get -u github.com/go-openapi/swag
|
|
||||||
- go get -u gopkg.in/yaml.v2
|
|
||||||
- go get -u github.com/go-openapi/jsonpointer
|
|
||||||
- go get -u github.com/go-openapi/jsonreference
|
|
||||||
script:
|
|
||||||
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...
|
|
||||||
after_success:
|
|
||||||
- bash <(curl -s https://codecov.io/bash)
|
|
||||||
notifications:
|
|
||||||
slack:
|
|
||||||
secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E=
|
|
4
vendor/github.com/go-openapi/spec/README.md
generated
vendored
4
vendor/github.com/go-openapi/spec/README.md
generated
vendored
@ -1,5 +1,5 @@
|
|||||||
# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||||
|
|
||||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
|
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
|
||||||
|
|
||||||
The object model for OpenAPI specification documents
|
The object model for OpenAPI specification documents
|
26
vendor/github.com/go-openapi/spec/bindata.go
generated
vendored
26
vendor/github.com/go-openapi/spec/bindata.go
generated
vendored
File diff suppressed because one or more lines are too long
BIN
vendor/github.com/go-openapi/spec/debug.test
generated
vendored
BIN
vendor/github.com/go-openapi/spec/debug.test
generated
vendored
Binary file not shown.
180
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
180
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
@ -17,9 +17,7 @@ package spec
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@ -28,11 +26,6 @@ import (
|
|||||||
"github.com/go-openapi/swag"
|
"github.com/go-openapi/swag"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
// Debug enables logging when SWAGGER_DEBUG env var is not empty
|
|
||||||
Debug = os.Getenv("SWAGGER_DEBUG") != ""
|
|
||||||
)
|
|
||||||
|
|
||||||
// ResolutionCache a cache for resolving urls
|
// ResolutionCache a cache for resolving urls
|
||||||
type ResolutionCache interface {
|
type ResolutionCache interface {
|
||||||
Get(string) (interface{}, bool)
|
Get(string) (interface{}, bool)
|
||||||
@ -44,11 +37,7 @@ type simpleCache struct {
|
|||||||
store map[string]interface{}
|
store map[string]interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
var resCache ResolutionCache
|
var resCache = initResolutionCache()
|
||||||
|
|
||||||
func init() {
|
|
||||||
resCache = initResolutionCache()
|
|
||||||
}
|
|
||||||
|
|
||||||
func initResolutionCache() ResolutionCache {
|
func initResolutionCache() ResolutionCache {
|
||||||
return &simpleCache{store: map[string]interface{}{
|
return &simpleCache{store: map[string]interface{}{
|
||||||
@ -58,15 +47,8 @@ func initResolutionCache() ResolutionCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *simpleCache) Get(uri string) (interface{}, bool) {
|
func (s *simpleCache) Get(uri string) (interface{}, bool) {
|
||||||
if Debug {
|
|
||||||
log.Printf("getting %q from resolution cache", uri)
|
|
||||||
}
|
|
||||||
s.lock.Lock()
|
s.lock.Lock()
|
||||||
v, ok := s.store[uri]
|
v, ok := s.store[uri]
|
||||||
if Debug {
|
|
||||||
log.Printf("got %q from resolution cache: %t", uri, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.lock.Unlock()
|
s.lock.Unlock()
|
||||||
return v, ok
|
return v, ok
|
||||||
}
|
}
|
||||||
@ -150,10 +132,6 @@ func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*sc
|
|||||||
startingRef: ref,
|
startingRef: ref,
|
||||||
cache: cache,
|
cache: cache,
|
||||||
loadDoc: func(path string) (json.RawMessage, error) {
|
loadDoc: func(path string) (json.RawMessage, error) {
|
||||||
if Debug {
|
|
||||||
log.Printf("fetching document at %q", path)
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := swag.LoadFromFileOrHTTP(path)
|
data, err := swag.LoadFromFileOrHTTP(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -181,7 +159,6 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
|
|||||||
if startingRef == nil {
|
if startingRef == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if ptr == nil {
|
if ptr == nil {
|
||||||
return startingRef
|
return startingRef
|
||||||
}
|
}
|
||||||
@ -216,19 +193,16 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
|
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
|
||||||
|
|
||||||
tgt := reflect.ValueOf(target)
|
tgt := reflect.ValueOf(target)
|
||||||
if tgt.Kind() != reflect.Ptr {
|
if tgt.Kind() != reflect.Ptr {
|
||||||
return fmt.Errorf("resolve ref: target needs to be a pointer")
|
return fmt.Errorf("resolve ref: target needs to be a pointer")
|
||||||
}
|
}
|
||||||
|
|
||||||
oldRef := currentRef
|
oldRef := currentRef
|
||||||
|
|
||||||
if currentRef != nil {
|
if currentRef != nil {
|
||||||
var err error
|
var err error
|
||||||
currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
|
currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
|
||||||
@ -236,7 +210,6 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if currentRef == nil {
|
if currentRef == nil {
|
||||||
currentRef = ref
|
currentRef = ref
|
||||||
}
|
}
|
||||||
@ -293,27 +266,7 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
|
|||||||
if currentRef.String() != "" {
|
if currentRef.String() != "" {
|
||||||
res, _, err = currentRef.GetPointer().Get(data)
|
res, _, err = currentRef.GetPointer().Get(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
return err
|
||||||
if strings.HasPrefix(ref.String(), "#") {
|
|
||||||
// go back to original spec
|
|
||||||
newUrl := r.loadingRef.GetURL().String()
|
|
||||||
refURL, err = url.Parse(newUrl + ref.String())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
data, _, _, err = r.load(refURL)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
res, _, err = ref.GetPointer().Get(data)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
res = data
|
res = data
|
||||||
@ -324,9 +277,6 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
r.currentRef = currentRef
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -406,6 +356,7 @@ func ExpandSpec(spec *Swagger) error {
|
|||||||
|
|
||||||
// ExpandSchema expands the refs in the schema object
|
// ExpandSchema expands the refs in the schema object
|
||||||
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
|
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
|
||||||
|
|
||||||
if schema == nil {
|
if schema == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -424,6 +375,7 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error
|
|||||||
rid, _ := NewRef(root.(*Swagger).ID)
|
rid, _ := NewRef(root.(*Swagger).ID)
|
||||||
rrr, _ = rid.Inherits(nrr)
|
rrr, _ = rid.Inherits(nrr)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resolver, err := defaultSchemaLoader(root, rrr, cache)
|
resolver, err := defaultSchemaLoader(root, rrr, cache)
|
||||||
@ -437,7 +389,7 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error
|
|||||||
}
|
}
|
||||||
var s *Schema
|
var s *Schema
|
||||||
if s, err = expandSchema(*schema, refs, resolver); err != nil {
|
if s, err = expandSchema(*schema, refs, resolver); err != nil {
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
*schema = *s
|
*schema = *s
|
||||||
return nil
|
return nil
|
||||||
@ -448,15 +400,7 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
|
|||||||
if target.Items.Schema != nil {
|
if target.Items.Schema != nil {
|
||||||
t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
|
t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if target.Items.Schema.ID == "" {
|
return nil, err
|
||||||
target.Items.Schema.ID = target.ID
|
|
||||||
if err != nil {
|
|
||||||
t, err = expandSchema(*target.Items.Schema, parentRefs, resolver)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
*target.Items.Schema = *t
|
*target.Items.Schema = *t
|
||||||
}
|
}
|
||||||
@ -471,110 +415,101 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
|
|||||||
return &target, nil
|
return &target, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
|
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) {
|
||||||
|
defer func() {
|
||||||
|
schema = &target
|
||||||
|
}()
|
||||||
if target.Ref.String() == "" && target.Ref.IsRoot() {
|
if target.Ref.String() == "" && target.Ref.IsRoot() {
|
||||||
if Debug {
|
target = *resolver.root.(*Schema)
|
||||||
log.Printf("skipping expand schema for no ref and root: %v", resolver.root)
|
return
|
||||||
}
|
|
||||||
|
|
||||||
return resolver.root.(*Schema), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// t is the new expanded schema
|
// t is the new expanded schema
|
||||||
var t *Schema
|
var t *Schema
|
||||||
|
|
||||||
for target.Ref.String() != "" {
|
for target.Ref.String() != "" {
|
||||||
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
|
// var newTarget Schema
|
||||||
return &target, nil
|
pRefs := strings.Join(parentRefs, ",")
|
||||||
|
pRefs += ","
|
||||||
|
if strings.Contains(pRefs, target.Ref.String()+",") {
|
||||||
|
err = nil
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := resolver.Resolve(&target.Ref, &t); err != nil {
|
if err = resolver.Resolve(&target.Ref, &t); err != nil {
|
||||||
return &target, err
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
parentRefs = append(parentRefs, target.Ref.String())
|
parentRefs = append(parentRefs, target.Ref.String())
|
||||||
target = *t
|
target = *t
|
||||||
}
|
}
|
||||||
|
|
||||||
t, err := expandItems(target, parentRefs, resolver)
|
if t, err = expandItems(target, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target = *t
|
target = *t
|
||||||
|
|
||||||
for i := range target.AllOf {
|
for i := range target.AllOf {
|
||||||
t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
|
if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target.AllOf[i] = *t
|
target.AllOf[i] = *t
|
||||||
}
|
}
|
||||||
for i := range target.AnyOf {
|
for i := range target.AnyOf {
|
||||||
t, err := expandSchema(target.AnyOf[i], parentRefs, resolver)
|
if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target.AnyOf[i] = *t
|
target.AnyOf[i] = *t
|
||||||
}
|
}
|
||||||
for i := range target.OneOf {
|
for i := range target.OneOf {
|
||||||
t, err := expandSchema(target.OneOf[i], parentRefs, resolver)
|
if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target.OneOf[i] = *t
|
target.OneOf[i] = *t
|
||||||
}
|
}
|
||||||
if target.Not != nil {
|
if target.Not != nil {
|
||||||
t, err := expandSchema(*target.Not, parentRefs, resolver)
|
if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
*target.Not = *t
|
*target.Not = *t
|
||||||
}
|
}
|
||||||
for k := range target.Properties {
|
for k, _ := range target.Properties {
|
||||||
t, err := expandSchema(target.Properties[k], parentRefs, resolver)
|
if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target.Properties[k] = *t
|
target.Properties[k] = *t
|
||||||
}
|
}
|
||||||
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
|
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
|
||||||
t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver)
|
if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
*target.AdditionalProperties.Schema = *t
|
*target.AdditionalProperties.Schema = *t
|
||||||
}
|
}
|
||||||
for k := range target.PatternProperties {
|
for k, _ := range target.PatternProperties {
|
||||||
t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver)
|
if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target.PatternProperties[k] = *t
|
target.PatternProperties[k] = *t
|
||||||
}
|
}
|
||||||
for k := range target.Dependencies {
|
for k, _ := range target.Dependencies {
|
||||||
if target.Dependencies[k].Schema != nil {
|
if target.Dependencies[k].Schema != nil {
|
||||||
t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver)
|
if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
*target.Dependencies[k].Schema = *t
|
*target.Dependencies[k].Schema = *t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
|
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
|
||||||
t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver)
|
if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
*target.AdditionalItems.Schema = *t
|
*target.AdditionalItems.Schema = *t
|
||||||
}
|
}
|
||||||
for k := range target.Definitions {
|
for k, _ := range target.Definitions {
|
||||||
t, err := expandSchema(target.Definitions[k], parentRefs, resolver)
|
if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
return
|
||||||
return &target, err
|
|
||||||
}
|
}
|
||||||
target.Definitions[k] = *t
|
target.Definitions[k] = *t
|
||||||
}
|
}
|
||||||
return &target, nil
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
|
func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
|
||||||
@ -647,24 +582,22 @@ func expandResponse(response *Response, resolver *schemaLoader) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var parentRefs []string
|
|
||||||
if response.Ref.String() != "" {
|
if response.Ref.String() != "" {
|
||||||
parentRefs = append(parentRefs, response.Ref.String())
|
|
||||||
if err := resolver.Resolve(&response.Ref, response); err != nil {
|
if err := resolver.Resolve(&response.Ref, response); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if response.Schema != nil {
|
if response.Schema != nil {
|
||||||
parentRefs = append(parentRefs, response.Schema.Ref.String())
|
parentRefs := []string{response.Schema.Ref.String()}
|
||||||
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
|
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s, err := expandSchema(*response.Schema, parentRefs, resolver)
|
if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
|
} else {
|
||||||
|
*response.Schema = *s
|
||||||
}
|
}
|
||||||
*response.Schema = *s
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -673,24 +606,21 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
|
|||||||
if parameter == nil {
|
if parameter == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var parentRefs []string
|
|
||||||
if parameter.Ref.String() != "" {
|
if parameter.Ref.String() != "" {
|
||||||
parentRefs = append(parentRefs, parameter.Ref.String())
|
|
||||||
if err := resolver.Resolve(¶meter.Ref, parameter); err != nil {
|
if err := resolver.Resolve(¶meter.Ref, parameter); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if parameter.Schema != nil {
|
if parameter.Schema != nil {
|
||||||
parentRefs = append(parentRefs, parameter.Schema.Ref.String())
|
parentRefs := []string{parameter.Schema.Ref.String()}
|
||||||
if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil {
|
if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s, err := expandSchema(*parameter.Schema, parentRefs, resolver)
|
if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil {
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
|
} else {
|
||||||
|
*parameter.Schema = *s
|
||||||
}
|
}
|
||||||
*parameter.Schema = *s
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
4
vendor/github.com/go-openapi/spec/header.go
generated
vendored
4
vendor/github.com/go-openapi/spec/header.go
generated
vendored
@ -30,7 +30,6 @@ type HeaderProps struct {
|
|||||||
type Header struct {
|
type Header struct {
|
||||||
CommonValidations
|
CommonValidations
|
||||||
SimpleSchema
|
SimpleSchema
|
||||||
VendorExtensible
|
|
||||||
HeaderProps
|
HeaderProps
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -159,9 +158,6 @@ func (h *Header) UnmarshalJSON(data []byte) error {
|
|||||||
if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
|
if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(data, &h.VendorExtensible); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(data, &h.HeaderProps); err != nil {
|
if err := json.Unmarshal(data, &h.HeaderProps); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
9
vendor/github.com/go-openapi/spec/spec.go
generated
vendored
9
vendor/github.com/go-openapi/spec/spec.go
generated
vendored
@ -27,14 +27,9 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
jsonSchema *Schema
|
jsonSchema = MustLoadJSONSchemaDraft04()
|
||||||
swaggerSchema *Schema
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
jsonSchema = MustLoadJSONSchemaDraft04()
|
|
||||||
swaggerSchema = MustLoadSwagger20Schema()
|
swaggerSchema = MustLoadSwagger20Schema()
|
||||||
}
|
)
|
||||||
|
|
||||||
// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error
|
// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error
|
||||||
func MustLoadJSONSchemaDraft04() *Schema {
|
func MustLoadJSONSchemaDraft04() *Schema {
|
||||||
|
13
vendor/github.com/go-openapi/swag/.travis.yml
generated
vendored
13
vendor/github.com/go-openapi/swag/.travis.yml
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.7
|
|
||||||
install:
|
|
||||||
- go get -u github.com/stretchr/testify
|
|
||||||
- go get -u github.com/mailru/easyjson
|
|
||||||
script:
|
|
||||||
- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./...
|
|
||||||
after_success:
|
|
||||||
- bash <(curl -s https://codecov.io/bash)
|
|
||||||
notifications:
|
|
||||||
slack:
|
|
||||||
secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E=
|
|
2
vendor/github.com/go-openapi/swag/README.md
generated
vendored
2
vendor/github.com/go-openapi/swag/README.md
generated
vendored
@ -1,4 +1,4 @@
|
|||||||
# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
# Swag [![Build Status](https://ci.vmware.run/api/badges/go-openapi/swag/status.svg)](https://ci.vmware.run/go-openapi/swag) [![Coverage](https://coverage.vmware.run/badges/go-openapi/swag/coverage.svg)](https://coverage.vmware.run/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||||
|
|
||||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag)
|
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag)
|
||||||
|
|
||||||
|
45
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
45
vendor/github.com/go-openapi/swag/loading.go
generated
vendored
@ -19,21 +19,11 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// LoadHTTPTimeout the default timeout for load requests
|
|
||||||
var LoadHTTPTimeout = 30 * time.Second
|
|
||||||
|
|
||||||
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
|
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
|
||||||
func LoadFromFileOrHTTP(path string) ([]byte, error) {
|
func LoadFromFileOrHTTP(path string) ([]byte, error) {
|
||||||
return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
|
return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path)
|
||||||
}
|
|
||||||
|
|
||||||
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
|
|
||||||
// timeout arg allows for per request overriding of the request timeout
|
|
||||||
func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
|
|
||||||
return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadStrategy returns a loader function for a given path or uri
|
// LoadStrategy returns a loader function for a given path or uri
|
||||||
@ -44,27 +34,16 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(
|
|||||||
return local
|
return local
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
|
func loadHTTPBytes(path string) ([]byte, error) {
|
||||||
return func(path string) ([]byte, error) {
|
resp, err := http.Get(path)
|
||||||
client := &http.Client{Timeout: timeout}
|
if err != nil {
|
||||||
req, err := http.NewRequest("GET", path, nil)
|
return nil, err
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
resp, err := client.Do(req)
|
|
||||||
defer func() {
|
|
||||||
if resp != nil {
|
|
||||||
resp.Body.Close()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ioutil.ReadAll(resp.Body)
|
|
||||||
}
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ioutil.ReadAll(resp.Body)
|
||||||
}
|
}
|
||||||
|
3
vendor/github.com/go-openapi/swag/util.go
generated
vendored
3
vendor/github.com/go-openapi/swag/util.go
generated
vendored
@ -246,9 +246,6 @@ func ToJSONName(name string) string {
|
|||||||
// ToVarName camelcases a name which can be underscored or pascal cased
|
// ToVarName camelcases a name which can be underscored or pascal cased
|
||||||
func ToVarName(name string) string {
|
func ToVarName(name string) string {
|
||||||
res := ToGoName(name)
|
res := ToGoName(name)
|
||||||
if _, ok := commonInitialisms[res]; ok {
|
|
||||||
return lower(res)
|
|
||||||
}
|
|
||||||
if len(res) <= 1 {
|
if len(res) <= 1 {
|
||||||
return lower(res)
|
return lower(res)
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/gogo/protobuf/LICENSE
generated
vendored
6
vendor/github.com/gogo/protobuf/LICENSE
generated
vendored
@ -1,7 +1,7 @@
|
|||||||
Protocol Buffers for Go with Gadgets
|
Extensions for Protocol Buffers to create more go like structures.
|
||||||
|
|
||||||
Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
http://github.com/gogo/protobuf
|
http://github.com/gogo/protobuf/gogoproto
|
||||||
|
|
||||||
Go support for Protocol Buffers - Google's data interchange format
|
Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
|
||||||
|
2
vendor/github.com/gogo/protobuf/proto/Makefile
generated
vendored
2
vendor/github.com/gogo/protobuf/proto/Makefile
generated
vendored
@ -39,5 +39,5 @@ test: install generate-test-pbs
|
|||||||
generate-test-pbs:
|
generate-test-pbs:
|
||||||
make install
|
make install
|
||||||
make -C testdata
|
make -C testdata
|
||||||
protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto
|
protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto
|
||||||
make
|
make
|
||||||
|
14
vendor/github.com/gogo/protobuf/proto/clone.go
generated
vendored
14
vendor/github.com/gogo/protobuf/proto/clone.go
generated
vendored
@ -84,20 +84,14 @@ func mergeStruct(out, in reflect.Value) {
|
|||||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
|
if emIn, ok := in.Addr().Interface().(extensionsMap); ok {
|
||||||
|
emOut := out.Addr().Interface().(extensionsMap)
|
||||||
|
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
|
||||||
|
} else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
|
||||||
emOut := out.Addr().Interface().(extensionsBytes)
|
emOut := out.Addr().Interface().(extensionsBytes)
|
||||||
bIn := emIn.GetExtensions()
|
bIn := emIn.GetExtensions()
|
||||||
bOut := emOut.GetExtensions()
|
bOut := emOut.GetExtensions()
|
||||||
*bOut = append(*bOut, *bIn...)
|
*bOut = append(*bOut, *bIn...)
|
||||||
} else if emIn, ok := extendable(in.Addr().Interface()); ok {
|
|
||||||
emOut, _ := extendable(out.Addr().Interface())
|
|
||||||
mIn, muIn := emIn.extensionsRead()
|
|
||||||
if mIn != nil {
|
|
||||||
mOut := emOut.extensionsWrite()
|
|
||||||
muIn.Lock()
|
|
||||||
mergeExtension(mOut, mIn)
|
|
||||||
muIn.Unlock()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
uf := in.FieldByName("XXX_unrecognized")
|
uf := in.FieldByName("XXX_unrecognized")
|
||||||
|
25
vendor/github.com/gogo/protobuf/proto/decode.go
generated
vendored
25
vendor/github.com/gogo/protobuf/proto/decode.go
generated
vendored
@ -378,11 +378,6 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
|
|||||||
wire := int(u & 0x7)
|
wire := int(u & 0x7)
|
||||||
if wire == WireEndGroup {
|
if wire == WireEndGroup {
|
||||||
if is_group {
|
if is_group {
|
||||||
if required > 0 {
|
|
||||||
// Not enough information to determine the exact field.
|
|
||||||
// (See below.)
|
|
||||||
return &RequiredNotSetError{"{Unknown}"}
|
|
||||||
}
|
|
||||||
return nil // input is satisfied
|
return nil // input is satisfied
|
||||||
}
|
}
|
||||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||||
@ -395,20 +390,16 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
|
|||||||
if !ok {
|
if !ok {
|
||||||
// Maybe it's an extension?
|
// Maybe it's an extension?
|
||||||
if prop.extendable {
|
if prop.extendable {
|
||||||
if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok {
|
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||||
if isExtensionField(e, int32(tag)) {
|
if err = o.skip(st, tag, wire); err == nil {
|
||||||
if err = o.skip(st, tag, wire); err == nil {
|
if ee, eok := e.(extensionsMap); eok {
|
||||||
ext := e.GetExtensions()
|
ext := ee.ExtensionMap()[int32(tag)] // may be missing
|
||||||
|
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||||
|
ee.ExtensionMap()[int32(tag)] = ext
|
||||||
|
} else if ee, eok := e.(extensionsBytes); eok {
|
||||||
|
ext := ee.GetExtensions()
|
||||||
*ext = append(*ext, o.buf[oi:o.index]...)
|
*ext = append(*ext, o.buf[oi:o.index]...)
|
||||||
}
|
}
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
|
|
||||||
if err = o.skip(st, tag, wire); err == nil {
|
|
||||||
extmap := e.extensionsWrite()
|
|
||||||
ext := extmap[int32(tag)] // may be missing
|
|
||||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
|
||||||
extmap[int32(tag)] = ext
|
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/gogo/protobuf/proto/decode_gogo.go
generated
vendored
6
vendor/github.com/gogo/protobuf/proto/decode_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
|
100
vendor/github.com/gogo/protobuf/proto/duration.go
generated
vendored
100
vendor/github.com/gogo/protobuf/proto/duration.go
generated
vendored
@ -1,100 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// This file implements conversions between google.protobuf.Duration
|
|
||||||
// and time.Duration.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Range of a Duration in seconds, as specified in
|
|
||||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
|
||||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
|
||||||
minSeconds = -maxSeconds
|
|
||||||
)
|
|
||||||
|
|
||||||
// validateDuration determines whether the Duration is valid according to the
|
|
||||||
// definition in google/protobuf/duration.proto. A valid Duration
|
|
||||||
// may still be too large to fit into a time.Duration (the range of Duration
|
|
||||||
// is about 10,000 years, and the range of time.Duration is about 290).
|
|
||||||
func validateDuration(d *duration) error {
|
|
||||||
if d == nil {
|
|
||||||
return errors.New("duration: nil Duration")
|
|
||||||
}
|
|
||||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
|
||||||
return fmt.Errorf("duration: %#v: seconds out of range", d)
|
|
||||||
}
|
|
||||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
|
||||||
return fmt.Errorf("duration: %#v: nanos out of range", d)
|
|
||||||
}
|
|
||||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
|
||||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
|
||||||
return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
|
|
||||||
// returns an error if the Duration is invalid or is too large to be
|
|
||||||
// represented in a time.Duration.
|
|
||||||
func durationFromProto(p *duration) (time.Duration, error) {
|
|
||||||
if err := validateDuration(p); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
d := time.Duration(p.Seconds) * time.Second
|
|
||||||
if int64(d/time.Second) != p.Seconds {
|
|
||||||
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
|
||||||
}
|
|
||||||
if p.Nanos != 0 {
|
|
||||||
d += time.Duration(p.Nanos)
|
|
||||||
if (d < 0) != (p.Nanos < 0) {
|
|
||||||
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DurationProto converts a time.Duration to a Duration.
|
|
||||||
func durationProto(d time.Duration) *duration {
|
|
||||||
nanos := d.Nanoseconds()
|
|
||||||
secs := nanos / 1e9
|
|
||||||
nanos -= secs * 1e9
|
|
||||||
return &duration{
|
|
||||||
Seconds: secs,
|
|
||||||
Nanos: int32(nanos),
|
|
||||||
}
|
|
||||||
}
|
|
202
vendor/github.com/gogo/protobuf/proto/duration_gogo.go
generated
vendored
202
vendor/github.com/gogo/protobuf/proto/duration_gogo.go
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
|
||||||
//
|
|
||||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
|
|
||||||
|
|
||||||
type duration struct {
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *duration) Reset() { *m = duration{} }
|
|
||||||
func (*duration) ProtoMessage() {}
|
|
||||||
func (*duration) String() string { return "duration<string>" }
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) decDuration() (time.Duration, error) {
|
|
||||||
b, err := o.DecodeRawBytes(true)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
dproto := &duration{}
|
|
||||||
if err := Unmarshal(b, dproto); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return durationFromProto(dproto)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_duration(p *Properties, base structPointer) error {
|
|
||||||
d, err := o.decDuration()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word64_Set(structPointer_Word64(base, p.field), o, uint64(d))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error {
|
|
||||||
d, err := o.decDuration()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
|
|
||||||
d, err := o.decDuration()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
|
|
||||||
setPtrCustomType(newBas, 0, &d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error {
|
|
||||||
d, err := o.decDuration()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
structPointer_Word64Slice(base, p.field).Append(uint64(d))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_duration(p *Properties, base structPointer) (n int) {
|
|
||||||
structp := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(structp) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
dur := structPointer_Interface(structp, durationType).(*time.Duration)
|
|
||||||
d := durationProto(*dur)
|
|
||||||
size := Size(d)
|
|
||||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_duration(p *Properties, base structPointer) error {
|
|
||||||
structp := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(structp) {
|
|
||||||
return ErrNil
|
|
||||||
}
|
|
||||||
dur := structPointer_Interface(structp, durationType).(*time.Duration)
|
|
||||||
d := durationProto(*dur)
|
|
||||||
data, err := Marshal(d)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_ref_duration(p *Properties, base structPointer) (n int) {
|
|
||||||
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
|
|
||||||
d := durationProto(*dur)
|
|
||||||
size := Size(d)
|
|
||||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error {
|
|
||||||
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
|
|
||||||
d := durationProto(*dur)
|
|
||||||
data, err := Marshal(d)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_slice_duration(p *Properties, base structPointer) (n int) {
|
|
||||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
|
|
||||||
durs := *pdurs
|
|
||||||
for i := 0; i < len(durs); i++ {
|
|
||||||
if durs[i] == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
dproto := durationProto(*durs[i])
|
|
||||||
size := Size(dproto)
|
|
||||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error {
|
|
||||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
|
|
||||||
durs := *pdurs
|
|
||||||
for i := 0; i < len(durs); i++ {
|
|
||||||
if durs[i] == nil {
|
|
||||||
return errRepeatedHasNil
|
|
||||||
}
|
|
||||||
dproto := durationProto(*durs[i])
|
|
||||||
data, err := Marshal(dproto)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_slice_ref_duration(p *Properties, base structPointer) (n int) {
|
|
||||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
|
|
||||||
durs := *pdurs
|
|
||||||
for i := 0; i < len(durs); i++ {
|
|
||||||
dproto := durationProto(durs[i])
|
|
||||||
size := Size(dproto)
|
|
||||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error {
|
|
||||||
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
|
|
||||||
durs := *pdurs
|
|
||||||
for i := 0; i < len(durs); i++ {
|
|
||||||
dproto := durationProto(durs[i])
|
|
||||||
data, err := Marshal(dproto)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
66
vendor/github.com/gogo/protobuf/proto/encode.go
generated
vendored
66
vendor/github.com/gogo/protobuf/proto/encode.go
generated
vendored
@ -70,10 +70,6 @@ var (
|
|||||||
|
|
||||||
// ErrNil is the error returned if Marshal is called with nil.
|
// ErrNil is the error returned if Marshal is called with nil.
|
||||||
ErrNil = errors.New("proto: Marshal called with nil")
|
ErrNil = errors.New("proto: Marshal called with nil")
|
||||||
|
|
||||||
// ErrTooLarge is the error returned if Marshal is called with a
|
|
||||||
// message that encodes to >2GB.
|
|
||||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// The fundamental encoders that put bytes on the wire.
|
// The fundamental encoders that put bytes on the wire.
|
||||||
@ -82,10 +78,6 @@ var (
|
|||||||
|
|
||||||
const maxVarintBytes = 10 // maximum length of a varint
|
const maxVarintBytes = 10 // maximum length of a varint
|
||||||
|
|
||||||
// maxMarshalSize is the largest allowed size of an encoded protobuf,
|
|
||||||
// since C++ and Java use signed int32s for the size.
|
|
||||||
const maxMarshalSize = 1<<31 - 1
|
|
||||||
|
|
||||||
// EncodeVarint returns the varint encoding of x.
|
// EncodeVarint returns the varint encoding of x.
|
||||||
// This is the format for the
|
// This is the format for the
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
@ -234,6 +226,10 @@ func Marshal(pb Message) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
p := NewBuffer(nil)
|
p := NewBuffer(nil)
|
||||||
err := p.Marshal(pb)
|
err := p.Marshal(pb)
|
||||||
|
var state errorState
|
||||||
|
if err != nil && !state.shouldContinue(err, nil) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if p.buf == nil && err == nil {
|
if p.buf == nil && err == nil {
|
||||||
// Return a non-nil slice on success.
|
// Return a non-nil slice on success.
|
||||||
return []byte{}, nil
|
return []byte{}, nil
|
||||||
@ -262,8 +258,11 @@ func (p *Buffer) Marshal(pb Message) error {
|
|||||||
// Can the object marshal itself?
|
// Can the object marshal itself?
|
||||||
if m, ok := pb.(Marshaler); ok {
|
if m, ok := pb.(Marshaler); ok {
|
||||||
data, err := m.Marshal()
|
data, err := m.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
p.buf = append(p.buf, data...)
|
p.buf = append(p.buf, data...)
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
t, base, err := getbase(pb)
|
t, base, err := getbase(pb)
|
||||||
@ -275,12 +274,9 @@ func (p *Buffer) Marshal(pb Message) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if collectStats {
|
if collectStats {
|
||||||
(stats).Encode++ // Parens are to work around a goimports bug.
|
stats.Encode++
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(p.buf) > maxMarshalSize {
|
|
||||||
return ErrTooLarge
|
|
||||||
}
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -302,7 +298,7 @@ func Size(pb Message) (n int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if collectStats {
|
if collectStats {
|
||||||
(stats).Size++ // Parens are to work around a goimports bug.
|
stats.Size++
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
@ -1007,6 +1003,7 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
|
|||||||
if p.isMarshaler {
|
if p.isMarshaler {
|
||||||
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
||||||
data, _ := m.Marshal()
|
data, _ := m.Marshal()
|
||||||
|
n += len(p.tagcode)
|
||||||
n += sizeRawBytes(data)
|
n += sizeRawBytes(data)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -1065,25 +1062,10 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) {
|
|||||||
|
|
||||||
// Encode an extension map.
|
// Encode an extension map.
|
||||||
func (o *Buffer) enc_map(p *Properties, base structPointer) error {
|
func (o *Buffer) enc_map(p *Properties, base structPointer) error {
|
||||||
exts := structPointer_ExtMap(base, p.field)
|
v := *structPointer_ExtMap(base, p.field)
|
||||||
if err := encodeExtensionsMap(*exts); err != nil {
|
if err := encodeExtensionMap(v); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return o.enc_map_body(*exts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
|
|
||||||
exts := structPointer_Extensions(base, p.field)
|
|
||||||
if err := encodeExtensions(exts); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v, _ := exts.extensionsRead()
|
|
||||||
|
|
||||||
return o.enc_map_body(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_map_body(v map[int32]Extension) error {
|
|
||||||
// Fast-path for common cases: zero or one extensions.
|
// Fast-path for common cases: zero or one extensions.
|
||||||
if len(v) <= 1 {
|
if len(v) <= 1 {
|
||||||
for _, e := range v {
|
for _, e := range v {
|
||||||
@ -1106,13 +1088,8 @@ func (o *Buffer) enc_map_body(v map[int32]Extension) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func size_map(p *Properties, base structPointer) int {
|
func size_map(p *Properties, base structPointer) int {
|
||||||
v := structPointer_ExtMap(base, p.field)
|
v := *structPointer_ExtMap(base, p.field)
|
||||||
return extensionsMapSize(*v)
|
return sizeExtensionMap(v)
|
||||||
}
|
|
||||||
|
|
||||||
func size_exts(p *Properties, base structPointer) int {
|
|
||||||
v := structPointer_Extensions(base, p.field)
|
|
||||||
return extensionsSize(v)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encode a map field.
|
// Encode a map field.
|
||||||
@ -1141,7 +1118,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
|
|||||||
if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
|
if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
|
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -1151,6 +1128,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
|
|||||||
for _, key := range v.MapKeys() {
|
for _, key := range v.MapKeys() {
|
||||||
val := v.MapIndex(key)
|
val := v.MapIndex(key)
|
||||||
|
|
||||||
|
// The only illegal map entry values are nil message pointers.
|
||||||
|
if val.Kind() == reflect.Ptr && val.IsNil() {
|
||||||
|
return errors.New("proto: map has nil element")
|
||||||
|
}
|
||||||
|
|
||||||
keycopy.Set(key)
|
keycopy.Set(key)
|
||||||
valcopy.Set(val)
|
valcopy.Set(val)
|
||||||
|
|
||||||
@ -1238,9 +1220,6 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(o.buf) > maxMarshalSize {
|
|
||||||
return ErrTooLarge
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1257,9 +1236,6 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
|
|||||||
// Add unrecognized fields at the end.
|
// Add unrecognized fields at the end.
|
||||||
if prop.unrecField.IsValid() {
|
if prop.unrecField.IsValid() {
|
||||||
v := *structPointer_Bytes(base, prop.unrecField)
|
v := *structPointer_Bytes(base, prop.unrecField)
|
||||||
if len(o.buf)+len(v) > maxMarshalSize {
|
|
||||||
return ErrTooLarge
|
|
||||||
}
|
|
||||||
if len(v) > 0 {
|
if len(v) > 0 {
|
||||||
o.buf = append(o.buf, v...)
|
o.buf = append(o.buf, v...)
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/gogo/protobuf/proto/encode_gogo.go
generated
vendored
6
vendor/github.com/gogo/protobuf/proto/encode_gogo.go
generated
vendored
@ -1,7 +1,7 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Extensions for Protocol Buffers to create more go like structures.
|
||||||
//
|
//
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
// http://github.com/gogo/protobuf
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
//
|
//
|
||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
//
|
//
|
||||||
|
34
vendor/github.com/gogo/protobuf/proto/equal.go
generated
vendored
34
vendor/github.com/gogo/protobuf/proto/equal.go
generated
vendored
@ -54,17 +54,13 @@ Equality is defined in this way:
|
|||||||
in a proto3 .proto file, fields are not "set"; specifically,
|
in a proto3 .proto file, fields are not "set"; specifically,
|
||||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||||
- Two repeated fields are equal iff their lengths are the same,
|
- Two repeated fields are equal iff their lengths are the same,
|
||||||
and their corresponding elements are equal. Note a "bytes" field,
|
and their corresponding elements are equal (a "bytes" field,
|
||||||
although represented by []byte, is not a repeated field and the
|
although represented by []byte, is not a repeated field)
|
||||||
rule for the scalar fields described above applies.
|
|
||||||
- Two unset fields are equal.
|
- Two unset fields are equal.
|
||||||
- Two unknown field sets are equal if their current
|
- Two unknown field sets are equal if their current
|
||||||
encoded state is equal.
|
encoded state is equal.
|
||||||
- Two extension sets are equal iff they have corresponding
|
- Two extension sets are equal iff they have corresponding
|
||||||
elements that are pairwise equal.
|
elements that are pairwise equal.
|
||||||
- Two map fields are equal iff their lengths are the same,
|
|
||||||
and they contain the same set of elements. Zero-length map
|
|
||||||
fields are equal.
|
|
||||||
- Every other combination of things are not equal.
|
- Every other combination of things are not equal.
|
||||||
|
|
||||||
The return value is undefined if a and b are not protocol buffers.
|
The return value is undefined if a and b are not protocol buffers.
|
||||||
@ -125,16 +121,9 @@ func equalStruct(v1, v2 reflect.Value) bool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
|
||||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
|
||||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||||
em2 := v2.FieldByName("XXX_extensions")
|
em2 := v2.FieldByName("XXX_extensions")
|
||||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -195,13 +184,6 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
case reflect.Ptr:
|
case reflect.Ptr:
|
||||||
// Maps may have nil values in them, so check for nil.
|
|
||||||
if v1.IsNil() && v2.IsNil() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if v1.IsNil() != v2.IsNil() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||||
@ -241,14 +223,8 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// base is the struct type that the extensions are based on.
|
// base is the struct type that the extensions are based on.
|
||||||
// x1 and x2 are InternalExtensions.
|
// em1 and em2 are extension maps.
|
||||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||||
em1, _ := x1.extensionsRead()
|
|
||||||
em2, _ := x2.extensionsRead()
|
|
||||||
return equalExtMap(base, em1, em2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|
||||||
if len(em1) != len(em2) {
|
if len(em1) != len(em2) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
412
vendor/github.com/gogo/protobuf/proto/extensions.go
generated
vendored
412
vendor/github.com/gogo/protobuf/proto/extensions.go
generated
vendored
@ -52,112 +52,23 @@ type ExtensionRange struct {
|
|||||||
Start, End int32 // both inclusive
|
Start, End int32 // both inclusive
|
||||||
}
|
}
|
||||||
|
|
||||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
// extendableProto is an interface implemented by any protocol buffer that may be extended.
|
||||||
// proto compiler that may be extended.
|
|
||||||
type extendableProto interface {
|
type extendableProto interface {
|
||||||
Message
|
Message
|
||||||
ExtensionRangeArray() []ExtensionRange
|
ExtensionRangeArray() []ExtensionRange
|
||||||
extensionsWrite() map[int32]Extension
|
|
||||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
type extensionsMap interface {
|
||||||
// version of the proto compiler that may be extended.
|
extendableProto
|
||||||
type extendableProtoV1 interface {
|
|
||||||
Message
|
|
||||||
ExtensionRangeArray() []ExtensionRange
|
|
||||||
ExtensionMap() map[int32]Extension
|
ExtensionMap() map[int32]Extension
|
||||||
}
|
}
|
||||||
|
|
||||||
type extensionsBytes interface {
|
type extensionsBytes interface {
|
||||||
Message
|
extendableProto
|
||||||
ExtensionRangeArray() []ExtensionRange
|
|
||||||
GetExtensions() *[]byte
|
GetExtensions() *[]byte
|
||||||
}
|
}
|
||||||
|
|
||||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
|
||||||
type extensionAdapter struct {
|
|
||||||
extendableProtoV1
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
|
||||||
return e.ExtensionMap()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
|
||||||
return e.ExtensionMap(), notLocker{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
|
||||||
type notLocker struct{}
|
|
||||||
|
|
||||||
func (n notLocker) Lock() {}
|
|
||||||
func (n notLocker) Unlock() {}
|
|
||||||
|
|
||||||
// extendable returns the extendableProto interface for the given generated proto message.
|
|
||||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
|
||||||
// the extendableProto interface.
|
|
||||||
func extendable(p interface{}) (extendableProto, bool) {
|
|
||||||
if ep, ok := p.(extendableProto); ok {
|
|
||||||
return ep, ok
|
|
||||||
}
|
|
||||||
if ep, ok := p.(extendableProtoV1); ok {
|
|
||||||
return extensionAdapter{ep}, ok
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
|
||||||
//
|
|
||||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
|
||||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
|
||||||
//
|
|
||||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
|
||||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
|
||||||
type XXX_InternalExtensions struct {
|
|
||||||
// The struct must be indirect so that if a user inadvertently copies a
|
|
||||||
// generated message and its embedded XXX_InternalExtensions, they
|
|
||||||
// avoid the mayhem of a copied mutex.
|
|
||||||
//
|
|
||||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
|
||||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
|
||||||
// mutually exclusive with other accesses.
|
|
||||||
p *struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
extensionMap map[int32]Extension
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// extensionsWrite returns the extension map, creating it on first use.
|
|
||||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = new(struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
extensionMap map[int32]Extension
|
|
||||||
})
|
|
||||||
e.p.extensionMap = make(map[int32]Extension)
|
|
||||||
}
|
|
||||||
return e.p.extensionMap
|
|
||||||
}
|
|
||||||
|
|
||||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
|
||||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
|
||||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
|
||||||
if e.p == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return e.p.extensionMap, &e.p.mu
|
|
||||||
}
|
|
||||||
|
|
||||||
type extensionRange interface {
|
|
||||||
Message
|
|
||||||
ExtensionRangeArray() []ExtensionRange
|
|
||||||
}
|
|
||||||
|
|
||||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||||
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
|
|
||||||
var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem()
|
|
||||||
var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem()
|
|
||||||
|
|
||||||
// ExtensionDesc represents an extension specification.
|
// ExtensionDesc represents an extension specification.
|
||||||
// Used in generated code from the protocol compiler.
|
// Used in generated code from the protocol compiler.
|
||||||
@ -190,23 +101,20 @@ type Extension struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetRawExtension is for testing only.
|
// SetRawExtension is for testing only.
|
||||||
func SetRawExtension(base Message, id int32, b []byte) {
|
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||||
if ebase, ok := base.(extensionsBytes); ok {
|
if ebase, ok := base.(extensionsMap); ok {
|
||||||
|
ebase.ExtensionMap()[id] = Extension{enc: b}
|
||||||
|
} else if ebase, ok := base.(extensionsBytes); ok {
|
||||||
clearExtension(base, id)
|
clearExtension(base, id)
|
||||||
ext := ebase.GetExtensions()
|
ext := ebase.GetExtensions()
|
||||||
*ext = append(*ext, b...)
|
*ext = append(*ext, b...)
|
||||||
return
|
} else {
|
||||||
|
panic("unreachable")
|
||||||
}
|
}
|
||||||
epb, ok := extendable(base)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
extmap := epb.extensionsWrite()
|
|
||||||
extmap[id] = Extension{enc: b}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// isExtensionField returns true iff the given field number is in an extension range.
|
// isExtensionField returns true iff the given field number is in an extension range.
|
||||||
func isExtensionField(pb extensionRange, field int32) bool {
|
func isExtensionField(pb extendableProto, field int32) bool {
|
||||||
for _, er := range pb.ExtensionRangeArray() {
|
for _, er := range pb.ExtensionRangeArray() {
|
||||||
if er.Start <= field && field <= er.End {
|
if er.Start <= field && field <= er.End {
|
||||||
return true
|
return true
|
||||||
@ -217,12 +125,8 @@ func isExtensionField(pb extensionRange, field int32) bool {
|
|||||||
|
|
||||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||||
var pbi interface{} = pb
|
|
||||||
// Check the extended type.
|
// Check the extended type.
|
||||||
if ea, ok := pbi.(extensionAdapter); ok {
|
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||||
pbi = ea.extendableProtoV1
|
|
||||||
}
|
|
||||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
|
||||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||||
}
|
}
|
||||||
// Check the range.
|
// Check the range.
|
||||||
@ -268,57 +172,43 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
|
|||||||
return prop
|
return prop
|
||||||
}
|
}
|
||||||
|
|
||||||
// encode encodes any unmarshaled (unencoded) extensions in e.
|
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
|
||||||
func encodeExtensions(e *XXX_InternalExtensions) error {
|
func encodeExtensionMap(m map[int32]Extension) error {
|
||||||
m, mu := e.extensionsRead()
|
|
||||||
if m == nil {
|
|
||||||
return nil // fast path
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
return encodeExtensionsMap(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode encodes any unmarshaled (unencoded) extensions in e.
|
|
||||||
func encodeExtensionsMap(m map[int32]Extension) error {
|
|
||||||
for k, e := range m {
|
for k, e := range m {
|
||||||
if e.value == nil || e.desc == nil {
|
err := encodeExtension(&e)
|
||||||
// Extension is only in its encoded form.
|
if err != nil {
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't skip extensions that have an encoded form set,
|
|
||||||
// because the extension value may have been mutated after
|
|
||||||
// the last time this function was called.
|
|
||||||
|
|
||||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
|
||||||
props := extensionProperties(e.desc)
|
|
||||||
|
|
||||||
p := NewBuffer(nil)
|
|
||||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
|
||||||
// Pass a *T with a zero field and hope it all works out.
|
|
||||||
x := reflect.New(et)
|
|
||||||
x.Elem().Set(reflect.ValueOf(e.value))
|
|
||||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
e.enc = p.buf
|
|
||||||
m[k] = e
|
m[k] = e
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func extensionsSize(e *XXX_InternalExtensions) (n int) {
|
func encodeExtension(e *Extension) error {
|
||||||
m, mu := e.extensionsRead()
|
if e.value == nil || e.desc == nil {
|
||||||
if m == nil {
|
// Extension is only in its encoded form.
|
||||||
return 0
|
return nil
|
||||||
}
|
}
|
||||||
mu.Lock()
|
// We don't skip extensions that have an encoded form set,
|
||||||
defer mu.Unlock()
|
// because the extension value may have been mutated after
|
||||||
return extensionsMapSize(m)
|
// the last time this function was called.
|
||||||
|
|
||||||
|
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||||
|
props := extensionProperties(e.desc)
|
||||||
|
|
||||||
|
p := NewBuffer(nil)
|
||||||
|
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||||
|
// Pass a *T with a zero field and hope it all works out.
|
||||||
|
x := reflect.New(et)
|
||||||
|
x.Elem().Set(reflect.ValueOf(e.value))
|
||||||
|
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
e.enc = p.buf
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func extensionsMapSize(m map[int32]Extension) (n int) {
|
func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||||
for _, e := range m {
|
for _, e := range m {
|
||||||
if e.value == nil || e.desc == nil {
|
if e.value == nil || e.desc == nil {
|
||||||
// Extension is only in its encoded form.
|
// Extension is only in its encoded form.
|
||||||
@ -343,8 +233,12 @@ func extensionsMapSize(m map[int32]Extension) (n int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// HasExtension returns whether the given extension is present in pb.
|
// HasExtension returns whether the given extension is present in pb.
|
||||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||||
if epb, doki := pb.(extensionsBytes); doki {
|
// TODO: Check types, field numbers, etc.?
|
||||||
|
if epb, doki := pb.(extensionsMap); doki {
|
||||||
|
_, ok := epb.ExtensionMap()[extension.Field]
|
||||||
|
return ok
|
||||||
|
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||||
ext := epb.GetExtensions()
|
ext := epb.GetExtensions()
|
||||||
buf := *ext
|
buf := *ext
|
||||||
o := 0
|
o := 0
|
||||||
@ -364,19 +258,7 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
// TODO: Check types, field numbers, etc.?
|
panic("unreachable")
|
||||||
epb, ok := extendable(pb)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
extmap, mu := epb.extensionsRead()
|
|
||||||
if extmap == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
_, ok = extmap[extension.Field]
|
|
||||||
mu.Unlock()
|
|
||||||
return ok
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
|
func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
|
||||||
@ -399,32 +281,64 @@ func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
|
|||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClearExtension removes the given extension from pb.
|
func clearExtension(pb extendableProto, fieldNum int32) {
|
||||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
if epb, doki := pb.(extensionsMap); doki {
|
||||||
clearExtension(pb, extension.Field)
|
delete(epb.ExtensionMap(), fieldNum)
|
||||||
}
|
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||||
|
|
||||||
func clearExtension(pb Message, fieldNum int32) {
|
|
||||||
if epb, doki := pb.(extensionsBytes); doki {
|
|
||||||
offset := 0
|
offset := 0
|
||||||
for offset != -1 {
|
for offset != -1 {
|
||||||
offset = deleteExtension(epb, fieldNum, offset)
|
offset = deleteExtension(epb, fieldNum, offset)
|
||||||
}
|
}
|
||||||
return
|
} else {
|
||||||
}
|
panic("unreachable")
|
||||||
epb, ok := extendable(pb)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearExtension removes the given extension from pb.
|
||||||
|
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||||
// TODO: Check types, field numbers, etc.?
|
// TODO: Check types, field numbers, etc.?
|
||||||
extmap := epb.extensionsWrite()
|
clearExtension(pb, extension.Field)
|
||||||
delete(extmap, fieldNum)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetExtension parses and returns the given extension of pb.
|
// GetExtension parses and returns the given extension of pb.
|
||||||
// If the extension is not present and has no default value it returns ErrMissingExtension.
|
// If the extension is not present it returns ErrMissingExtension.
|
||||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||||
if epb, doki := pb.(extensionsBytes); doki {
|
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if epb, doki := pb.(extensionsMap); doki {
|
||||||
|
emap := epb.ExtensionMap()
|
||||||
|
e, ok := emap[extension.Field]
|
||||||
|
if !ok {
|
||||||
|
// defaultExtensionValue returns the default value or
|
||||||
|
// ErrMissingExtension if there is no default.
|
||||||
|
return defaultExtensionValue(extension)
|
||||||
|
}
|
||||||
|
if e.value != nil {
|
||||||
|
// Already decoded. Check the descriptor, though.
|
||||||
|
if e.desc != extension {
|
||||||
|
// This shouldn't happen. If it does, it means that
|
||||||
|
// GetExtension was called twice with two different
|
||||||
|
// descriptors with the same field number.
|
||||||
|
return nil, errors.New("proto: descriptor conflict")
|
||||||
|
}
|
||||||
|
return e.value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := decodeExtension(e.enc, extension)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remember the decoded version and drop the encoded version.
|
||||||
|
// That way it is safe to mutate what we return.
|
||||||
|
e.value = v
|
||||||
|
e.desc = extension
|
||||||
|
e.enc = nil
|
||||||
|
emap[extension.Field] = e
|
||||||
|
return e.value, nil
|
||||||
|
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||||
ext := epb.GetExtensions()
|
ext := epb.GetExtensions()
|
||||||
o := 0
|
o := 0
|
||||||
for o < len(*ext) {
|
for o < len(*ext) {
|
||||||
@ -446,50 +360,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
return defaultExtensionValue(extension)
|
return defaultExtensionValue(extension)
|
||||||
}
|
}
|
||||||
epb, ok := extendable(pb)
|
panic("unreachable")
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("proto: not an extendable proto")
|
|
||||||
}
|
|
||||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
emap, mu := epb.extensionsRead()
|
|
||||||
if emap == nil {
|
|
||||||
return defaultExtensionValue(extension)
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
e, ok := emap[extension.Field]
|
|
||||||
if !ok {
|
|
||||||
// defaultExtensionValue returns the default value or
|
|
||||||
// ErrMissingExtension if there is no default.
|
|
||||||
return defaultExtensionValue(extension)
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.value != nil {
|
|
||||||
// Already decoded. Check the descriptor, though.
|
|
||||||
if e.desc != extension {
|
|
||||||
// This shouldn't happen. If it does, it means that
|
|
||||||
// GetExtension was called twice with two different
|
|
||||||
// descriptors with the same field number.
|
|
||||||
return nil, errors.New("proto: descriptor conflict")
|
|
||||||
}
|
|
||||||
return e.value, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
v, err := decodeExtension(e.enc, extension)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remember the decoded version and drop the encoded version.
|
|
||||||
// That way it is safe to mutate what we return.
|
|
||||||
e.value = v
|
|
||||||
e.desc = extension
|
|
||||||
e.enc = nil
|
|
||||||
emap[extension.Field] = e
|
|
||||||
return e.value, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// defaultExtensionValue returns the default value for extension.
|
// defaultExtensionValue returns the default value for extension.
|
||||||
@ -563,9 +434,14 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
|||||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||||
|
epb, ok := pb.(extendableProto)
|
||||||
|
if !ok {
|
||||||
|
err = errors.New("proto: not an extendable proto")
|
||||||
|
return
|
||||||
|
}
|
||||||
extensions = make([]interface{}, len(es))
|
extensions = make([]interface{}, len(es))
|
||||||
for i, e := range es {
|
for i, e := range es {
|
||||||
extensions[i], err = GetExtension(pb, e)
|
extensions[i], err = GetExtension(epb, e)
|
||||||
if err == ErrMissingExtension {
|
if err == ErrMissingExtension {
|
||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
@ -576,58 +452,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
|
||||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
|
||||||
// just the Field field, which defines the extension's field number.
|
|
||||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
|
||||||
epb, ok := extendable(pb)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
|
|
||||||
}
|
|
||||||
registeredExtensions := RegisteredExtensions(pb)
|
|
||||||
|
|
||||||
emap, mu := epb.extensionsRead()
|
|
||||||
if emap == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
|
||||||
for extid, e := range emap {
|
|
||||||
desc := e.desc
|
|
||||||
if desc == nil {
|
|
||||||
desc = registeredExtensions[extid]
|
|
||||||
if desc == nil {
|
|
||||||
desc = &ExtensionDesc{Field: extid}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
extensions = append(extensions, desc)
|
|
||||||
}
|
|
||||||
return extensions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetExtension sets the specified extension of pb to the specified value.
|
// SetExtension sets the specified extension of pb to the specified value.
|
||||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||||
if epb, doki := pb.(extensionsBytes); doki {
|
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||||
ClearExtension(pb, extension)
|
|
||||||
ext := epb.GetExtensions()
|
|
||||||
et := reflect.TypeOf(extension.ExtensionType)
|
|
||||||
props := extensionProperties(extension)
|
|
||||||
p := NewBuffer(nil)
|
|
||||||
x := reflect.New(et)
|
|
||||||
x.Elem().Set(reflect.ValueOf(value))
|
|
||||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*ext = append(*ext, p.buf...)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
epb, ok := extendable(pb)
|
|
||||||
if !ok {
|
|
||||||
return errors.New("proto: not an extendable proto")
|
|
||||||
}
|
|
||||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
typ := reflect.TypeOf(extension.ExtensionType)
|
typ := reflect.TypeOf(extension.ExtensionType)
|
||||||
@ -642,27 +469,26 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
|
|||||||
if reflect.ValueOf(value).IsNil() {
|
if reflect.ValueOf(value).IsNil() {
|
||||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||||
}
|
}
|
||||||
|
return setExtension(pb, extension, value)
|
||||||
extmap := epb.extensionsWrite()
|
|
||||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClearAllExtensions clears all extensions from pb.
|
func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||||
func ClearAllExtensions(pb Message) {
|
if epb, doki := pb.(extensionsMap); doki {
|
||||||
if epb, doki := pb.(extensionsBytes); doki {
|
epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||||
|
} else if epb, doki := pb.(extensionsBytes); doki {
|
||||||
|
ClearExtension(pb, extension)
|
||||||
ext := epb.GetExtensions()
|
ext := epb.GetExtensions()
|
||||||
*ext = []byte{}
|
et := reflect.TypeOf(extension.ExtensionType)
|
||||||
return
|
props := extensionProperties(extension)
|
||||||
}
|
p := NewBuffer(nil)
|
||||||
epb, ok := extendable(pb)
|
x := reflect.New(et)
|
||||||
if !ok {
|
x.Elem().Set(reflect.ValueOf(value))
|
||||||
return
|
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||||
}
|
return err
|
||||||
m := epb.extensionsWrite()
|
}
|
||||||
for k := range m {
|
*ext = append(*ext, p.buf...)
|
||||||
delete(m, k)
|
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// A global registry of extensions.
|
// A global registry of extensions.
|
||||||
|
88
vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
generated
vendored
88
vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
@ -35,10 +33,9 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
|
func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
|
||||||
if reflect.ValueOf(pb).IsNil() {
|
if reflect.ValueOf(pb).IsNil() {
|
||||||
return ifnotset
|
return ifnotset
|
||||||
}
|
}
|
||||||
@ -63,12 +60,8 @@ func (this *Extension) Compare(that *Extension) int {
|
|||||||
return bytes.Compare(this.enc, that.enc)
|
return bytes.Compare(this.enc, that.enc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SizeOfInternalExtension(m extendableProto) (n int) {
|
|
||||||
return SizeOfExtensionMap(m.extensionsWrite())
|
|
||||||
}
|
|
||||||
|
|
||||||
func SizeOfExtensionMap(m map[int32]Extension) (n int) {
|
func SizeOfExtensionMap(m map[int32]Extension) (n int) {
|
||||||
return extensionsMapSize(m)
|
return sizeExtensionMap(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
type sortableMapElem struct {
|
type sortableMapElem struct {
|
||||||
@ -101,10 +94,6 @@ func (this sortableExtensions) String() string {
|
|||||||
return "map[" + strings.Join(ss, ",") + "]"
|
return "map[" + strings.Join(ss, ",") + "]"
|
||||||
}
|
}
|
||||||
|
|
||||||
func StringFromInternalExtension(m extendableProto) string {
|
|
||||||
return StringFromExtensionsMap(m.extensionsWrite())
|
|
||||||
}
|
|
||||||
|
|
||||||
func StringFromExtensionsMap(m map[int32]Extension) string {
|
func StringFromExtensionsMap(m map[int32]Extension) string {
|
||||||
return newSortableExtensionsFromMap(m).String()
|
return newSortableExtensionsFromMap(m).String()
|
||||||
}
|
}
|
||||||
@ -117,12 +106,8 @@ func StringFromExtensionsBytes(ext []byte) string {
|
|||||||
return StringFromExtensionsMap(m)
|
return StringFromExtensionsMap(m)
|
||||||
}
|
}
|
||||||
|
|
||||||
func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
|
|
||||||
return EncodeExtensionMap(m.extensionsWrite(), data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
|
func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
|
||||||
if err := encodeExtensionsMap(m); err != nil {
|
if err := encodeExtensionMap(m); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
keys := make([]int, 0, len(m))
|
keys := make([]int, 0, len(m))
|
||||||
@ -140,7 +125,7 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
|
|||||||
if m[id].value == nil || m[id].desc == nil {
|
if m[id].value == nil || m[id].desc == nil {
|
||||||
return m[id].enc, nil
|
return m[id].enc, nil
|
||||||
}
|
}
|
||||||
if err := encodeExtensionsMap(m); err != nil {
|
if err := encodeExtensionMap(m); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return m[id].enc, nil
|
return m[id].enc, nil
|
||||||
@ -204,42 +189,15 @@ func NewExtension(e []byte) Extension {
|
|||||||
return ee
|
return ee
|
||||||
}
|
}
|
||||||
|
|
||||||
func AppendExtension(e Message, tag int32, buf []byte) {
|
func AppendExtension(e extendableProto, tag int32, buf []byte) {
|
||||||
if ee, eok := e.(extensionsBytes); eok {
|
if ee, eok := e.(extensionsMap); eok {
|
||||||
|
ext := ee.ExtensionMap()[int32(tag)] // may be missing
|
||||||
|
ext.enc = append(ext.enc, buf...)
|
||||||
|
ee.ExtensionMap()[int32(tag)] = ext
|
||||||
|
} else if ee, eok := e.(extensionsBytes); eok {
|
||||||
ext := ee.GetExtensions()
|
ext := ee.GetExtensions()
|
||||||
*ext = append(*ext, buf...)
|
*ext = append(*ext, buf...)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
if ee, eok := e.(extendableProto); eok {
|
|
||||||
m := ee.extensionsWrite()
|
|
||||||
ext := m[int32(tag)] // may be missing
|
|
||||||
ext.enc = append(ext.enc, buf...)
|
|
||||||
m[int32(tag)] = ext
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeExtension(e *Extension) error {
|
|
||||||
if e.value == nil || e.desc == nil {
|
|
||||||
// Extension is only in its encoded form.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// We don't skip extensions that have an encoded form set,
|
|
||||||
// because the extension value may have been mutated after
|
|
||||||
// the last time this function was called.
|
|
||||||
|
|
||||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
|
||||||
props := extensionProperties(e.desc)
|
|
||||||
|
|
||||||
p := NewBuffer(nil)
|
|
||||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
|
||||||
// Pass a *T with a zero field and hope it all works out.
|
|
||||||
x := reflect.New(et)
|
|
||||||
x.Elem().Set(reflect.ValueOf(e.value))
|
|
||||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.enc = p.buf
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (this Extension) GoString() string {
|
func (this Extension) GoString() string {
|
||||||
@ -251,7 +209,7 @@ func (this Extension) GoString() string {
|
|||||||
return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
|
return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
|
func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
|
||||||
typ := reflect.TypeOf(pb).Elem()
|
typ := reflect.TypeOf(pb).Elem()
|
||||||
ext, ok := extensionMaps[typ]
|
ext, ok := extensionMaps[typ]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -261,10 +219,10 @@ func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
|
|||||||
if !ok {
|
if !ok {
|
||||||
return errors.New("proto: bad extension number; not in declared ranges")
|
return errors.New("proto: bad extension number; not in declared ranges")
|
||||||
}
|
}
|
||||||
return SetExtension(pb, desc, value)
|
return setExtension(pb, desc, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
|
func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
|
||||||
typ := reflect.TypeOf(pb).Elem()
|
typ := reflect.TypeOf(pb).Elem()
|
||||||
ext, ok := extensionMaps[typ]
|
ext, ok := extensionMaps[typ]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -276,19 +234,3 @@ func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
|
|||||||
}
|
}
|
||||||
return GetExtension(pb, desc)
|
return GetExtension(pb, desc)
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
|
|
||||||
x := &XXX_InternalExtensions{
|
|
||||||
p: new(struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
extensionMap map[int32]Extension
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
x.p.extensionMap = m
|
|
||||||
return *x
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
|
|
||||||
pb := extendable.(extendableProto)
|
|
||||||
return pb.extensionsWrite()
|
|
||||||
}
|
|
||||||
|
6
vendor/github.com/gogo/protobuf/proto/lib.go
generated
vendored
6
vendor/github.com/gogo/protobuf/proto/lib.go
generated
vendored
@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
|
|||||||
// temporary Buffer and are fine for most applications.
|
// temporary Buffer and are fine for most applications.
|
||||||
type Buffer struct {
|
type Buffer struct {
|
||||||
buf []byte // encode/decode byte stream
|
buf []byte // encode/decode byte stream
|
||||||
index int // read point
|
index int // write point
|
||||||
|
|
||||||
// pools of basic types to amortize allocation.
|
// pools of basic types to amortize allocation.
|
||||||
bools []bool
|
bools []bool
|
||||||
@ -889,10 +889,6 @@ func isProto3Zero(v reflect.Value) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
|
||||||
const GoGoProtoPackageIsVersion2 = true
|
|
||||||
|
|
||||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||||
// to assert that that code is compatible with this version of the proto package.
|
// to assert that that code is compatible with this version of the proto package.
|
||||||
const GoGoProtoPackageIsVersion1 = true
|
const GoGoProtoPackageIsVersion1 = true
|
||||||
|
6
vendor/github.com/gogo/protobuf/proto/lib_gogo.go
generated
vendored
6
vendor/github.com/gogo/protobuf/proto/lib_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
|
43
vendor/github.com/gogo/protobuf/proto/message_set.go
generated
vendored
43
vendor/github.com/gogo/protobuf/proto/message_set.go
generated
vendored
@ -149,21 +149,9 @@ func skipVarint(buf []byte) []byte {
|
|||||||
|
|
||||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
||||||
var m map[int32]Extension
|
if err := encodeExtensionMap(m); err != nil {
|
||||||
switch exts := exts.(type) {
|
return nil, err
|
||||||
case *XXX_InternalExtensions:
|
|
||||||
if err := encodeExtensions(exts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m, _ = exts.extensionsRead()
|
|
||||||
case map[int32]Extension:
|
|
||||||
if err := encodeExtensionsMap(exts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m = exts
|
|
||||||
default:
|
|
||||||
return nil, errors.New("proto: not an extension map")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort extension IDs to provide a deterministic encoding.
|
// Sort extension IDs to provide a deterministic encoding.
|
||||||
@ -190,17 +178,7 @@ func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
|||||||
|
|
||||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
||||||
var m map[int32]Extension
|
|
||||||
switch exts := exts.(type) {
|
|
||||||
case *XXX_InternalExtensions:
|
|
||||||
m = exts.extensionsWrite()
|
|
||||||
case map[int32]Extension:
|
|
||||||
m = exts
|
|
||||||
default:
|
|
||||||
return errors.New("proto: not an extension map")
|
|
||||||
}
|
|
||||||
|
|
||||||
ms := new(messageSet)
|
ms := new(messageSet)
|
||||||
if err := Unmarshal(buf, ms); err != nil {
|
if err := Unmarshal(buf, ms); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -231,16 +209,7 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
|||||||
|
|
||||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
||||||
var m map[int32]Extension
|
|
||||||
switch exts := exts.(type) {
|
|
||||||
case *XXX_InternalExtensions:
|
|
||||||
m, _ = exts.extensionsRead()
|
|
||||||
case map[int32]Extension:
|
|
||||||
m = exts
|
|
||||||
default:
|
|
||||||
return nil, errors.New("proto: not an extension map")
|
|
||||||
}
|
|
||||||
var b bytes.Buffer
|
var b bytes.Buffer
|
||||||
b.WriteByte('{')
|
b.WriteByte('{')
|
||||||
|
|
||||||
@ -283,7 +252,7 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
|||||||
|
|
||||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
|
||||||
// Common-case fast path.
|
// Common-case fast path.
|
||||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||||
return nil
|
return nil
|
||||||
|
7
vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
generated
vendored
7
vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
generated
vendored
@ -29,7 +29,7 @@
|
|||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
// +build appengine js
|
// +build appengine
|
||||||
|
|
||||||
// This file contains an implementation of proto field accesses using package reflect.
|
// This file contains an implementation of proto field accesses using package reflect.
|
||||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||||
@ -139,11 +139,6 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
|
|||||||
return structPointer_ifield(p, f).(*[]string)
|
return structPointer_ifield(p, f).(*[]string)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extensions returns the address of an extension map field in the struct.
|
|
||||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
|
||||||
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtMap returns the address of an extension map field in the struct.
|
// ExtMap returns the address of an extension map field in the struct.
|
||||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
return structPointer_ifield(p, f).(*map[int32]Extension)
|
||||||
|
6
vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
generated
vendored
6
vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
generated
vendored
@ -29,7 +29,7 @@
|
|||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
// +build !appengine,!js
|
// +build !appengine
|
||||||
|
|
||||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||||
|
|
||||||
@ -126,10 +126,6 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ExtMap returns the address of an extension map field in the struct.
|
// ExtMap returns the address of an extension map field in the struct.
|
||||||
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
|
|
||||||
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||||
}
|
}
|
||||||
|
19
vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
generated
vendored
19
vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
@ -72,13 +70,16 @@ func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {
|
|||||||
|
|
||||||
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
|
func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
|
||||||
size := typ.Elem().Size()
|
size := typ.Elem().Size()
|
||||||
|
|
||||||
oldHeader := structPointer_GetSliceHeader(base, f)
|
oldHeader := structPointer_GetSliceHeader(base, f)
|
||||||
oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem()
|
|
||||||
newLen := oldHeader.Len + 1
|
newLen := oldHeader.Len + 1
|
||||||
newSlice := reflect.MakeSlice(typ, newLen, newLen)
|
slice := reflect.MakeSlice(typ, newLen, newLen)
|
||||||
reflect.Copy(newSlice, oldSlice)
|
bas := toStructPointer(slice)
|
||||||
bas := toStructPointer(newSlice)
|
for i := 0; i < oldHeader.Len; i++ {
|
||||||
|
newElemptr := uintptr(bas) + uintptr(i)*size
|
||||||
|
oldElemptr := oldHeader.Data + uintptr(i)*size
|
||||||
|
copyUintPtr(oldElemptr, newElemptr, int(size))
|
||||||
|
}
|
||||||
|
|
||||||
oldHeader.Data = uintptr(bas)
|
oldHeader.Data = uintptr(bas)
|
||||||
oldHeader.Len = newLen
|
oldHeader.Len = newLen
|
||||||
oldHeader.Cap = newLen
|
oldHeader.Cap = newLen
|
||||||
|
93
vendor/github.com/gogo/protobuf/proto/properties.go
generated
vendored
93
vendor/github.com/gogo/protobuf/proto/properties.go
generated
vendored
@ -1,7 +1,7 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Extensions for Protocol Buffers to create more go like structures.
|
||||||
//
|
//
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
// http://github.com/gogo/protobuf
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
//
|
//
|
||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
//
|
//
|
||||||
@ -190,11 +190,10 @@ type Properties struct {
|
|||||||
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||||
oneof bool // whether this is a oneof field
|
oneof bool // whether this is a oneof field
|
||||||
|
|
||||||
Default string // default value
|
Default string // default value
|
||||||
HasDefault bool // whether an explicit default was provided
|
HasDefault bool // whether an explicit default was provided
|
||||||
CustomType string
|
CustomType string
|
||||||
StdTime bool
|
def_uint64 uint64
|
||||||
StdDuration bool
|
|
||||||
|
|
||||||
enc encoder
|
enc encoder
|
||||||
valEnc valueEncoder // set for bool and numeric types only
|
valEnc valueEncoder // set for bool and numeric types only
|
||||||
@ -341,10 +340,6 @@ func (p *Properties) Parse(s string) {
|
|||||||
p.OrigName = strings.Split(f, "=")[1]
|
p.OrigName = strings.Split(f, "=")[1]
|
||||||
case strings.HasPrefix(f, "customtype="):
|
case strings.HasPrefix(f, "customtype="):
|
||||||
p.CustomType = strings.Split(f, "=")[1]
|
p.CustomType = strings.Split(f, "=")[1]
|
||||||
case f == "stdtime":
|
|
||||||
p.StdTime = true
|
|
||||||
case f == "stdduration":
|
|
||||||
p.StdDuration = true
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -360,22 +355,11 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||||||
p.enc = nil
|
p.enc = nil
|
||||||
p.dec = nil
|
p.dec = nil
|
||||||
p.size = nil
|
p.size = nil
|
||||||
isMap := typ.Kind() == reflect.Map
|
if len(p.CustomType) > 0 {
|
||||||
if len(p.CustomType) > 0 && !isMap {
|
|
||||||
p.setCustomEncAndDec(typ)
|
p.setCustomEncAndDec(typ)
|
||||||
p.setTag(lockGetProp)
|
p.setTag(lockGetProp)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if p.StdTime && !isMap {
|
|
||||||
p.setTimeEncAndDec(typ)
|
|
||||||
p.setTag(lockGetProp)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if p.StdDuration && !isMap {
|
|
||||||
p.setDurationEncAndDec(typ)
|
|
||||||
p.setTag(lockGetProp)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch t1 := typ; t1.Kind() {
|
switch t1 := typ; t1.Kind() {
|
||||||
default:
|
default:
|
||||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
||||||
@ -558,13 +542,17 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||||||
p.dec = (*Buffer).dec_slice_int64
|
p.dec = (*Buffer).dec_slice_int64
|
||||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||||
case reflect.Uint8:
|
case reflect.Uint8:
|
||||||
|
p.enc = (*Buffer).enc_slice_byte
|
||||||
p.dec = (*Buffer).dec_slice_byte
|
p.dec = (*Buffer).dec_slice_byte
|
||||||
if p.proto3 {
|
p.size = size_slice_byte
|
||||||
|
// This is a []byte, which is either a bytes field,
|
||||||
|
// or the value of a map field. In the latter case,
|
||||||
|
// we always encode an empty []byte, so we should not
|
||||||
|
// use the proto3 enc/size funcs.
|
||||||
|
// f == nil iff this is the key/value of a map field.
|
||||||
|
if p.proto3 && f != nil {
|
||||||
p.enc = (*Buffer).enc_proto3_slice_byte
|
p.enc = (*Buffer).enc_proto3_slice_byte
|
||||||
p.size = size_proto3_slice_byte
|
p.size = size_proto3_slice_byte
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_byte
|
|
||||||
p.size = size_slice_byte
|
|
||||||
}
|
}
|
||||||
case reflect.Float32, reflect.Float64:
|
case reflect.Float32, reflect.Float64:
|
||||||
switch t2.Bits() {
|
switch t2.Bits() {
|
||||||
@ -646,10 +634,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||||||
// so we need encoders for the pointer to this type.
|
// so we need encoders for the pointer to this type.
|
||||||
vtype = reflect.PtrTo(vtype)
|
vtype = reflect.PtrTo(vtype)
|
||||||
}
|
}
|
||||||
|
|
||||||
p.mvalprop.CustomType = p.CustomType
|
|
||||||
p.mvalprop.StdDuration = p.StdDuration
|
|
||||||
p.mvalprop.StdTime = p.StdTime
|
|
||||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||||
}
|
}
|
||||||
p.setTag(lockGetProp)
|
p.setTag(lockGetProp)
|
||||||
@ -760,9 +744,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
propertiesMap[t] = prop
|
propertiesMap[t] = prop
|
||||||
|
|
||||||
// build properties
|
// build properties
|
||||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
|
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
|
||||||
reflect.PtrTo(t).Implements(extendableProtoV1Type) ||
|
|
||||||
reflect.PtrTo(t).Implements(extendableBytesType)
|
|
||||||
prop.unrecField = invalidField
|
prop.unrecField = invalidField
|
||||||
prop.Prop = make([]*Properties, t.NumField())
|
prop.Prop = make([]*Properties, t.NumField())
|
||||||
prop.order = make([]int, t.NumField())
|
prop.order = make([]int, t.NumField())
|
||||||
@ -774,11 +756,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
name := f.Name
|
name := f.Name
|
||||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||||
|
|
||||||
if f.Name == "XXX_InternalExtensions" { // special case
|
if f.Name == "XXX_extensions" { // special case
|
||||||
p.enc = (*Buffer).enc_exts
|
|
||||||
p.dec = nil // not needed
|
|
||||||
p.size = size_exts
|
|
||||||
} else if f.Name == "XXX_extensions" { // special case
|
|
||||||
if len(f.Tag.Get("protobuf")) > 0 {
|
if len(f.Tag.Get("protobuf")) > 0 {
|
||||||
p.enc = (*Buffer).enc_ext_slice_byte
|
p.enc = (*Buffer).enc_ext_slice_byte
|
||||||
p.dec = nil // not needed
|
p.dec = nil // not needed
|
||||||
@ -788,14 +766,13 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
p.dec = nil // not needed
|
p.dec = nil // not needed
|
||||||
p.size = size_map
|
p.size = size_map
|
||||||
}
|
}
|
||||||
} else if f.Name == "XXX_unrecognized" { // special case
|
}
|
||||||
|
if f.Name == "XXX_unrecognized" { // special case
|
||||||
prop.unrecField = toField(&f)
|
prop.unrecField = toField(&f)
|
||||||
}
|
}
|
||||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
|
||||||
if oneof != "" {
|
if oneof {
|
||||||
isOneofMessage = true
|
isOneofMessage = true
|
||||||
// Oneof fields don't use the traditional protobuf tag.
|
|
||||||
p.OrigName = oneof
|
|
||||||
}
|
}
|
||||||
prop.Prop[i] = p
|
prop.Prop[i] = p
|
||||||
prop.order[i] = i
|
prop.order[i] = i
|
||||||
@ -806,7 +783,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||||||
}
|
}
|
||||||
print("\n")
|
print("\n")
|
||||||
}
|
}
|
||||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
|
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
|
||||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -940,29 +917,7 @@ func RegisterType(x Message, name string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MessageName returns the fully-qualified proto name for the given message type.
|
// MessageName returns the fully-qualified proto name for the given message type.
|
||||||
func MessageName(x Message) string {
|
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
|
||||||
type xname interface {
|
|
||||||
XXX_MessageName() string
|
|
||||||
}
|
|
||||||
if m, ok := x.(xname); ok {
|
|
||||||
return m.XXX_MessageName()
|
|
||||||
}
|
|
||||||
return revProtoTypes[reflect.TypeOf(x)]
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageType returns the message type (pointer to struct) for a named message.
|
// MessageType returns the message type (pointer to struct) for a named message.
|
||||||
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
||||||
|
|
||||||
// A registry of all linked proto files.
|
|
||||||
var (
|
|
||||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
|
||||||
)
|
|
||||||
|
|
||||||
// RegisterFile is called from generated code and maps from the
|
|
||||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
|
||||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
|
||||||
protoFiles[filename] = fileDescriptor
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
|
||||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
|
||||||
|
51
vendor/github.com/gogo/protobuf/proto/properties_gogo.go
generated
vendored
51
vendor/github.com/gogo/protobuf/proto/properties_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
@ -51,51 +49,6 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Properties) setDurationEncAndDec(typ reflect.Type) {
|
|
||||||
if p.Repeated {
|
|
||||||
if typ.Elem().Kind() == reflect.Ptr {
|
|
||||||
p.enc = (*Buffer).enc_slice_duration
|
|
||||||
p.dec = (*Buffer).dec_slice_duration
|
|
||||||
p.size = size_slice_duration
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_ref_duration
|
|
||||||
p.dec = (*Buffer).dec_slice_ref_duration
|
|
||||||
p.size = size_slice_ref_duration
|
|
||||||
}
|
|
||||||
} else if typ.Kind() == reflect.Ptr {
|
|
||||||
p.enc = (*Buffer).enc_duration
|
|
||||||
p.dec = (*Buffer).dec_duration
|
|
||||||
p.size = size_duration
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_ref_duration
|
|
||||||
p.dec = (*Buffer).dec_ref_duration
|
|
||||||
p.size = size_ref_duration
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) setTimeEncAndDec(typ reflect.Type) {
|
|
||||||
if p.Repeated {
|
|
||||||
if typ.Elem().Kind() == reflect.Ptr {
|
|
||||||
p.enc = (*Buffer).enc_slice_time
|
|
||||||
p.dec = (*Buffer).dec_slice_time
|
|
||||||
p.size = size_slice_time
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_slice_ref_time
|
|
||||||
p.dec = (*Buffer).dec_slice_ref_time
|
|
||||||
p.size = size_slice_ref_time
|
|
||||||
}
|
|
||||||
} else if typ.Kind() == reflect.Ptr {
|
|
||||||
p.enc = (*Buffer).enc_time
|
|
||||||
p.dec = (*Buffer).dec_time
|
|
||||||
p.size = size_time
|
|
||||||
} else {
|
|
||||||
p.enc = (*Buffer).enc_ref_time
|
|
||||||
p.dec = (*Buffer).dec_ref_time
|
|
||||||
p.size = size_ref_time
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
|
func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
|
||||||
t2 := typ.Elem()
|
t2 := typ.Elem()
|
||||||
p.sstype = typ
|
p.sstype = typ
|
||||||
|
6
vendor/github.com/gogo/protobuf/proto/skip_gogo.go
generated
vendored
6
vendor/github.com/gogo/protobuf/proto/skip_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
|
213
vendor/github.com/gogo/protobuf/proto/text.go
generated
vendored
213
vendor/github.com/gogo/protobuf/proto/text.go
generated
vendored
@ -1,7 +1,7 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Extensions for Protocol Buffers to create more go like structures.
|
||||||
//
|
//
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
// http://github.com/gogo/protobuf
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
//
|
//
|
||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
//
|
//
|
||||||
@ -50,8 +50,6 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -161,7 +159,7 @@ func (w *textWriter) indent() { w.ind++ }
|
|||||||
|
|
||||||
func (w *textWriter) unindent() {
|
func (w *textWriter) unindent() {
|
||||||
if w.ind == 0 {
|
if w.ind == 0 {
|
||||||
log.Print("proto: textWriter unindented too far")
|
log.Printf("proto: textWriter unindented too far")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
w.ind--
|
w.ind--
|
||||||
@ -182,93 +180,7 @@ type raw interface {
|
|||||||
Bytes() []byte
|
Bytes() []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func requiresQuotes(u string) bool {
|
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
|
||||||
for _, ch := range u {
|
|
||||||
switch {
|
|
||||||
case ch == '.' || ch == '/' || ch == '_':
|
|
||||||
continue
|
|
||||||
case '0' <= ch && ch <= '9':
|
|
||||||
continue
|
|
||||||
case 'A' <= ch && ch <= 'Z':
|
|
||||||
continue
|
|
||||||
case 'a' <= ch && ch <= 'z':
|
|
||||||
continue
|
|
||||||
default:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isAny reports whether sv is a google.protobuf.Any message
|
|
||||||
func isAny(sv reflect.Value) bool {
|
|
||||||
type wkt interface {
|
|
||||||
XXX_WellKnownType() string
|
|
||||||
}
|
|
||||||
t, ok := sv.Addr().Interface().(wkt)
|
|
||||||
return ok && t.XXX_WellKnownType() == "Any"
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
|
||||||
//
|
|
||||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
|
||||||
// required messages are not linked in).
|
|
||||||
//
|
|
||||||
// It returns (true, error) when sv was written in expanded format or an error
|
|
||||||
// was encountered.
|
|
||||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
|
||||||
turl := sv.FieldByName("TypeUrl")
|
|
||||||
val := sv.FieldByName("Value")
|
|
||||||
if !turl.IsValid() || !val.IsValid() {
|
|
||||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
|
||||||
}
|
|
||||||
|
|
||||||
b, ok := val.Interface().([]byte)
|
|
||||||
if !ok {
|
|
||||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := strings.Split(turl.String(), "/")
|
|
||||||
mt := MessageType(parts[len(parts)-1])
|
|
||||||
if mt == nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
m := reflect.New(mt.Elem())
|
|
||||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
w.Write([]byte("["))
|
|
||||||
u := turl.String()
|
|
||||||
if requiresQuotes(u) {
|
|
||||||
writeString(w, u)
|
|
||||||
} else {
|
|
||||||
w.Write([]byte(u))
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("]:<"))
|
|
||||||
} else {
|
|
||||||
w.Write([]byte("]: <\n"))
|
|
||||||
w.ind++
|
|
||||||
}
|
|
||||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("> "))
|
|
||||||
} else {
|
|
||||||
w.ind--
|
|
||||||
w.Write([]byte(">\n"))
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|
||||||
if tm.ExpandAny && isAny(sv) {
|
|
||||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
st := sv.Type()
|
st := sv.Type()
|
||||||
sprops := GetProperties(st)
|
sprops := GetProperties(st)
|
||||||
for i := 0; i < sv.NumField(); i++ {
|
for i := 0; i < sv.NumField(); i++ {
|
||||||
@ -321,10 +233,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if len(props.Enum) > 0 {
|
if len(props.Enum) > 0 {
|
||||||
if err := tm.writeEnum(w, v, props); err != nil {
|
if err := writeEnum(w, v, props); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if err := tm.writeAny(w, v, props); err != nil {
|
} else if err := writeAny(w, v, props); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
@ -366,7 +278,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
if err := writeAny(w, key, props.mkeyprop); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
@ -383,7 +295,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
if err := writeAny(w, val, props.mvalprop); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
@ -455,10 +367,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(props.Enum) > 0 {
|
if len(props.Enum) > 0 {
|
||||||
if err := tm.writeEnum(w, fv, props); err != nil {
|
if err := writeEnum(w, fv, props); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if err := tm.writeAny(w, fv, props); err != nil {
|
} else if err := writeAny(w, fv, props); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -475,8 +387,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||||||
pv = reflect.New(sv.Type())
|
pv = reflect.New(sv.Type())
|
||||||
pv.Elem().Set(sv)
|
pv.Elem().Set(sv)
|
||||||
}
|
}
|
||||||
if pv.Type().Implements(extensionRangeType) {
|
if pv.Type().Implements(extendableProtoType) {
|
||||||
if err := tm.writeExtensions(w, pv); err != nil {
|
if err := writeExtensions(w, pv); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -506,45 +418,20 @@ func writeRaw(w *textWriter, b []byte) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// writeAny writes an arbitrary field.
|
// writeAny writes an arbitrary field.
|
||||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||||
v = reflect.Indirect(v)
|
v = reflect.Indirect(v)
|
||||||
|
|
||||||
if props != nil {
|
if props != nil && len(props.CustomType) > 0 {
|
||||||
if len(props.CustomType) > 0 {
|
custom, ok := v.Interface().(Marshaler)
|
||||||
custom, ok := v.Interface().(Marshaler)
|
if ok {
|
||||||
if ok {
|
data, err := custom.Marshal()
|
||||||
data, err := custom.Marshal()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := writeString(w, string(data)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
} else if props.StdTime {
|
|
||||||
t, ok := v.Interface().(time.Time)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
|
|
||||||
}
|
|
||||||
tproto, err := timestampProto(t)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
props.StdTime = false
|
if err := writeString(w, string(data)); err != nil {
|
||||||
err = tm.writeAny(w, reflect.ValueOf(tproto), props)
|
return err
|
||||||
props.StdTime = true
|
|
||||||
return err
|
|
||||||
} else if props.StdDuration {
|
|
||||||
d, ok := v.Interface().(time.Duration)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
|
|
||||||
}
|
}
|
||||||
dproto := durationProto(d)
|
return nil
|
||||||
props.StdDuration = false
|
|
||||||
err := tm.writeAny(w, reflect.ValueOf(dproto), props)
|
|
||||||
props.StdDuration = true
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -594,15 +481,15 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
w.indent()
|
w.indent()
|
||||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||||
text, err := etm.MarshalText()
|
text, err := tm.MarshalText()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err = w.Write(text); err != nil {
|
if _, err = w.Write(text); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if err := tm.writeStruct(w, v); err != nil {
|
} else if err := writeStruct(w, v); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
w.unindent()
|
w.unindent()
|
||||||
@ -746,39 +633,30 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|||||||
|
|
||||||
// writeExtensions writes all the extensions in pv.
|
// writeExtensions writes all the extensions in pv.
|
||||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||||
emap := extensionMaps[pv.Type().Elem()]
|
emap := extensionMaps[pv.Type().Elem()]
|
||||||
e := pv.Interface().(Message)
|
ep := pv.Interface().(extendableProto)
|
||||||
|
|
||||||
|
// Order the extensions by ID.
|
||||||
|
// This isn't strictly necessary, but it will give us
|
||||||
|
// canonical output, which will also make testing easier.
|
||||||
var m map[int32]Extension
|
var m map[int32]Extension
|
||||||
var mu sync.Locker
|
if em, ok := ep.(extensionsMap); ok {
|
||||||
if em, ok := e.(extensionsBytes); ok {
|
m = em.ExtensionMap()
|
||||||
|
} else if em, ok := ep.(extensionsBytes); ok {
|
||||||
eb := em.GetExtensions()
|
eb := em.GetExtensions()
|
||||||
var err error
|
var err error
|
||||||
m, err = BytesToExtensionsMap(*eb)
|
m, err = BytesToExtensionsMap(*eb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
mu = notLocker{}
|
|
||||||
} else if _, ok := e.(extendableProto); ok {
|
|
||||||
ep, _ := extendable(e)
|
|
||||||
m, mu = ep.extensionsRead()
|
|
||||||
if m == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Order the extensions by ID.
|
|
||||||
// This isn't strictly necessary, but it will give us
|
|
||||||
// canonical output, which will also make testing easier.
|
|
||||||
|
|
||||||
mu.Lock()
|
|
||||||
ids := make([]int32, 0, len(m))
|
ids := make([]int32, 0, len(m))
|
||||||
for id := range m {
|
for id := range m {
|
||||||
ids = append(ids, id)
|
ids = append(ids, id)
|
||||||
}
|
}
|
||||||
sort.Sort(int32Slice(ids))
|
sort.Sort(int32Slice(ids))
|
||||||
mu.Unlock()
|
|
||||||
|
|
||||||
for _, extNum := range ids {
|
for _, extNum := range ids {
|
||||||
ext := m[extNum]
|
ext := m[extNum]
|
||||||
@ -794,20 +672,20 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
pb, err := GetExtension(e, desc)
|
pb, err := GetExtension(ep, desc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed getting extension: %v", err)
|
return fmt.Errorf("failed getting extension: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Repeated extensions will appear as a slice.
|
// Repeated extensions will appear as a slice.
|
||||||
if !desc.repeated() {
|
if !desc.repeated() {
|
||||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
v := reflect.ValueOf(pb)
|
v := reflect.ValueOf(pb)
|
||||||
for i := 0; i < v.Len(); i++ {
|
for i := 0; i < v.Len(); i++ {
|
||||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -816,7 +694,7 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -825,7 +703,7 @@ func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := w.WriteByte('\n'); err != nil {
|
if err := w.WriteByte('\n'); err != nil {
|
||||||
@ -852,13 +730,12 @@ func (w *textWriter) writeIndent() {
|
|||||||
|
|
||||||
// TextMarshaler is a configurable text format marshaler.
|
// TextMarshaler is a configurable text format marshaler.
|
||||||
type TextMarshaler struct {
|
type TextMarshaler struct {
|
||||||
Compact bool // use compact text format (one line).
|
Compact bool // use compact text format (one line).
|
||||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Marshal writes a given protocol buffer in text format.
|
// Marshal writes a given protocol buffer in text format.
|
||||||
// The only errors returned are from w.
|
// The only errors returned are from w.
|
||||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||||
val := reflect.ValueOf(pb)
|
val := reflect.ValueOf(pb)
|
||||||
if pb == nil || val.IsNil() {
|
if pb == nil || val.IsNil() {
|
||||||
w.Write([]byte("<nil>"))
|
w.Write([]byte("<nil>"))
|
||||||
@ -873,11 +750,11 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||||||
aw := &textWriter{
|
aw := &textWriter{
|
||||||
w: ww,
|
w: ww,
|
||||||
complete: true,
|
complete: true,
|
||||||
compact: tm.Compact,
|
compact: m.Compact,
|
||||||
}
|
}
|
||||||
|
|
||||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
if tm, ok := pb.(encoding.TextMarshaler); ok {
|
||||||
text, err := etm.MarshalText()
|
text, err := tm.MarshalText()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -891,7 +768,7 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||||||
}
|
}
|
||||||
// Dereference the received pointer so we don't have outer < and >.
|
// Dereference the received pointer so we don't have outer < and >.
|
||||||
v := reflect.Indirect(val)
|
v := reflect.Indirect(val)
|
||||||
if err := tm.writeStruct(aw, v); err != nil {
|
if err := writeStruct(aw, v); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if bw != nil {
|
if bw != nil {
|
||||||
@ -901,9 +778,9 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Text is the same as Marshal, but returns the string directly.
|
// Text is the same as Marshal, but returns the string directly.
|
||||||
func (tm *TextMarshaler) Text(pb Message) string {
|
func (m *TextMarshaler) Text(pb Message) string {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
tm.Marshal(&buf, pb)
|
m.Marshal(&buf, pb)
|
||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
12
vendor/github.com/gogo/protobuf/proto/text_gogo.go
generated
vendored
12
vendor/github.com/gogo/protobuf/proto/text_gogo.go
generated
vendored
@ -1,7 +1,5 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
// modification, are permitted provided that the following conditions are
|
// modification, are permitted provided that the following conditions are
|
||||||
@ -33,10 +31,10 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
|
func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
|
||||||
m, ok := enumStringMaps[props.Enum]
|
m, ok := enumStringMaps[props.Enum]
|
||||||
if !ok {
|
if !ok {
|
||||||
if err := tm.writeAny(w, v, props); err != nil {
|
if err := writeAny(w, v, props); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -48,7 +46,7 @@ func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Proper
|
|||||||
}
|
}
|
||||||
s, ok := m[key]
|
s, ok := m[key]
|
||||||
if !ok {
|
if !ok {
|
||||||
if err := tm.writeAny(w, v, props); err != nil {
|
if err := writeAny(w, v, props); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
264
vendor/github.com/gogo/protobuf/proto/text_parser.go
generated
vendored
264
vendor/github.com/gogo/protobuf/proto/text_parser.go
generated
vendored
@ -1,7 +1,7 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Extensions for Protocol Buffers to create more go like structures.
|
||||||
//
|
//
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
// http://github.com/gogo/protobuf
|
// http://github.com/gogo/protobuf/gogoproto
|
||||||
//
|
//
|
||||||
// Go support for Protocol Buffers - Google's data interchange format
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
//
|
//
|
||||||
@ -46,13 +46,9 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error string emitted when deserializing Any and fields are already set
|
|
||||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
|
||||||
|
|
||||||
type ParseError struct {
|
type ParseError struct {
|
||||||
Message string
|
Message string
|
||||||
Line int // 1-based line number
|
Line int // 1-based line number
|
||||||
@ -172,7 +168,7 @@ func (p *textParser) advance() {
|
|||||||
p.cur.offset, p.cur.line = p.offset, p.line
|
p.cur.offset, p.cur.line = p.offset, p.line
|
||||||
p.cur.unquoted = ""
|
p.cur.unquoted = ""
|
||||||
switch p.s[0] {
|
switch p.s[0] {
|
||||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||||
// Single symbol
|
// Single symbol
|
||||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||||
case '"', '\'':
|
case '"', '\'':
|
||||||
@ -460,10 +456,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
fieldSet := make(map[string]bool)
|
fieldSet := make(map[string]bool)
|
||||||
// A struct is a sequence of "name: value", terminated by one of
|
// A struct is a sequence of "name: value", terminated by one of
|
||||||
// '>' or '}', or the end of the input. A name may also be
|
// '>' or '}', or the end of the input. A name may also be
|
||||||
// "[extension]" or "[type/url]".
|
// "[extension]".
|
||||||
//
|
|
||||||
// The whole struct can also be an expanded Any message, like:
|
|
||||||
// [type/url] < ... struct contents ... >
|
|
||||||
for {
|
for {
|
||||||
tok := p.next()
|
tok := p.next()
|
||||||
if tok.err != nil {
|
if tok.err != nil {
|
||||||
@ -473,74 +466,33 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
if tok.value == "[" {
|
if tok.value == "[" {
|
||||||
// Looks like an extension or an Any.
|
// Looks like an extension.
|
||||||
//
|
//
|
||||||
// TODO: Check whether we need to handle
|
// TODO: Check whether we need to handle
|
||||||
// namespace rooted names (e.g. ".something.Foo").
|
// namespace rooted names (e.g. ".something.Foo").
|
||||||
extName, err := p.consumeExtName()
|
tok = p.next()
|
||||||
if err != nil {
|
if tok.err != nil {
|
||||||
return err
|
return tok.err
|
||||||
}
|
}
|
||||||
|
|
||||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
|
||||||
// If it contains a slash, it's an Any type URL.
|
|
||||||
messageName := extName[s+1:]
|
|
||||||
mt := MessageType(messageName)
|
|
||||||
if mt == nil {
|
|
||||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
|
||||||
}
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
// consume an optional colon
|
|
||||||
if tok.value == ":" {
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
v := reflect.New(mt.Elem())
|
|
||||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
b, err := Marshal(v.Interface().(Message))
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
|
||||||
}
|
|
||||||
if fieldSet["type_url"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
|
||||||
}
|
|
||||||
if fieldSet["value"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
|
||||||
}
|
|
||||||
sv.FieldByName("TypeUrl").SetString(extName)
|
|
||||||
sv.FieldByName("Value").SetBytes(b)
|
|
||||||
fieldSet["type_url"] = true
|
|
||||||
fieldSet["value"] = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var desc *ExtensionDesc
|
var desc *ExtensionDesc
|
||||||
// This could be faster, but it's functional.
|
// This could be faster, but it's functional.
|
||||||
// TODO: Do something smarter than a linear scan.
|
// TODO: Do something smarter than a linear scan.
|
||||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||||
if d.Name == extName {
|
if d.Name == tok.value {
|
||||||
desc = d
|
desc = d
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if desc == nil {
|
if desc == nil {
|
||||||
return p.errorf("unrecognized extension %q", extName)
|
return p.errorf("unrecognized extension %q", tok.value)
|
||||||
|
}
|
||||||
|
// Check the extension terminator.
|
||||||
|
tok = p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
|
}
|
||||||
|
if tok.value != "]" {
|
||||||
|
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||||
}
|
}
|
||||||
|
|
||||||
props := &Properties{}
|
props := &Properties{}
|
||||||
@ -567,7 +519,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
}
|
}
|
||||||
reqFieldErr = err
|
reqFieldErr = err
|
||||||
}
|
}
|
||||||
ep := sv.Addr().Interface().(Message)
|
ep := sv.Addr().Interface().(extendableProto)
|
||||||
if !rep {
|
if !rep {
|
||||||
SetExtension(ep, desc, ext.Interface())
|
SetExtension(ep, desc, ext.Interface())
|
||||||
} else {
|
} else {
|
||||||
@ -619,9 +571,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
|
|
||||||
// The map entry should be this sequence of tokens:
|
// The map entry should be this sequence of tokens:
|
||||||
// < key : KEY value : VALUE >
|
// < key : KEY value : VALUE >
|
||||||
// However, implementations may omit key or value, and technically
|
// Technically the "key" and "value" could come in any order,
|
||||||
// we should support them in any order. See b/28924776 for a time
|
// but in practice they won't.
|
||||||
// this went wrong.
|
|
||||||
|
|
||||||
tok := p.next()
|
tok := p.next()
|
||||||
var terminator string
|
var terminator string
|
||||||
@ -633,39 +584,32 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
default:
|
default:
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
}
|
}
|
||||||
for {
|
if err := p.consumeToken("key"); err != nil {
|
||||||
tok := p.next()
|
return err
|
||||||
if tok.err != nil {
|
}
|
||||||
return tok.err
|
if err := p.consumeToken(":"); err != nil {
|
||||||
}
|
return err
|
||||||
if tok.value == terminator {
|
}
|
||||||
break
|
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||||
}
|
return err
|
||||||
switch tok.value {
|
}
|
||||||
case "key":
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
if err := p.consumeToken(":"); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
if err := p.consumeToken("value"); err != nil {
|
||||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||||
case "value":
|
return err
|
||||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
}
|
||||||
return err
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
}
|
return err
|
||||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
}
|
||||||
return err
|
if err := p.consumeToken(terminator); err != nil {
|
||||||
}
|
return err
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dst.SetMapIndex(key, val)
|
dst.SetMapIndex(key, val)
|
||||||
@ -688,8 +632,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
reqFieldErr = err
|
reqFieldErr = err
|
||||||
}
|
} else if props.Required {
|
||||||
if props.Required {
|
|
||||||
reqCount--
|
reqCount--
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -705,35 +648,6 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
return reqFieldErr
|
return reqFieldErr
|
||||||
}
|
}
|
||||||
|
|
||||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
|
||||||
// following ']'. It returns the name or URL consumed.
|
|
||||||
func (p *textParser) consumeExtName() (string, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", tok.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If extension name or type url is quoted, it's a single token.
|
|
||||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
|
||||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return name, p.consumeToken("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume everything up to "]"
|
|
||||||
var parts []string
|
|
||||||
for tok.value != "]" {
|
|
||||||
parts = append(parts, tok.value)
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(parts, ""), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||||
// It is used in readStruct to provide backward compatibility.
|
// It is used in readStruct to provide backward compatibility.
|
||||||
func (p *textParser) consumeOptionalSeparator() error {
|
func (p *textParser) consumeOptionalSeparator() error {
|
||||||
@ -794,80 +708,6 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if props.StdTime {
|
|
||||||
fv := v
|
|
||||||
p.back()
|
|
||||||
props.StdTime = false
|
|
||||||
tproto := ×tamp{}
|
|
||||||
err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
|
|
||||||
props.StdTime = true
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tim, err := timestampFromProto(tproto)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if props.Repeated {
|
|
||||||
t := reflect.TypeOf(v.Interface())
|
|
||||||
if t.Kind() == reflect.Slice {
|
|
||||||
if t.Elem().Kind() == reflect.Ptr {
|
|
||||||
ts := fv.Interface().([]*time.Time)
|
|
||||||
ts = append(ts, &tim)
|
|
||||||
fv.Set(reflect.ValueOf(ts))
|
|
||||||
return nil
|
|
||||||
} else {
|
|
||||||
ts := fv.Interface().([]time.Time)
|
|
||||||
ts = append(ts, tim)
|
|
||||||
fv.Set(reflect.ValueOf(ts))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
|
|
||||||
v.Set(reflect.ValueOf(&tim))
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if props.StdDuration {
|
|
||||||
fv := v
|
|
||||||
p.back()
|
|
||||||
props.StdDuration = false
|
|
||||||
dproto := &duration{}
|
|
||||||
err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
|
|
||||||
props.StdDuration = true
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dur, err := durationFromProto(dproto)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if props.Repeated {
|
|
||||||
t := reflect.TypeOf(v.Interface())
|
|
||||||
if t.Kind() == reflect.Slice {
|
|
||||||
if t.Elem().Kind() == reflect.Ptr {
|
|
||||||
ds := fv.Interface().([]*time.Duration)
|
|
||||||
ds = append(ds, &dur)
|
|
||||||
fv.Set(reflect.ValueOf(ds))
|
|
||||||
return nil
|
|
||||||
} else {
|
|
||||||
ds := fv.Interface().([]time.Duration)
|
|
||||||
ds = append(ds, dur)
|
|
||||||
fv.Set(reflect.ValueOf(ds))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
|
|
||||||
v.Set(reflect.ValueOf(&dur))
|
|
||||||
} else {
|
|
||||||
v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
switch fv := v; fv.Kind() {
|
switch fv := v; fv.Kind() {
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
at := v.Type()
|
at := v.Type()
|
||||||
@ -910,12 +750,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
// true/1/t/True or false/f/0/False.
|
// Either "true", "false", 1 or 0.
|
||||||
switch tok.value {
|
switch tok.value {
|
||||||
case "true", "1", "t", "True":
|
case "true", "1":
|
||||||
fv.SetBool(true)
|
fv.SetBool(true)
|
||||||
return nil
|
return nil
|
||||||
case "false", "0", "f", "False":
|
case "false", "0":
|
||||||
fv.SetBool(false)
|
fv.SetBool(false)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
113
vendor/github.com/gogo/protobuf/proto/timestamp.go
generated
vendored
113
vendor/github.com/gogo/protobuf/proto/timestamp.go
generated
vendored
@ -1,113 +0,0 @@
|
|||||||
// Go support for Protocol Buffers - Google's data interchange format
|
|
||||||
//
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// https://github.com/golang/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// This file implements operations on google.protobuf.Timestamp.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Seconds field of the earliest valid Timestamp.
|
|
||||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
|
||||||
minValidSeconds = -62135596800
|
|
||||||
// Seconds field just after the latest valid Timestamp.
|
|
||||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
|
||||||
maxValidSeconds = 253402300800
|
|
||||||
)
|
|
||||||
|
|
||||||
// validateTimestamp determines whether a Timestamp is valid.
|
|
||||||
// A valid timestamp represents a time in the range
|
|
||||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
|
||||||
// in the range [0, 1e9).
|
|
||||||
//
|
|
||||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
|
||||||
// Otherwise, it returns an error that describes
|
|
||||||
// the problem.
|
|
||||||
//
|
|
||||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
|
||||||
func validateTimestamp(ts *timestamp) error {
|
|
||||||
if ts == nil {
|
|
||||||
return errors.New("timestamp: nil Timestamp")
|
|
||||||
}
|
|
||||||
if ts.Seconds < minValidSeconds {
|
|
||||||
return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
|
|
||||||
}
|
|
||||||
if ts.Seconds >= maxValidSeconds {
|
|
||||||
return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
|
|
||||||
}
|
|
||||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
|
||||||
return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
|
|
||||||
// It returns an error if the argument is invalid.
|
|
||||||
//
|
|
||||||
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
|
||||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
|
||||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
|
||||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
|
||||||
// do map to valid time.Times.
|
|
||||||
//
|
|
||||||
// A nil Timestamp returns an error. The first return value in that case is
|
|
||||||
// undefined.
|
|
||||||
func timestampFromProto(ts *timestamp) (time.Time, error) {
|
|
||||||
// Don't return the zero value on error, because corresponds to a valid
|
|
||||||
// timestamp. Instead return whatever time.Unix gives us.
|
|
||||||
var t time.Time
|
|
||||||
if ts == nil {
|
|
||||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
|
||||||
} else {
|
|
||||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
|
||||||
}
|
|
||||||
return t, validateTimestamp(ts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
|
||||||
// It returns an error if the resulting Timestamp is invalid.
|
|
||||||
func timestampProto(t time.Time) (*timestamp, error) {
|
|
||||||
seconds := t.Unix()
|
|
||||||
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
|
||||||
ts := ×tamp{
|
|
||||||
Seconds: seconds,
|
|
||||||
Nanos: nanos,
|
|
||||||
}
|
|
||||||
if err := validateTimestamp(ts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return ts, nil
|
|
||||||
}
|
|
227
vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
generated
vendored
227
vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
generated
vendored
@ -1,227 +0,0 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
|
||||||
//
|
|
||||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
|
||||||
|
|
||||||
type timestamp struct {
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *timestamp) Reset() { *m = timestamp{} }
|
|
||||||
func (*timestamp) ProtoMessage() {}
|
|
||||||
func (*timestamp) String() string { return "timestamp<string>" }
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) decTimestamp() (time.Time, error) {
|
|
||||||
b, err := o.DecodeRawBytes(true)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
tproto := ×tamp{}
|
|
||||||
if err := Unmarshal(b, tproto); err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
return timestampFromProto(tproto)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_time(p *Properties, base structPointer) error {
|
|
||||||
t, err := o.decTimestamp()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setPtrCustomType(base, p.field, &t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
|
|
||||||
t, err := o.decTimestamp()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setCustomType(base, p.field, &t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
|
|
||||||
t, err := o.decTimestamp()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
|
|
||||||
setPtrCustomType(newBas, 0, &t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
|
|
||||||
t, err := o.decTimestamp()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
|
|
||||||
setCustomType(newBas, 0, &t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_time(p *Properties, base structPointer) (n int) {
|
|
||||||
structp := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(structp) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
tim := structPointer_Interface(structp, timeType).(*time.Time)
|
|
||||||
t, err := timestampProto(*tim)
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
size := Size(t)
|
|
||||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_time(p *Properties, base structPointer) error {
|
|
||||||
structp := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(structp) {
|
|
||||||
return ErrNil
|
|
||||||
}
|
|
||||||
tim := structPointer_Interface(structp, timeType).(*time.Time)
|
|
||||||
t, err := timestampProto(*tim)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
data, err := Marshal(t)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_ref_time(p *Properties, base structPointer) (n int) {
|
|
||||||
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
|
|
||||||
t, err := timestampProto(*tim)
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
size := Size(t)
|
|
||||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
|
|
||||||
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
|
|
||||||
t, err := timestampProto(*tim)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
data, err := Marshal(t)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_slice_time(p *Properties, base structPointer) (n int) {
|
|
||||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
|
|
||||||
tims := *ptims
|
|
||||||
for i := 0; i < len(tims); i++ {
|
|
||||||
if tims[i] == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
tproto, err := timestampProto(*tims[i])
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
size := Size(tproto)
|
|
||||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error {
|
|
||||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
|
|
||||||
tims := *ptims
|
|
||||||
for i := 0; i < len(tims); i++ {
|
|
||||||
if tims[i] == nil {
|
|
||||||
return errRepeatedHasNil
|
|
||||||
}
|
|
||||||
tproto, err := timestampProto(*tims[i])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
data, err := Marshal(tproto)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func size_slice_ref_time(p *Properties, base structPointer) (n int) {
|
|
||||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
|
|
||||||
tims := *ptims
|
|
||||||
for i := 0; i < len(tims); i++ {
|
|
||||||
tproto, err := timestampProto(tims[i])
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
size := Size(tproto)
|
|
||||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error {
|
|
||||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
|
|
||||||
tims := *ptims
|
|
||||||
for i := 0; i < len(tims); i++ {
|
|
||||||
tproto, err := timestampProto(tims[i])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
data, err := Marshal(tproto)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
o.buf = append(o.buf, p.tagcode...)
|
|
||||||
o.EncodeRawBytes(data)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
4
vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
generated
vendored
4
vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
generated
vendored
@ -1,6 +1,4 @@
|
|||||||
// Protocol Buffers for Go with Gadgets
|
// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
|
||||||
//
|
|
||||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
|
||||||
// http://github.com/gogo/protobuf
|
// http://github.com/gogo/protobuf
|
||||||
//
|
//
|
||||||
// Redistribution and use in source and binary forms, with or without
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
2
vendor/github.com/golang/glog/README
generated
vendored
2
vendor/github.com/golang/glog/README
generated
vendored
@ -5,7 +5,7 @@ Leveled execution logs for Go.
|
|||||||
|
|
||||||
This is an efficient pure Go implementation of leveled logs in the
|
This is an efficient pure Go implementation of leveled logs in the
|
||||||
manner of the open source C++ package
|
manner of the open source C++ package
|
||||||
https://github.com/google/glog
|
http://code.google.com/p/google-glog
|
||||||
|
|
||||||
By binding methods to booleans it is possible to use the log package
|
By binding methods to booleans it is possible to use the log package
|
||||||
without paying the expense of evaluating the arguments to the log.
|
without paying the expense of evaluating the arguments to the log.
|
||||||
|
5
vendor/github.com/golang/glog/glog.go
generated
vendored
5
vendor/github.com/golang/glog/glog.go
generated
vendored
@ -676,10 +676,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
data := buf.Bytes()
|
data := buf.Bytes()
|
||||||
if !flag.Parsed() {
|
if l.toStderr {
|
||||||
os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
|
|
||||||
os.Stderr.Write(data)
|
|
||||||
} else if l.toStderr {
|
|
||||||
os.Stderr.Write(data)
|
os.Stderr.Write(data)
|
||||||
} else {
|
} else {
|
||||||
if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
|
if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
|
||||||
|
117
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
117
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
@ -61,6 +61,7 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
|
|||||||
// int32, int64, uint32, uint64, bool, and enum
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
// protocol buffer types.
|
// protocol buffer types.
|
||||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||||
|
// x, n already 0
|
||||||
for shift := uint(0); shift < 64; shift += 7 {
|
for shift := uint(0); shift < 64; shift += 7 {
|
||||||
if n >= len(buf) {
|
if n >= len(buf) {
|
||||||
return 0, 0
|
return 0, 0
|
||||||
@ -77,7 +78,13 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
|
|||||||
return 0, 0
|
return 0, 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||||
|
// This is the format for the
|
||||||
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
|
// protocol buffer types.
|
||||||
|
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||||
|
// x, err already 0
|
||||||
|
|
||||||
i := p.index
|
i := p.index
|
||||||
l := len(p.buf)
|
l := len(p.buf)
|
||||||
|
|
||||||
@ -100,107 +107,6 @@ func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
|
||||||
// This is the format for the
|
|
||||||
// int32, int64, uint32, uint64, bool, and enum
|
|
||||||
// protocol buffer types.
|
|
||||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|
||||||
i := p.index
|
|
||||||
buf := p.buf
|
|
||||||
|
|
||||||
if i >= len(buf) {
|
|
||||||
return 0, io.ErrUnexpectedEOF
|
|
||||||
} else if buf[i] < 0x80 {
|
|
||||||
p.index++
|
|
||||||
return uint64(buf[i]), nil
|
|
||||||
} else if len(buf)-i < 10 {
|
|
||||||
return p.decodeVarintSlow()
|
|
||||||
}
|
|
||||||
|
|
||||||
var b uint64
|
|
||||||
// we already checked the first byte
|
|
||||||
x = uint64(buf[i]) - 0x80
|
|
||||||
i++
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 7
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 7
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 14
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 14
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 21
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 21
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 28
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 28
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 35
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 35
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 42
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 42
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 49
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 49
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 56
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
x -= 0x80 << 56
|
|
||||||
|
|
||||||
b = uint64(buf[i])
|
|
||||||
i++
|
|
||||||
x += b << 63
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
goto done
|
|
||||||
}
|
|
||||||
// x -= 0x80 << 63 // Always zero.
|
|
||||||
|
|
||||||
return 0, errOverflow
|
|
||||||
|
|
||||||
done:
|
|
||||||
p.index = i
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||||
// This is the format for the
|
// This is the format for the
|
||||||
// fixed64, sfixed64, and double protocol buffer types.
|
// fixed64, sfixed64, and double protocol buffer types.
|
||||||
@ -434,8 +340,6 @@ func (p *Buffer) DecodeGroup(pb Message) error {
|
|||||||
// Buffer and places the decoded result in pb. If the struct
|
// Buffer and places the decoded result in pb. If the struct
|
||||||
// underlying pb does not match the data in the buffer, the results can be
|
// underlying pb does not match the data in the buffer, the results can be
|
||||||
// unpredictable.
|
// unpredictable.
|
||||||
//
|
|
||||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
|
||||||
func (p *Buffer) Unmarshal(pb Message) error {
|
func (p *Buffer) Unmarshal(pb Message) error {
|
||||||
// If the object can unmarshal itself, let it.
|
// If the object can unmarshal itself, let it.
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
if u, ok := pb.(Unmarshaler); ok {
|
||||||
@ -474,11 +378,6 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
|
|||||||
wire := int(u & 0x7)
|
wire := int(u & 0x7)
|
||||||
if wire == WireEndGroup {
|
if wire == WireEndGroup {
|
||||||
if is_group {
|
if is_group {
|
||||||
if required > 0 {
|
|
||||||
// Not enough information to determine the exact field.
|
|
||||||
// (See below.)
|
|
||||||
return &RequiredNotSetError{"{Unknown}"}
|
|
||||||
}
|
|
||||||
return nil // input is satisfied
|
return nil // input is satisfied
|
||||||
}
|
}
|
||||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||||
|
14
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
14
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
@ -234,6 +234,10 @@ func Marshal(pb Message) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
p := NewBuffer(nil)
|
p := NewBuffer(nil)
|
||||||
err := p.Marshal(pb)
|
err := p.Marshal(pb)
|
||||||
|
var state errorState
|
||||||
|
if err != nil && !state.shouldContinue(err, nil) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if p.buf == nil && err == nil {
|
if p.buf == nil && err == nil {
|
||||||
// Return a non-nil slice on success.
|
// Return a non-nil slice on success.
|
||||||
return []byte{}, nil
|
return []byte{}, nil
|
||||||
@ -262,8 +266,11 @@ func (p *Buffer) Marshal(pb Message) error {
|
|||||||
// Can the object marshal itself?
|
// Can the object marshal itself?
|
||||||
if m, ok := pb.(Marshaler); ok {
|
if m, ok := pb.(Marshaler); ok {
|
||||||
data, err := m.Marshal()
|
data, err := m.Marshal()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
p.buf = append(p.buf, data...)
|
p.buf = append(p.buf, data...)
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
t, base, err := getbase(pb)
|
t, base, err := getbase(pb)
|
||||||
@ -275,7 +282,7 @@ func (p *Buffer) Marshal(pb Message) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if collectStats {
|
if collectStats {
|
||||||
(stats).Encode++ // Parens are to work around a goimports bug.
|
stats.Encode++
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(p.buf) > maxMarshalSize {
|
if len(p.buf) > maxMarshalSize {
|
||||||
@ -302,7 +309,7 @@ func Size(pb Message) (n int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if collectStats {
|
if collectStats {
|
||||||
(stats).Size++ // Parens are to work around a goimports bug.
|
stats.Size++
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
@ -1007,6 +1014,7 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
|
|||||||
if p.isMarshaler {
|
if p.isMarshaler {
|
||||||
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
m := structPointer_Interface(structp, p.stype).(Marshaler)
|
||||||
data, _ := m.Marshal()
|
data, _ := m.Marshal()
|
||||||
|
n += len(p.tagcode)
|
||||||
n += sizeRawBytes(data)
|
n += sizeRawBytes(data)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
8
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
8
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
@ -54,17 +54,13 @@ Equality is defined in this way:
|
|||||||
in a proto3 .proto file, fields are not "set"; specifically,
|
in a proto3 .proto file, fields are not "set"; specifically,
|
||||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||||
- Two repeated fields are equal iff their lengths are the same,
|
- Two repeated fields are equal iff their lengths are the same,
|
||||||
and their corresponding elements are equal. Note a "bytes" field,
|
and their corresponding elements are equal (a "bytes" field,
|
||||||
although represented by []byte, is not a repeated field and the
|
although represented by []byte, is not a repeated field)
|
||||||
rule for the scalar fields described above applies.
|
|
||||||
- Two unset fields are equal.
|
- Two unset fields are equal.
|
||||||
- Two unknown field sets are equal if their current
|
- Two unknown field sets are equal if their current
|
||||||
encoded state is equal.
|
encoded state is equal.
|
||||||
- Two extension sets are equal iff they have corresponding
|
- Two extension sets are equal iff they have corresponding
|
||||||
elements that are pairwise equal.
|
elements that are pairwise equal.
|
||||||
- Two map fields are equal iff their lengths are the same,
|
|
||||||
and they contain the same set of elements. Zero-length map
|
|
||||||
fields are equal.
|
|
||||||
- Every other combination of things are not equal.
|
- Every other combination of things are not equal.
|
||||||
|
|
||||||
The return value is undefined if a and b are not protocol buffers.
|
The return value is undefined if a and b are not protocol buffers.
|
||||||
|
31
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
31
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
@ -489,37 +489,6 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
|
||||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
|
||||||
// just the Field field, which defines the extension's field number.
|
|
||||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
|
||||||
epb, ok := extendable(pb)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
|
|
||||||
}
|
|
||||||
registeredExtensions := RegisteredExtensions(pb)
|
|
||||||
|
|
||||||
emap, mu := epb.extensionsRead()
|
|
||||||
if emap == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
|
||||||
for extid, e := range emap {
|
|
||||||
desc := e.desc
|
|
||||||
if desc == nil {
|
|
||||||
desc = registeredExtensions[extid]
|
|
||||||
if desc == nil {
|
|
||||||
desc = &ExtensionDesc{Field: extid}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
extensions = append(extensions, desc)
|
|
||||||
}
|
|
||||||
return extensions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetExtension sets the specified extension of pb to the specified value.
|
// SetExtension sets the specified extension of pb to the specified value.
|
||||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||||
epb, ok := extendable(pb)
|
epb, ok := extendable(pb)
|
||||||
|
2
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
2
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
|
|||||||
// temporary Buffer and are fine for most applications.
|
// temporary Buffer and are fine for most applications.
|
||||||
type Buffer struct {
|
type Buffer struct {
|
||||||
buf []byte // encode/decode byte stream
|
buf []byte // encode/decode byte stream
|
||||||
index int // read point
|
index int // write point
|
||||||
|
|
||||||
// pools of basic types to amortize allocation.
|
// pools of basic types to amortize allocation.
|
||||||
bools []bool
|
bools []bool
|
||||||
|
10
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
10
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
@ -844,15 +844,7 @@ func RegisterType(x Message, name string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// MessageName returns the fully-qualified proto name for the given message type.
|
// MessageName returns the fully-qualified proto name for the given message type.
|
||||||
func MessageName(x Message) string {
|
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
|
||||||
type xname interface {
|
|
||||||
XXX_MessageName() string
|
|
||||||
}
|
|
||||||
if m, ok := x.(xname); ok {
|
|
||||||
return m.XXX_MessageName()
|
|
||||||
}
|
|
||||||
return revProtoTypes[reflect.TypeOf(x)]
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageType returns the message type (pointer to struct) for a named message.
|
// MessageType returns the message type (pointer to struct) for a named message.
|
||||||
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
func MessageType(name string) reflect.Type { return protoTypes[name] }
|
||||||
|
2
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
2
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
@ -154,7 +154,7 @@ func (w *textWriter) indent() { w.ind++ }
|
|||||||
|
|
||||||
func (w *textWriter) unindent() {
|
func (w *textWriter) unindent() {
|
||||||
if w.ind == 0 {
|
if w.ind == 0 {
|
||||||
log.Print("proto: textWriter unindented too far")
|
log.Printf("proto: textWriter unindented too far")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
w.ind--
|
w.ind--
|
||||||
|
17
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
17
vendor/github.com/golang/protobuf/proto/text_parser.go
generated
vendored
@ -44,9 +44,6 @@ import (
|
|||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error string emitted when deserializing Any and fields are already set
|
|
||||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
|
||||||
|
|
||||||
type ParseError struct {
|
type ParseError struct {
|
||||||
Message string
|
Message string
|
||||||
Line int // 1-based line number
|
Line int // 1-based line number
|
||||||
@ -511,16 +508,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||||
}
|
}
|
||||||
if fieldSet["type_url"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
|
||||||
}
|
|
||||||
if fieldSet["value"] {
|
|
||||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
|
||||||
}
|
|
||||||
sv.FieldByName("TypeUrl").SetString(extName)
|
sv.FieldByName("TypeUrl").SetString(extName)
|
||||||
sv.FieldByName("Value").SetBytes(b)
|
sv.FieldByName("Value").SetBytes(b)
|
||||||
fieldSet["type_url"] = true
|
|
||||||
fieldSet["value"] = true
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -792,12 +781,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
// true/1/t/True or false/f/0/False.
|
// Either "true", "false", 1 or 0.
|
||||||
switch tok.value {
|
switch tok.value {
|
||||||
case "true", "1", "t", "True":
|
case "true", "1":
|
||||||
fv.SetBool(true)
|
fv.SetBool(true)
|
||||||
return nil
|
return nil
|
||||||
case "false", "0", "f", "False":
|
case "false", "0":
|
||||||
fv.SetBool(false)
|
fv.SetBool(false)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
190
vendor/github.com/google/cadvisor/LICENSE
generated
vendored
190
vendor/github.com/google/cadvisor/LICENSE
generated
vendored
@ -1,190 +0,0 @@
|
|||||||
Copyright 2014 The cAdvisor Authors
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
607
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
607
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
@ -1,607 +0,0 @@
|
|||||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CpuSpec struct {
|
|
||||||
Limit uint64 `json:"limit"`
|
|
||||||
MaxLimit uint64 `json:"max_limit"`
|
|
||||||
Mask string `json:"mask,omitempty"`
|
|
||||||
Quota uint64 `json:"quota,omitempty"`
|
|
||||||
Period uint64 `json:"period,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type MemorySpec struct {
|
|
||||||
// The amount of memory requested. Default is unlimited (-1).
|
|
||||||
// Units: bytes.
|
|
||||||
Limit uint64 `json:"limit,omitempty"`
|
|
||||||
|
|
||||||
// The amount of guaranteed memory. Default is 0.
|
|
||||||
// Units: bytes.
|
|
||||||
Reservation uint64 `json:"reservation,omitempty"`
|
|
||||||
|
|
||||||
// The amount of swap space requested. Default is unlimited (-1).
|
|
||||||
// Units: bytes.
|
|
||||||
SwapLimit uint64 `json:"swap_limit,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ContainerSpec struct {
|
|
||||||
// Time at which the container was created.
|
|
||||||
CreationTime time.Time `json:"creation_time,omitempty"`
|
|
||||||
|
|
||||||
// Metadata labels associated with this container.
|
|
||||||
Labels map[string]string `json:"labels,omitempty"`
|
|
||||||
// Metadata envs associated with this container. Only whitelisted envs are added.
|
|
||||||
Envs map[string]string `json:"envs,omitempty"`
|
|
||||||
|
|
||||||
HasCpu bool `json:"has_cpu"`
|
|
||||||
Cpu CpuSpec `json:"cpu,omitempty"`
|
|
||||||
|
|
||||||
HasMemory bool `json:"has_memory"`
|
|
||||||
Memory MemorySpec `json:"memory,omitempty"`
|
|
||||||
|
|
||||||
HasNetwork bool `json:"has_network"`
|
|
||||||
|
|
||||||
HasFilesystem bool `json:"has_filesystem"`
|
|
||||||
|
|
||||||
// HasDiskIo when true, indicates that DiskIo stats will be available.
|
|
||||||
HasDiskIo bool `json:"has_diskio"`
|
|
||||||
|
|
||||||
HasCustomMetrics bool `json:"has_custom_metrics"`
|
|
||||||
CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"`
|
|
||||||
|
|
||||||
// Image name used for this container.
|
|
||||||
Image string `json:"image,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container reference contains enough information to uniquely identify a container
|
|
||||||
type ContainerReference struct {
|
|
||||||
// The container id
|
|
||||||
Id string `json:"id,omitempty"`
|
|
||||||
|
|
||||||
// The absolute name of the container. This is unique on the machine.
|
|
||||||
Name string `json:"name"`
|
|
||||||
|
|
||||||
// Other names by which the container is known within a certain namespace.
|
|
||||||
// This is unique within that namespace.
|
|
||||||
Aliases []string `json:"aliases,omitempty"`
|
|
||||||
|
|
||||||
// Namespace under which the aliases of a container are unique.
|
|
||||||
// An example of a namespace is "docker" for Docker containers.
|
|
||||||
Namespace string `json:"namespace,omitempty"`
|
|
||||||
|
|
||||||
Labels map[string]string `json:"labels,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sorts by container name.
|
|
||||||
type ContainerReferenceSlice []ContainerReference
|
|
||||||
|
|
||||||
func (self ContainerReferenceSlice) Len() int { return len(self) }
|
|
||||||
func (self ContainerReferenceSlice) Swap(i, j int) { self[i], self[j] = self[j], self[i] }
|
|
||||||
func (self ContainerReferenceSlice) Less(i, j int) bool { return self[i].Name < self[j].Name }
|
|
||||||
|
|
||||||
// ContainerInfoRequest is used when users check a container info from the REST API.
|
|
||||||
// It specifies how much data users want to get about a container
|
|
||||||
type ContainerInfoRequest struct {
|
|
||||||
// Max number of stats to return. Specify -1 for all stats currently available.
|
|
||||||
// Default: 60
|
|
||||||
NumStats int `json:"num_stats,omitempty"`
|
|
||||||
|
|
||||||
// Start time for which to query information.
|
|
||||||
// If ommitted, the beginning of time is assumed.
|
|
||||||
Start time.Time `json:"start,omitempty"`
|
|
||||||
|
|
||||||
// End time for which to query information.
|
|
||||||
// If ommitted, current time is assumed.
|
|
||||||
End time.Time `json:"end,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a ContainerInfoRequest with all default values specified.
|
|
||||||
func DefaultContainerInfoRequest() ContainerInfoRequest {
|
|
||||||
return ContainerInfoRequest{
|
|
||||||
NumStats: 60,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool {
|
|
||||||
return self.NumStats == other.NumStats &&
|
|
||||||
self.Start.Equal(other.Start) &&
|
|
||||||
self.End.Equal(other.End)
|
|
||||||
}
|
|
||||||
|
|
||||||
type ContainerInfo struct {
|
|
||||||
ContainerReference
|
|
||||||
|
|
||||||
// The direct subcontainers of the current container.
|
|
||||||
Subcontainers []ContainerReference `json:"subcontainers,omitempty"`
|
|
||||||
|
|
||||||
// The isolation used in the container.
|
|
||||||
Spec ContainerSpec `json:"spec,omitempty"`
|
|
||||||
|
|
||||||
// Historical statistics gathered from the container.
|
|
||||||
Stats []*ContainerStats `json:"stats,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(vmarmol): Refactor to not need this equality comparison.
|
|
||||||
// ContainerInfo may be (un)marshaled by json or other en/decoder. In that
|
|
||||||
// case, the Timestamp field in each stats/sample may not be precisely
|
|
||||||
// en/decoded. This will lead to small but acceptable differences between a
|
|
||||||
// ContainerInfo and its encode-then-decode version. Eq() is used to compare
|
|
||||||
// two ContainerInfo accepting small difference (<10ms) of Time fields.
|
|
||||||
func (self *ContainerInfo) Eq(b *ContainerInfo) bool {
|
|
||||||
|
|
||||||
// If both self and b are nil, then Eq() returns true
|
|
||||||
if self == nil {
|
|
||||||
return b == nil
|
|
||||||
}
|
|
||||||
if b == nil {
|
|
||||||
return self == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// For fields other than time.Time, we will compare them precisely.
|
|
||||||
// This would require that any slice should have same order.
|
|
||||||
if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !self.Spec.Eq(&b.Spec) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, expectedStats := range b.Stats {
|
|
||||||
selfStats := self.Stats[i]
|
|
||||||
if !expectedStats.Eq(selfStats) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *ContainerSpec) Eq(b *ContainerSpec) bool {
|
|
||||||
// Creation within 1s of each other.
|
|
||||||
diff := self.CreationTime.Sub(b.CreationTime)
|
|
||||||
if (diff > time.Second) || (diff < -time.Second) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.HasCpu != b.HasCpu {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(self.Cpu, b.Cpu) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if self.HasMemory != b.HasMemory {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(self.Memory, b.Memory) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if self.HasNetwork != b.HasNetwork {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if self.HasFilesystem != b.HasFilesystem {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if self.HasDiskIo != b.HasDiskIo {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if self.HasCustomMetrics != b.HasCustomMetrics {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats {
|
|
||||||
n := len(self.Stats) + 1
|
|
||||||
for i, s := range self.Stats {
|
|
||||||
if s.Timestamp.After(ref) {
|
|
||||||
n = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if n > len(self.Stats) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return self.Stats[n:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *ContainerInfo) StatsStartTime() time.Time {
|
|
||||||
var ret time.Time
|
|
||||||
for _, s := range self.Stats {
|
|
||||||
if s.Timestamp.Before(ret) || ret.IsZero() {
|
|
||||||
ret = s.Timestamp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self *ContainerInfo) StatsEndTime() time.Time {
|
|
||||||
var ret time.Time
|
|
||||||
for i := len(self.Stats) - 1; i >= 0; i-- {
|
|
||||||
s := self.Stats[i]
|
|
||||||
if s.Timestamp.After(ret) {
|
|
||||||
ret = s.Timestamp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// This mirrors kernel internal structure.
|
|
||||||
type LoadStats struct {
|
|
||||||
// Number of sleeping tasks.
|
|
||||||
NrSleeping uint64 `json:"nr_sleeping"`
|
|
||||||
|
|
||||||
// Number of running tasks.
|
|
||||||
NrRunning uint64 `json:"nr_running"`
|
|
||||||
|
|
||||||
// Number of tasks in stopped state
|
|
||||||
NrStopped uint64 `json:"nr_stopped"`
|
|
||||||
|
|
||||||
// Number of tasks in uninterruptible state
|
|
||||||
NrUninterruptible uint64 `json:"nr_uninterruptible"`
|
|
||||||
|
|
||||||
// Number of tasks waiting on IO
|
|
||||||
NrIoWait uint64 `json:"nr_io_wait"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CPU usage time statistics.
|
|
||||||
type CpuUsage struct {
|
|
||||||
// Total CPU usage.
|
|
||||||
// Unit: nanoseconds.
|
|
||||||
Total uint64 `json:"total"`
|
|
||||||
|
|
||||||
// Per CPU/core usage of the container.
|
|
||||||
// Unit: nanoseconds.
|
|
||||||
PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
|
|
||||||
|
|
||||||
// Time spent in user space.
|
|
||||||
// Unit: nanoseconds.
|
|
||||||
User uint64 `json:"user"`
|
|
||||||
|
|
||||||
// Time spent in kernel space.
|
|
||||||
// Unit: nanoseconds.
|
|
||||||
System uint64 `json:"system"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cpu Completely Fair Scheduler statistics.
|
|
||||||
type CpuCFS struct {
|
|
||||||
// Total number of elapsed enforcement intervals.
|
|
||||||
Periods uint64 `json:"periods"`
|
|
||||||
|
|
||||||
// Total number of times tasks in the cgroup have been throttled.
|
|
||||||
ThrottledPeriods uint64 `json:"throttled_periods"`
|
|
||||||
|
|
||||||
// Total time duration for which tasks in the cgroup have been throttled.
|
|
||||||
// Unit: nanoseconds.
|
|
||||||
ThrottledTime uint64 `json:"throttled_time"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// All CPU usage metrics are cumulative from the creation of the container
|
|
||||||
type CpuStats struct {
|
|
||||||
Usage CpuUsage `json:"usage"`
|
|
||||||
CFS CpuCFS `json:"cfs"`
|
|
||||||
// Smoothed average of number of runnable threads x 1000.
|
|
||||||
// We multiply by thousand to avoid using floats, but preserving precision.
|
|
||||||
// Load is smoothed over the last 10 seconds. Instantaneous value can be read
|
|
||||||
// from LoadStats.NrRunning.
|
|
||||||
LoadAverage int32 `json:"load_average"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PerDiskStats struct {
|
|
||||||
Major uint64 `json:"major"`
|
|
||||||
Minor uint64 `json:"minor"`
|
|
||||||
Stats map[string]uint64 `json:"stats"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DiskIoStats struct {
|
|
||||||
IoServiceBytes []PerDiskStats `json:"io_service_bytes,omitempty"`
|
|
||||||
IoServiced []PerDiskStats `json:"io_serviced,omitempty"`
|
|
||||||
IoQueued []PerDiskStats `json:"io_queued,omitempty"`
|
|
||||||
Sectors []PerDiskStats `json:"sectors,omitempty"`
|
|
||||||
IoServiceTime []PerDiskStats `json:"io_service_time,omitempty"`
|
|
||||||
IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"`
|
|
||||||
IoMerged []PerDiskStats `json:"io_merged,omitempty"`
|
|
||||||
IoTime []PerDiskStats `json:"io_time,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type MemoryStats struct {
|
|
||||||
// Current memory usage, this includes all memory regardless of when it was
|
|
||||||
// accessed.
|
|
||||||
// Units: Bytes.
|
|
||||||
Usage uint64 `json:"usage"`
|
|
||||||
|
|
||||||
// Number of bytes of page cache memory.
|
|
||||||
// Units: Bytes.
|
|
||||||
Cache uint64 `json:"cache"`
|
|
||||||
|
|
||||||
// The amount of anonymous and swap cache memory (includes transparent
|
|
||||||
// hugepages).
|
|
||||||
// Units: Bytes.
|
|
||||||
RSS uint64 `json:"rss"`
|
|
||||||
|
|
||||||
// The amount of swap currently used by the processes in this cgroup
|
|
||||||
// Units: Bytes.
|
|
||||||
Swap uint64 `json:"swap"`
|
|
||||||
|
|
||||||
// The amount of working set memory, this includes recently accessed memory,
|
|
||||||
// dirty memory, and kernel memory. Working set is <= "usage".
|
|
||||||
// Units: Bytes.
|
|
||||||
WorkingSet uint64 `json:"working_set"`
|
|
||||||
|
|
||||||
Failcnt uint64 `json:"failcnt"`
|
|
||||||
|
|
||||||
ContainerData MemoryStatsMemoryData `json:"container_data,omitempty"`
|
|
||||||
HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type MemoryStatsMemoryData struct {
|
|
||||||
Pgfault uint64 `json:"pgfault"`
|
|
||||||
Pgmajfault uint64 `json:"pgmajfault"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type InterfaceStats struct {
|
|
||||||
// The name of the interface.
|
|
||||||
Name string `json:"name"`
|
|
||||||
// Cumulative count of bytes received.
|
|
||||||
RxBytes uint64 `json:"rx_bytes"`
|
|
||||||
// Cumulative count of packets received.
|
|
||||||
RxPackets uint64 `json:"rx_packets"`
|
|
||||||
// Cumulative count of receive errors encountered.
|
|
||||||
RxErrors uint64 `json:"rx_errors"`
|
|
||||||
// Cumulative count of packets dropped while receiving.
|
|
||||||
RxDropped uint64 `json:"rx_dropped"`
|
|
||||||
// Cumulative count of bytes transmitted.
|
|
||||||
TxBytes uint64 `json:"tx_bytes"`
|
|
||||||
// Cumulative count of packets transmitted.
|
|
||||||
TxPackets uint64 `json:"tx_packets"`
|
|
||||||
// Cumulative count of transmit errors encountered.
|
|
||||||
TxErrors uint64 `json:"tx_errors"`
|
|
||||||
// Cumulative count of packets dropped while transmitting.
|
|
||||||
TxDropped uint64 `json:"tx_dropped"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type NetworkStats struct {
|
|
||||||
InterfaceStats `json:",inline"`
|
|
||||||
Interfaces []InterfaceStats `json:"interfaces,omitempty"`
|
|
||||||
// TCP connection stats (Established, Listen...)
|
|
||||||
Tcp TcpStat `json:"tcp"`
|
|
||||||
// TCP6 connection stats (Established, Listen...)
|
|
||||||
Tcp6 TcpStat `json:"tcp6"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type TcpStat struct {
|
|
||||||
// Count of TCP connections in state "Established"
|
|
||||||
Established uint64
|
|
||||||
// Count of TCP connections in state "Syn_Sent"
|
|
||||||
SynSent uint64
|
|
||||||
// Count of TCP connections in state "Syn_Recv"
|
|
||||||
SynRecv uint64
|
|
||||||
// Count of TCP connections in state "Fin_Wait1"
|
|
||||||
FinWait1 uint64
|
|
||||||
// Count of TCP connections in state "Fin_Wait2"
|
|
||||||
FinWait2 uint64
|
|
||||||
// Count of TCP connections in state "Time_Wait
|
|
||||||
TimeWait uint64
|
|
||||||
// Count of TCP connections in state "Close"
|
|
||||||
Close uint64
|
|
||||||
// Count of TCP connections in state "Close_Wait"
|
|
||||||
CloseWait uint64
|
|
||||||
// Count of TCP connections in state "Listen_Ack"
|
|
||||||
LastAck uint64
|
|
||||||
// Count of TCP connections in state "Listen"
|
|
||||||
Listen uint64
|
|
||||||
// Count of TCP connections in state "Closing"
|
|
||||||
Closing uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type FsStats struct {
|
|
||||||
// The block device name associated with the filesystem.
|
|
||||||
Device string `json:"device,omitempty"`
|
|
||||||
|
|
||||||
// Type of the filesytem.
|
|
||||||
Type string `json:"type"`
|
|
||||||
|
|
||||||
// Number of bytes that can be consumed by the container on this filesystem.
|
|
||||||
Limit uint64 `json:"capacity"`
|
|
||||||
|
|
||||||
// Number of bytes that is consumed by the container on this filesystem.
|
|
||||||
Usage uint64 `json:"usage"`
|
|
||||||
|
|
||||||
// Base Usage that is consumed by the container's writable layer.
|
|
||||||
// This field is only applicable for docker container's as of now.
|
|
||||||
BaseUsage uint64 `json:"base_usage"`
|
|
||||||
|
|
||||||
// Number of bytes available for non-root user.
|
|
||||||
Available uint64 `json:"available"`
|
|
||||||
|
|
||||||
// HasInodes when true, indicates that Inodes info will be available.
|
|
||||||
HasInodes bool `json:"has_inodes"`
|
|
||||||
|
|
||||||
// Number of Inodes
|
|
||||||
Inodes uint64 `json:"inodes"`
|
|
||||||
|
|
||||||
// Number of available Inodes
|
|
||||||
InodesFree uint64 `json:"inodes_free"`
|
|
||||||
|
|
||||||
// Number of reads completed
|
|
||||||
// This is the total number of reads completed successfully.
|
|
||||||
ReadsCompleted uint64 `json:"reads_completed"`
|
|
||||||
|
|
||||||
// Number of reads merged
|
|
||||||
// Reads and writes which are adjacent to each other may be merged for
|
|
||||||
// efficiency. Thus two 4K reads may become one 8K read before it is
|
|
||||||
// ultimately handed to the disk, and so it will be counted (and queued)
|
|
||||||
// as only one I/O. This field lets you know how often this was done.
|
|
||||||
ReadsMerged uint64 `json:"reads_merged"`
|
|
||||||
|
|
||||||
// Number of sectors read
|
|
||||||
// This is the total number of sectors read successfully.
|
|
||||||
SectorsRead uint64 `json:"sectors_read"`
|
|
||||||
|
|
||||||
// Number of milliseconds spent reading
|
|
||||||
// This is the total number of milliseconds spent by all reads (as
|
|
||||||
// measured from __make_request() to end_that_request_last()).
|
|
||||||
ReadTime uint64 `json:"read_time"`
|
|
||||||
|
|
||||||
// Number of writes completed
|
|
||||||
// This is the total number of writes completed successfully.
|
|
||||||
WritesCompleted uint64 `json:"writes_completed"`
|
|
||||||
|
|
||||||
// Number of writes merged
|
|
||||||
// See the description of reads merged.
|
|
||||||
WritesMerged uint64 `json:"writes_merged"`
|
|
||||||
|
|
||||||
// Number of sectors written
|
|
||||||
// This is the total number of sectors written successfully.
|
|
||||||
SectorsWritten uint64 `json:"sectors_written"`
|
|
||||||
|
|
||||||
// Number of milliseconds spent writing
|
|
||||||
// This is the total number of milliseconds spent by all writes (as
|
|
||||||
// measured from __make_request() to end_that_request_last()).
|
|
||||||
WriteTime uint64 `json:"write_time"`
|
|
||||||
|
|
||||||
// Number of I/Os currently in progress
|
|
||||||
// The only field that should go to zero. Incremented as requests are
|
|
||||||
// given to appropriate struct request_queue and decremented as they finish.
|
|
||||||
IoInProgress uint64 `json:"io_in_progress"`
|
|
||||||
|
|
||||||
// Number of milliseconds spent doing I/Os
|
|
||||||
// This field increases so long as field 9 is nonzero.
|
|
||||||
IoTime uint64 `json:"io_time"`
|
|
||||||
|
|
||||||
// weighted number of milliseconds spent doing I/Os
|
|
||||||
// This field is incremented at each I/O start, I/O completion, I/O
|
|
||||||
// merge, or read of these stats by the number of I/Os in progress
|
|
||||||
// (field 9) times the number of milliseconds spent doing I/O since the
|
|
||||||
// last update of this field. This can provide an easy measure of both
|
|
||||||
// I/O completion time and the backlog that may be accumulating.
|
|
||||||
WeightedIoTime uint64 `json:"weighted_io_time"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ContainerStats struct {
|
|
||||||
// The time of this stat point.
|
|
||||||
Timestamp time.Time `json:"timestamp"`
|
|
||||||
Cpu CpuStats `json:"cpu,omitempty"`
|
|
||||||
DiskIo DiskIoStats `json:"diskio,omitempty"`
|
|
||||||
Memory MemoryStats `json:"memory,omitempty"`
|
|
||||||
Network NetworkStats `json:"network,omitempty"`
|
|
||||||
|
|
||||||
// Filesystem statistics
|
|
||||||
Filesystem []FsStats `json:"filesystem,omitempty"`
|
|
||||||
|
|
||||||
// Task load stats
|
|
||||||
TaskStats LoadStats `json:"task_stats,omitempty"`
|
|
||||||
|
|
||||||
// Custom metrics from all collectors
|
|
||||||
CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {
|
|
||||||
// t1 should not be later than t2
|
|
||||||
if t1.After(t2) {
|
|
||||||
t1, t2 = t2, t1
|
|
||||||
}
|
|
||||||
diff := t2.Sub(t1)
|
|
||||||
if diff <= tolerance {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// 10ms, i.e. 0.01s
|
|
||||||
timePrecision time.Duration = 10 * time.Millisecond
|
|
||||||
)
|
|
||||||
|
|
||||||
// This function is useful because we do not require precise time
|
|
||||||
// representation.
|
|
||||||
func (a *ContainerStats) Eq(b *ContainerStats) bool {
|
|
||||||
if !timeEq(a.Timestamp, b.Timestamp, timePrecision) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return a.StatsEq(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checks equality of the stats values.
|
|
||||||
func (a *ContainerStats) StatsEq(b *ContainerStats) bool {
|
|
||||||
// TODO(vmarmol): Consider using this through reflection.
|
|
||||||
if !reflect.DeepEqual(a.Cpu, b.Cpu) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(a.Memory, b.Memory) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(a.DiskIo, b.DiskIo) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(a.Network, b.Network) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(a.Filesystem, b.Filesystem) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Event contains information general to events such as the time at which they
|
|
||||||
// occurred, their specific type, and the actual event. Event types are
|
|
||||||
// differentiated by the EventType field of Event.
|
|
||||||
type Event struct {
|
|
||||||
// the absolute container name for which the event occurred
|
|
||||||
ContainerName string `json:"container_name"`
|
|
||||||
|
|
||||||
// the time at which the event occurred
|
|
||||||
Timestamp time.Time `json:"timestamp"`
|
|
||||||
|
|
||||||
// the type of event. EventType is an enumerated type
|
|
||||||
EventType EventType `json:"event_type"`
|
|
||||||
|
|
||||||
// the original event object and all of its extraneous data, ex. an
|
|
||||||
// OomInstance
|
|
||||||
EventData EventData `json:"event_data,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// EventType is an enumerated type which lists the categories under which
|
|
||||||
// events may fall. The Event field EventType is populated by this enum.
|
|
||||||
type EventType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
EventOom EventType = "oom"
|
|
||||||
EventOomKill = "oomKill"
|
|
||||||
EventContainerCreation = "containerCreation"
|
|
||||||
EventContainerDeletion = "containerDeletion"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Extra information about an event. Only one type will be set.
|
|
||||||
type EventData struct {
|
|
||||||
// Information about an OOM kill event.
|
|
||||||
OomKill *OomKillEventData `json:"oom,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Information related to an OOM kill instance
|
|
||||||
type OomKillEventData struct {
|
|
||||||
// process id of the killed process
|
|
||||||
Pid int `json:"pid"`
|
|
||||||
|
|
||||||
// The name of the killed process
|
|
||||||
ProcessName string `json:"process_name"`
|
|
||||||
}
|
|
37
vendor/github.com/google/cadvisor/info/v1/docker.go
generated
vendored
37
vendor/github.com/google/cadvisor/info/v1/docker.go
generated
vendored
@ -1,37 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Types used for docker containers.
|
|
||||||
package v1
|
|
||||||
|
|
||||||
type DockerStatus struct {
|
|
||||||
Version string `json:"version"`
|
|
||||||
KernelVersion string `json:"kernel_version"`
|
|
||||||
OS string `json:"os"`
|
|
||||||
Hostname string `json:"hostname"`
|
|
||||||
RootDir string `json:"root_dir"`
|
|
||||||
Driver string `json:"driver"`
|
|
||||||
DriverStatus map[string]string `json:"driver_status"`
|
|
||||||
ExecDriver string `json:"exec_driver"`
|
|
||||||
NumImages int `json:"num_images"`
|
|
||||||
NumContainers int `json:"num_containers"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DockerImage struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
RepoTags []string `json:"repo_tags"` // repository name and tags.
|
|
||||||
Created int64 `json:"created"` // unix time since creation.
|
|
||||||
VirtualSize int64 `json:"virtual_size"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user